language
stringclasses
6 values
original_string
stringlengths
25
887k
text
stringlengths
25
887k
Python
def loader_callable_from_functions(cls, load_fn, defaults_fn):
    """Return a zero-argument factory that creates a new backend loader.

    :param load_fn: function that effectively loads the backend.
    :param defaults_fn: function that maps backend options to canonical
        and/or default values.
    :return: a callable (with no arguments) that will generate and
        return a BackendLoader object.
    """

    def make_loader():
        # Close over the two functions; each call builds a fresh instance.
        return cls(load_fn=load_fn, defaults_fn=defaults_fn)

    return make_loader
Python
def collect_backend_plugins():
    """Collect backend plugins.

    Look for entry points in namespace "myia.backend".  Each entry point
    must be a backend module from which we can import two functions:

    - `load_options(**backend_options)`: must check backend options and
      return a dictionary with valid options.  Used to cache loaded
      backends.
    - `load_backend(backend_options)`: must take a dictionary of valid
      backend options and return a new instance of backend.  Used to
      effectively load the backend if not already in cache.

    :return: a dictionary mapping a backend name to a loader function to
        generate BackendLoader instances.
    """
    # The Python backend ships with the myia package itself, so it is
    # registered by hand rather than through an entry point.
    plugins = {
        "python": BackendLoader.loader_callable_from_pkg(
            "myia.compile.backends.python"
        )
    }
    for entry_point in pkg_resources.iter_entry_points("myia.backend"):
        plugins[entry_point.name] = BackendLoader.loader_callable_from_pkg(
            entry_point.module_name
        )
    return plugins
Python
def parse_default():
    """Parse the default backend specification.

    Reads ``MYIA_BACKEND`` from the environment (falling back to
    "pytorch") and returns the backend name and an options dict.  See
    the documentation of get_default() for the backend string syntax
    ("name?opt1=v1&opt2=v2").
    """
    spec = os.environ.get("MYIA_BACKEND", "pytorch")
    name, sep, query = spec.partition("?")
    options = {}
    if sep:
        parsed = urllib.parse.parse_qs(
            query,
            keep_blank_values=True,
            strict_parsing=True,
            errors="strict",
        )
        # parse_qs maps every key to a list; each option must be unique.
        for key, values in parsed.items():
            assert len(values) == 1
            options[key] = values[0]
    return name, options
Python
def load_backend(name, options=None):
    """Load the named backend.

    Returns the backend instance registered for the name, loading and
    caching it on first use.  If you pass None as the name, this will
    load the default backend (see the documentation for get_default()).

    Arguments:
        name: Name of the backend, or None for the default.
        options: Optional dict of backend options.

    Raises:
        UnknownBackend: The name is not recognized.
        LoadingError: There was an error loading the backend.
    """
    if name is None:
        assert options is None
        return get_default()
    if options is None:
        options = {}
    if name not in _backends:
        raise UnknownBackend(name)
    backend_loader = _backends[name]()
    options = backend_loader.load_options(**options)
    # Canonical cache key: sort the options so dict ordering is
    # irrelevant.  (The previous `sorted(list(...))` wrapped items() in a
    # redundant list; sorted() accepts any iterable.)
    key = (name, tuple(sorted(options.items())))
    res = _active_backends.get(key)
    if res is None:
        try:
            res = backend_loader.load_backend(options)
            _active_backends[key] = res
        except Exception as e:
            raise LoadingError(name) from e
    return res
Python
def register_backend(name, load_fn, defaults_fn):
    """Register a new backend.

    Allows third-party libraries to register their own backends when
    loaded by the user.  Built-in backends are preregistered.

    Arguments:
        name (str): Name of the backend; must be unique.
        load_fn: Function that will load the backend.  This must return
            a callable that will take keyword arguments for options.
        defaults_fn: Function that takes the same default arguments as
            load_fn and maps them to canonical and/or default values.
    """
    assert name not in _backends
    loader = BackendLoader.loader_callable_from_functions(
        load_fn, defaults_fn
    )
    _backends[name] = loader
Python
def compile(self, graph, argspec, outspec):
    """Compile the group of graphs rooted at `graph`.

    This function takes in a fully typed graph cluster rooted at
    `graph` with a manager and must return a callable that accepts
    arguments of the same type and number as the root graph.

    :param graph: root of the fully typed graph cluster to compile.
    :param argspec: specification of the argument types.
    :param outspec: specification of the output type.
    :raises NotImplementedError: always; concrete backends override this.
    """
    # Abstract method: subclasses provide the actual compilation.
    raise NotImplementedError("compile")
Python
def supports_prim_group(self, prim_group: PrimGroup):
    """Return True if given primitive group is supported.

    :param prim_group: a PrimitiveGroup object
    :raises NotImplementedError: always; concrete backends override this.
    """
    # Abstract method: subclasses declare their primitive support.
    raise NotImplementedError("supports_prim_group")
Python
def defaults(self):
    """Return defaults for this object, resolving them lazily on first use."""
    if self._defaults is None:
        resolved = resolve_from_path(self._defaults_location)
        # The location may point directly at a dict, or at an object
        # that holds the dict under the attribute named by
        # `defaults_field`.
        if not isinstance(resolved, dict):
            resolved = getattr(resolved, self.defaults_field)
        self._defaults = resolved
    return self._defaults
Python
def repr_(obj: Any, **kwargs: Any):
    """Return a unique string representation of an object, with extra info.

    The usual representation is `<module.Class object at address>`.
    This function returns `<module.Class(key=value) object at address>`
    instead, to make objects easier to identify by their attributes.

    Arguments:
        obj: Object to represent.
        **kwargs: Attribute names and values printed as part of the
            representation.
    """
    qualname = f"{obj.__module__}.{type(obj).__name__}"
    attrs = ", ".join(f"{k}={v}" for k, v in kwargs.items())
    return f"<{qualname}({attrs}) object at {hex(id(obj))}>"
Python
def list_str(lst: List):
    """Return string representation of a list.

    Unlike the default string representation, this calls `str` instead
    of `repr` on each element.
    """
    return "[" + ", ".join(map(str, lst)) + "]"
Python
def register(self, handler, run_history=False):
    """Register a handler for this event.

    Returns the handler so that register can be used as a decorator.

    Arguments:
        handler: A function.  The handler's first parameter will always
            be the event.
        run_history: Whether to call the handler for all previous
            events or not.
    """
    self._handlers.append(handler)
    if run_history and self.history:
        for entry in self.history():
            # History entries may be argument tuples or single values.
            args = entry if isinstance(entry, tuple) else (entry,)
            handler(self, *args)
    return handler
Python
def trigger(self, stringify=lambda err: f"* {err.args[0]}"):
    """Raise an exception if an error occurred."""
    if not self.errors:
        return
    message = "\n".join(stringify(err) for err in self.errors)
    exc = self.exc_class(message)
    # Attach the raw error list for programmatic inspection by callers.
    exc.errors = self.errors
    raise exc
Python
def core(fn=None, **flags):
    """Wrap a graph that defines a core Myia function.

    Usable directly (``core(fn)``) or with flags only
    (``@core(ignore_values=True)``), in which case a decorator is
    returned.

    The following flags can be set:
        core: (default: True) Indicates that this is a core function
            (only informative at the moment).
        ignore_values: (default: False) Make the inferrer ignore
            argument values for the parameters (leads to less
            specialization).
    """
    flags = {
        # This is a function defined in Myia's core
        "core": True,
        "reference": True,
        **flags,
    }

    if fn is None:
        # Flags-only call: despite the `fn=None` default, the original
        # code fell through and crashed with AttributeError on None.
        # Return a decorator that applies the collected flags instead.
        def decorate(fn):
            fn._myia_flags = flags
            return fn

        return decorate

    fn._myia_flags = flags
    return fn
Python
def resolve_from_path(path):
    """Resolve a module or object from a path of the form x.y.z."""
    modname, _, field = path.rpartition(".")
    # fromlist forces __import__ to return the leaf module, not the root.
    module = __import__(modname, fromlist=[field])
    return getattr(module, field)
Python
def assert_scalar(*args):
    """Assert that the arguments are all scalars.

    Accepts Python ints/floats, numpy scalar types, and 0-d numpy
    arrays.

    Raises:
        TypeError: If any argument is an array with a nonempty shape or
            is not a numeric scalar type.
    """
    # TODO: These checks should be stricter, e.g. require that all args
    # have exactly the same type, but right now there is some mixing
    # between numpy types and int/float.
    for value in args:
        if isinstance(value, np.ndarray):
            if value.shape != ():
                raise TypeError(
                    f"Expected scalar, not array with shape {value.shape}"
                )
        elif not isinstance(value, (int, float, np.number)):
            raise TypeError(f"Expected scalar, not {type(value)}")
Python
def dfs(
    root: T,
    succ: Callable[[T], Iterable[T]],
    include: Callable[[T], str] = always_include,
) -> Iterable[T]:
    """Perform a depth-first search.

    Arguments:
        root: The node to start from.
        succ: A function that returns a node's successors.
        include: A function that returns whether to include a node in
            the search.

            * Return 'follow' to include the node and follow its edges.
            * Return 'nofollow' to include the node but not follow its
              edges.
            * Return 'exclude' to not include the node, nor follow its
              edges.
    """
    visited: Set[T] = set()
    stack = [root]
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        decision = include(node)
        if decision == FOLLOW:
            yield node
            stack.extend(succ(node))
        elif decision == NOFOLLOW:
            yield node
        elif decision != EXCLUDE:
            raise ValueError(
                "include(node) must return one of: "
                '"follow", "nofollow", "exclude"'
            )
Python
def toposort(
    root: T,
    succ: Callable[[T], Iterable[T]],
    include: Callable[[T], str] = always_include,
    allow_cycles=False,
) -> Iterable[T]:
    """Yield the nodes in the tree starting at root in topological order.

    Arguments:
        root: The node to start from.
        succ: A function that returns a node's successors.
        include: A function that returns whether to include a node in
            the search.

            * Return 'follow' to include the node and follow its edges.
            * Return 'nofollow' to include the node but not follow its
              edges.
            * Return 'exclude' to not include the node, nor follow its
              edges.
        allow_cycles: Return an arbitrary order for graphs with cycles
            instead of an error.
    """
    done: Set[T] = set()
    # `todo` is used as an explicit stack; a node is yielded only once
    # all of its (included) successors have been yielded.
    todo: List[T] = [root]
    # rank[node] records the stack depth at which the node was first
    # seen; seeing it again at a different depth means we are inside a
    # cycle.
    rank: Dict[T, int] = {}
    cycles = set()
    while todo:
        node = todo[-1]
        if node in done:
            todo.pop()
            continue
        if node in rank and rank[node] != len(todo):
            if allow_cycles:
                # Remember the cycle member so it is not re-pushed below.
                cycles.add(node)
            else:
                raise ValueError("cycle")
        else:
            rank[node] = len(todo)
        cont = False
        incl = include(node)
        if incl == FOLLOW:
            for i in succ(node):
                if i not in done and i not in cycles:
                    todo.append(i)
                    cont = True
        elif incl == NOFOLLOW:
            pass
        elif incl == EXCLUDE:
            done.add(node)
            todo.pop()
            continue
        else:
            raise ValueError(
                "include(node) must return one of: "
                '"follow", "nofollow", "exclude"'
            )
        if cont:
            # Unvisited successors were pushed; process them before
            # yielding this node.
            continue
        done.add(node)
        yield node
        todo.pop()
Python
def gc(self):
    """Garbage-collect unused canonical objects."""
    # Snapshot and reset the queue in place before touching self.hashes.
    pending = list(self.to_gc)
    self.to_gc.clear()
    for hsh in pending:
        # Only drop buckets that are still empty.
        if not self.hashes[hsh]:
            del self.hashes[hsh]
Python
def eqkey(x):
    """Return the equality key for x."""
    if getattr(x, "_incomplete", False):
        # Objects still under construction cannot be keyed yet.
        raise IncompleteException()
    if isinstance(x, EqKey):
        return x
    if isinstance(x, (list, tuple)):
        return ItemEK(x, range(len(x)))
    if isinstance(x, dict):
        return ItemEK(x, x.keys())
    if hasattr(x, "__eqkey__"):
        return x.__eqkey__()
    # Sets have no stable item order, so they are not supported here.
    assert not isinstance(x, (set, frozenset))
    return Atom(x, x)
Python
def deep_eqkey(obj, path=frozenset()):
    """Return a key for equality tests for non-recursive structures.

    The computed key is cached on the object under the (illegal as an
    identifier, so collision-proof) attribute name "$intern_deep_eqkey".
    Recursive structures resolve to the RECURSIVE sentinel.
    """
    # Plain scalars are their own key.
    if obj is None or isinstance(obj, (int, float)):
        return obj
    cached = getattr(obj, "$intern_deep_eqkey", None)
    if cached is not None:
        return cached
    oid = id(obj)
    # `path` holds the ids of objects currently being keyed above us;
    # revisiting one means the structure is self-referential.
    if oid in path:
        _maybe_setattr(obj, "$intern_deep_eqkey", RECURSIVE)
        return RECURSIVE
    key = eqkey(obj)
    if isinstance(key, ElementsBase):
        subs = [deep_eqkey(x, path | {oid}) for x in key.values]
        # Recursion anywhere below taints this whole key.
        if RECURSIVE in subs:
            _maybe_setattr(obj, "$intern_deep_eqkey", RECURSIVE)
            return RECURSIVE
        dk = (
            key.type,
            # Preserve the container type (list vs tuple) of the values.
            type(key.values)(subs),
        )
    else:
        assert isinstance(key, Atom)
        dk = key.type, key.value
    _maybe_setattr(obj, "$intern_deep_eqkey", dk)
    return dk
Python
def hashrec(obj, n=10):
    """Hash a (possibly self-referential) object.

    This explores the object breadth-first and uses the first n
    elements to compute the hash.

    Arguments:
        obj: The object for which to compute a hash.
        n: The maximum number of contributions to the hash.
    """
    contributions = []
    for index, key in enumerate(_bfs(obj)):
        if index == n:
            break
        if isinstance(key, Atom):
            contributions += (key.type, key.value)
        else:
            # Composite keys contribute their arity, not their contents.
            contributions += (key.type, len(key.values))
    return pyhash(tuple(contributions))
Python
def eqrec(obj1, obj2, cache=None):
    """Compare two (possibly self-referential) objects for equality.

    Arguments:
        obj1: First object to compare.
        obj2: Second object to compare.
        cache: Set of (id, id) pairs already assumed equal, used to cut
            off cycles in self-referential structures.  A fresh set is
            created when omitted.
    """
    if cache is None:
        # The previous None default crashed with TypeError on the
        # membership test below; start with an empty cycle-cache.
        cache = set()
    id1 = id(obj1)
    id2 = id(obj2)
    # Assume equality for pairs already under comparison (cycle cutoff).
    if (id1, id2) in cache:
        return True
    cache.add((id1, id2))
    key1 = eqkey(obj1)
    key2 = eqkey(obj2)
    if type(key1) is not type(key2) or key1.type is not key2.type:
        return False
    if isinstance(key1, Atom):
        return key1.value == key2.value
    elif isinstance(key1, ElementsBase):
        v1 = key1.values
        v2 = key2.values
        if len(v1) != len(v2):
            return False
        for x1, x2 in zip(v1, v2):
            if not eqrec(x1, x2, cache):
                return False
        return True
    else:
        raise AssertionError()
Python
def eq(obj1, obj2):
    """Compare two (possibly self-referential) objects for equality."""
    if obj1 is obj2:
        return True
    key1 = deep_eqkey(obj1)
    if key1 is RECURSIVE:
        # Self-referential structures need the pairwise recursive check.
        return eqrec(obj1, obj2, set())
    return key1 == deep_eqkey(obj2)
Python
def new(cls, *args, **kwargs):
    """Instantiate a non-interned instance.

    Bypasses the class's usual (interning) construction path while
    still running ``__init__`` normally.
    """
    instance = object.__new__(cls)
    instance.__init__(*args, **kwargs)
    return instance
Python
def empty(cls):
    """Create an empty, incomplete instance.

    ``__init__`` is deliberately not called; the instance is flagged
    via ``_incomplete`` so other machinery can detect it.
    """
    instance = object.__new__(cls)
    instance._incomplete = True
    return instance
Python
async def embed(info, x):
    """Return a constant that embeds the identity of the input node."""
    typ = sensitivity_transform(await x.get())
    # Pair the node with its sensitivity type to form a symbolic key.
    key = SymbolicKeyInstance(x.node, typ)
    return Constant(key)
Python
def combine_relations(self, root_name, relations):
    """Combine a name and a list of relations in a single string."""
    if root_name is None:
        return None
    # Integer relations are numeric ids; only the last one is kept.
    ids = [r for r in relations if isinstance(r, int)]
    symbolic = [r for r in relations if not isinstance(r, int)]
    if ids:
        symbolic.append(ids[-1])
    prefix = "".join(
        self.relation_symbols.get(r, f"{r}:") for r in reversed(symbolic)
    )
    return prefix + root_name
Python
def const_fn(self, node):
    """Return name of function, if constant.

    Given an `Apply` node of a constant function, return the name of
    that function, otherwise return None.
    """
    fn = node.inputs[0] if node.inputs else None
    if fn and fn.is_constant():
        return self.label(fn, False)
    return None
Python
def label(x, labeler=default_labeler):
    """Return an informative textual label for a node."""
    if isinstance(x, Primitive):
        return x.name
    if isinstance(x, (ANFNode, Graph, DebugInfo)):
        return labeler.name(x, True)
    return repr(x)
Python
async def infer_casttag(
    self, engine, x: lib.AbstractTaggedUnion, tag: xtype.Int[64]
):
    """Infer the return type of primitive `casttag`.

    :param engine: the inference engine.
    :param x: abstract tagged union whose options are searched.
    :param tag: must resolve to a constant Int[64] naming one of the
        union's tags.
    :return: the abstract type associated with the requested tag.
    """
    opts = await lib.force_pending(x.options)
    # The tag must be a compile-time constant drawn from the union's
    # declared tag set; require_constant enforces and reports that.
    tag_v = self.require_constant(tag, argnum=2, range={i for i, _ in opts})
    for i, typ in opts:
        if i == tag_v:
            return typ
    # require_constant restricted tag_v to the tags iterated above, so
    # one of the branches must have returned.
    raise AssertionError("Unreachable")
Python
def inject(**utilities):
    """Inject all utilities in the globals of every module.

    Existing names are never overwritten.  Modules whose globals reject
    item assignment raise TypeError, which is deliberately ignored.
    """
    # Iterate a snapshot: injecting can trigger imports that mutate
    # sys.modules.  (The previous version iterated .items() but never
    # used the key.)
    for module in list(sys.modules.values()):
        glob = vars(module)
        for key, value in utilities.items():
            if key not in glob:
                try:
                    glob[key] = value
                except TypeError:
                    pass
Python
def range_(start, stop=None, step=None):
    """Myia implementation of the standard range function.

    Mirrors the builtin: range_(stop), range_(start, stop) or
    range_(start, stop, step).
    """
    if stop is None:
        # Single-argument form: the first argument is actually the stop.
        start, stop = 0, start
    if step is None:
        step = 1
    return Range(start, stop, step)
Python
def _resolve_var(self): """Try to forcefully resolve one variable to resume execution. For example, if the code contains the literal 1.0, we let its type be determined by variables it interacts with, but if there is nothing else to do, we may force it to Float[64]. """ # Filter out all done tasks varlist = [fut for fut in self._vars if not fut.done()] # Filter out priority-less tasks, which cannot be forced later = [fut for fut in varlist if fut.priority() is None] varlist = [fut for fut in varlist if fut.priority() is not None] self._vars = later if not varlist: return False varlist.sort(key=lambda x: x.priority()) found = False while varlist: v1 = varlist.pop() try: v1.force_resolve() except self.errtype as e: self._errors.append(e) else: found = True break self._vars += varlist return found
def _resolve_var(self): """Try to forcefully resolve one variable to resume execution. For example, if the code contains the literal 1.0, we let its type be determined by variables it interacts with, but if there is nothing else to do, we may force it to Float[64]. """ # Filter out all done tasks varlist = [fut for fut in self._vars if not fut.done()] # Filter out priority-less tasks, which cannot be forced later = [fut for fut in varlist if fut.priority() is None] varlist = [fut for fut in varlist if fut.priority() is not None] self._vars = later if not varlist: return False varlist.sort(key=lambda x: x.priority()) found = False while varlist: v1 = varlist.pop() try: v1.force_resolve() except self.errtype as e: self._errors.append(e) else: found = True break self._vars += varlist return found
Python
def run_forever(self):
    """Run this loop until there is no more work to do."""
    while True:
        # Drain the queue of scheduled callbacks first.
        while self._todo:
            handle = self._todo.popleft()
            handle._run()
        # If some literals weren't forced to a concrete type by some
        # operation, we sort by priority (i.e. floats first) and force
        # the first one to take its default concrete type, then resume.
        # Stop once nothing more can be forced.
        if not self._resolve_var():
            break
Python
def call_exception_handler(self, ctx):
    """Log an exception in the list of errors."""
    if "exception" not in ctx:
        # A context without an exception is unexpected here.
        raise AssertionError("call_exception_handler", ctx)
    self._errors.append(ctx["exception"])
Python
def collect_errors(self):
    """Return a collection of all exceptions from all futures."""
    # Take ownership of the task and error lists, resetting both.
    futs, self._tasks = self._tasks, []
    errors, self._errors = self._errors, []
    for _, fut in futs:
        if fut.done() and fut.exception():
            errors.append(fut.exception())
    not_done = [ref for ref, fut in futs if not fut.done()]
    if not errors and not_done:
        # Nothing failed outright but inference stalled: report the
        # blocked references as a likely infinite loop.
        exc = self.errtype(
            "Could not run inference to completion."
            " There might be an infinite loop in the program"
            " which prevents type inference from working. "
            " The above is the set of blocked calls and does not "
            " necessarily constitute a stack trace.",
            refs=[x for x in not_done if x],
            priority=-2,
        )
        errors.append(exc)
    return errors
Python
def call_soon(self, callback, *args, context=None):
    """Schedule ``callback(*args)`` for the next queue drain.

    Returns the ``asyncio.Handle`` wrapping the callback.
    """
    handle = asyncio.Handle(callback, args, self, context=context)
    self._todo.append(handle)
    return handle
Python
def create_pending(self, resolve, priority):
    """Create a Pending tied to this loop and track it for resolution."""
    pend = Pending(resolve=resolve, priority=priority, loop=self)
    self._vars.append(pend)
    return pend
Python
def create_pending_from_list(self, poss, dflt, priority):
    """Create a PendingFromList tied to this loop and track it."""
    pend = PendingFromList(poss, dflt, priority, loop=self)
    self._vars.append(pend)
    return pend
Python
def create_pending_tentative(self, tentative):
    """Create a PendingTentative tied to this loop and track it."""
    pend = PendingTentative(tentative=tentative, loop=self)
    self._vars.append(pend)
    return pend
Python
def is_simple(x):
    """Returns whether data or a Pending is considered "simple".

    "Simple" data is merged by identity, whereas this may not be the
    case for non-simple data, e.g. Possibilities are merged using set
    union, and distinct numbers e.g. 2 and 3 are merged into ANYTHING.

    Simple data can be forced more easily because it won't cause
    problems if we find more values to merge along.
    """
    from .data import AbstractScalar

    if isinstance(x, Pending):
        return x.is_simple()
    if isinstance(x, AbstractScalar):
        # A scalar is simple iff its storage type is.
        return is_simple(x.xtype())
    return isinstance(x, xtype.TypeMeta)
Python
async def find_coherent_result(v, fn):
    """Return fn(v) without fully resolving v, if possible.

    If v is a PendingFromList and fn(x) is the same for every x in v,
    this will return that result without resolving which possibility v
    is. Otherwise, v will be resolved.
    """
    if isinstance(v, PendingFromList):
        # Probe every possibility: if they all map to the same result
        # we can answer without committing v to a single option.
        results = set()
        for option in v.possibilities:
            results.add(await fn(option))
        if len(results) == 1:
            return results.pop()
    # No unique answer: resolve v and evaluate fn on the result.
    x = await v
    return await fn(x)
Python
def find_coherent_result_sync(v, fn):
    """Return fn(v) without fully resolving v, if possible.

    If v is a PendingFromList and fn(x) is the same for every x in v,
    this will return that result without resolving which possibility v
    is. Otherwise, an exception is raised: any other Pending would
    have to be resolved asynchronously, which this sync variant cannot
    do.
    """
    if isinstance(v, PendingFromList):
        exc = InferenceError("Nothing matches")
        results = set()
        for option in v.possibilities:
            try:
                results.add(fn(option))
            except Exception as e:
                # Remember the last failure; it is re-raised only if
                # no option succeeded at all.
                exc = e
        if len(results) == 1:
            return results.pop()
        elif len(results) == 0:
            raise exc
        else:
            # Multiple distinct results: the answer depends on which
            # possibility v ends up being.
            raise InferenceError("Must resolve Pending to find result")
    elif isinstance(v, Pending):
        raise InferenceError("Must resolve Pending to find result")
    else:
        return fn(v)
Python
async def force_pending(v):
    """Resolve v if v is Pending, otherwise return v directly."""
    return (await v) if isinstance(v, Pending) else v
Python
def run_helper(epochs, n, batch_size, layer_sizes):
    """Run a model with the specified layer sizes on n random batches.

    Arguments:
        epochs: How many epochs to run.
        n: Number of training batches to generate.
        batch_size: Number of samples per batch.
        layer_sizes: Sizes of the model's layers.
    """
    # Build an MLP: one Linear+Tanh pair per generated parameter set.
    layers = []
    for W, b in mlp_parameters(*layer_sizes):
        layers.append(Linear(W, b))
        layers.append(Tanh())
    model = Sequential(tuple(layers))
    # `backend` and `backend_options` come from the enclosing module
    # scope.
    model = to_device(model, backend, backend_options)
    # Input width is the first layer size, target width the last.
    data = generate_data(n, batch_size, layer_sizes[0], layer_sizes[-1])
    for _ in range(epochs):
        costs = []
        t0 = time.time()
        for inp, target in data:
            # `step` returns the batch cost and the updated model;
            # `lr` is the module-level learning rate.
            cost, model = step(model, inp, target, lr)
            costs.append(cost)
        # Transfer costs back from the device before averaging.
        costs = [float(c.from_device()) for c in costs]
        c = sum(costs) / n
        t = time.time() - t0
        print(f"Cost: {c:15.10f}\tTime: {t:15.10f}")
Python
def fill_reverse_tag_map():
    """Fill the back-conversion map.

    Do this after a compilation step involving the constructors you
    need since the tags are not set otherwise.
    """
    for myia_tag, constructor in tag_map.items():
        # A constructor tag of -1 means it was never compiled.
        if constructor.tag != -1:
            rev_tag_map[constructor.tag] = myia_tag
Python
def initialize(self, mod, mng):
    """Add types to the module.

    Registers the union, option and env ADTs into the Relay module
    `mod`.  If a manager `mng` is given, its nodes are scanned first
    to pre-register union constructors and to collect the value types
    stored into the environment via `env_setitem` calls.
    """
    if mng is not None:
        for node in mng.all_nodes:
            if isinstance(node.abstract, AbstractTaggedUnion):
                # Make sure a constructor exists for every option.
                for opt in node.abstract.options:
                    get_union_ctr(*opt)
            elif node.is_apply(P.env_setitem):
                # Record the Relay type of the value stored at this
                # (constant) env key.
                key = node.inputs[2]
                tt = to_relay_type(node.inputs[3].abstract)
                assert key.is_constant()
                self.env_val_map[key.value] = tt
    # Assign each env key a stable positional index (sorted for
    # determinism), keeping its Relay type alongside.
    env_val_keys = sorted(list(self.env_val_map.keys()))
    for i, k in enumerate(env_val_keys):
        self.env_val_map[k] = (i, self.env_val_map[k])
    mod[union_type] = adt.TypeData(union_type, [], list(tag_map.values()))
    mod[option_type] = adt.TypeData(option_type, [a], [nil, some])
    self.env_ctr = adt.Constructor("v", [self._build_env_type()], env_type)
    mod[env_type] = adt.TypeData(env_type, [], [self.env_ctr, dead_env])
Python
def do_env_update(self, env_, key, val):
    """Build the code to update the env.

    Produces Relay code that unpacks the env tuple, then rebuilds it
    with the slot for `key` replaced by `some(val)` while every other
    slot is copied unchanged.
    """
    v = relay.var("v")
    cl = adt.Clause(
        adt.PatternConstructor(self.env_ctr, [adt.PatternVar(v)]), v
    )
    env = adt.Match(env_, [cl], complete=False)
    # env_val_map maps key -> (slot index, relay type); invert it to
    # look keys up by slot.  (Renamed from `map`, which shadowed the
    # builtin.)
    idx_to_key = {i: k for k, (i, _) in self.env_val_map.items()}
    new_env = relay.Tuple(
        [
            some(val) if idx_to_key[i] == key else relay.TupleGetItem(env, i)
            for i in range(len(idx_to_key))
        ]
    )
    return self.env_ctr(new_env)
Python
def do_env_find(self, env, key, dft):
    """Build the code to find a value in env.

    Produces Relay code that unpacks the env tuple, fetches the option
    stored in `key`'s slot, and matches it: `some(x)` yields x while
    `nil` yields the default `dft`.
    """
    v = relay.var("v")
    cl = adt.Clause(
        adt.PatternConstructor(self.env_ctr, [adt.PatternVar(v)]), v
    )
    env_v = adt.Match(env, [cl], complete=False)
    # env_val_map maps key -> (slot index, relay type).
    val = relay.TupleGetItem(env_v, self.env_val_map[key][0])
    x = relay.var("x")
    nil_c = adt.Clause(adt.PatternConstructor(nil, []), dft)
    some_c = adt.Clause(
        adt.PatternConstructor(some, [adt.PatternVar(x)]), x
    )
    return adt.Match(val, [some_c, nil_c])
Python
def to_relay_type(self, a: AbstractScalar):
    """Convert a myia abstract to a Relay type.

    Booleans and numbers map to Relay scalar types; Nil and
    UniverseType are represented as empty tuples; EnvType maps to the
    dedicated env ADT.
    """
    tp = a.xtype()
    if issubclass(tp, Bool):
        return relay.ty.scalar_type("bool")
    elif issubclass(tp, Nil):
        return relay.ty.TupleType([])
    elif issubclass(tp, EnvType):
        return env_type()
    elif issubclass(tp, UniverseType):
        return relay.ty.TupleType([])
    else:
        # Numeric scalar: map through the numpy dtype name.
        return relay.ty.scalar_type(type_to_np_dtype(tp))
Python
def handle_wrapper(fn, handle_params):
    """Wraps a model function to perform handle updates.

    `handle_params` lists the positional indices of handle arguments.
    The wrapper expects `fn` to return `(updates, *outputs)`: each
    update is written back onto the matching handle's `value`, and
    `((), outputs)` is returned.  With no handle params, `fn` is
    returned untouched.
    """
    if not handle_params:
        return fn

    def wrapper(*args):
        handles = [args[i] for i in handle_params]
        res = fn(*args)
        updates = res[0]
        # A single remaining element is unwrapped from its tuple.
        outputs = res[1] if len(res) == 2 else res[1:]
        for handle, new_value in zip(handles, updates):
            handle.value = new_value
        return (), outputs

    return wrapper
Python
def from_list(elems):
    """Convert a list to a linked list using Cons."""
    # Build back-to-front so the first element ends up at the head.
    result = Empty()
    for idx in range(len(elems) - 1, -1, -1):
        result = Cons(elems[idx], result)
    return result
Python
async def infer_argmax(
    self, engine, input: lib.AbstractArray, dim: lib.u64tup_typecheck
):
    """Infer the return type of primitive `argmax`.

    The result keeps the input's shape with every reduced dimension
    set to 1 (keepdim semantics) and has element type Int[64].
    """
    shp = ()
    shp_inp = input.xshape()
    # Each reduction dimension must be a compile-time constant.
    dim = tuple(
        self.require_constant(e, argnum=f'"1:dim[{edx}]"')
        for edx, e in enumerate(dim.elements)
    )
    shp = list(shp_inp)
    for d in dim:
        shp[d] = 1
    shp = tuple(shp)
    return type(input)(
        AbstractScalar({VALUE: ANYTHING, TYPE: xtype.Int[64]}),
        {SHAPE: shp, TYPE: input.xtype()},
    )
Python
def pytorch_random_initialize(seed):
    """Implementation of random_initialize for pytorch.

    Seeds a fresh torch Generator and returns its serialized state.
    """
    generator = torch.Generator()
    generator.manual_seed(seed.item())
    return generator.get_state()
Python
def pytorch_random_uint32(rstate, shape):
    """Implementation of random_uint32 for pytorch.

    Draws uniform integers in [0, 2**32) using the given generator
    state; returns the advanced state along with the samples.
    """
    dims = tuple(d.item() for d in shape)
    generator = torch.Generator()
    generator.set_state(rstate)
    samples = torch.zeros(dims, dtype=torch.int64)
    samples.random_(0, 2 ** 32, generator=generator)
    return generator.get_state(), samples
Python
def pytorch_array_cast(op):
    """Implementation of array_cast for pytorch."""
    # The target myia type is the constant third input of the node.
    target = op.inputs[2]
    dtype = _type_map[target.value.xtype()]

    def _impl(x):
        return (x.to(dtype=dtype),)

    return _impl, op.inputs[1:2]
Python
def pytorch_array_map(op):
    """Implementation of array_map for pytorch."""
    fn_node = op.inputs[1]
    assert fn_node.is_constant(Primitive)
    prim = fn_node.value
    # Look up the elementwise torch equivalent of the scalar primitive.
    if prim not in scalar_mapping:
        raise NotImplementedError(f"array_map of {prim}")
    impl = scalar_mapping[prim]

    def _impl(*args):
        return (impl(*args),)

    return _impl, op.inputs[2:]
Python
def _pytorch_array_reduce_add(tshp): """Generate implementation for sum reduction based on given axes.""" def _impl(array): ashp = array.shape if len(tshp) < len(ashp): ts = (1,) * (len(ashp) - len(tshp)) + tshp else: ts = tshp axis = list(i for i, t in enumerate(ts) if t == 1) if len(axis) == 1: axis = axis[0] res = torch.sum(array, axis, keepdim=True) if len(tshp) < len(ashp): res = torch.reshape(res, shape=tshp) return (res,) return _impl
def _pytorch_array_reduce_add(tshp): """Generate implementation for sum reduction based on given axes.""" def _impl(array): ashp = array.shape if len(tshp) < len(ashp): ts = (1,) * (len(ashp) - len(tshp)) + tshp else: ts = tshp axis = list(i for i, t in enumerate(ts) if t == 1) if len(axis) == 1: axis = axis[0] res = torch.sum(array, axis, keepdim=True) if len(tshp) < len(ashp): res = torch.reshape(res, shape=tshp) return (res,) return _impl
Python
def _pytorch_array_reduce_mul(tshp): """Generate implementation for product reduction based on given axes.""" def _impl(array): ashp = array.shape if len(tshp) in (0, len(ashp)): res = torch.prod(array) else: raise NotImplementedError( "We currently only support full product on an array." ) return (res,) return _impl
def _pytorch_array_reduce_mul(tshp): """Generate implementation for product reduction based on given axes.""" def _impl(array): ashp = array.shape if len(tshp) in (0, len(ashp)): res = torch.prod(array) else: raise NotImplementedError( "We currently only support full product on an array." ) return (res,) return _impl
Python
def pytorch_array_reduce(op):
    """Implementation of array_reduce for pytorch."""
    fn_node = op.inputs[1]
    shape_node = op.inputs[3]
    assert fn_node.is_constant(Primitive)
    assert shape_node.is_constant(tuple)
    prim = fn_node.value
    tshp = shape_node.value
    # Dispatch on the reduction primitive.
    if prim == P.scalar_add:
        impl = _pytorch_array_reduce_add(tshp)
    elif prim == P.scalar_mul:
        impl = _pytorch_array_reduce_mul(tshp)
    else:
        raise NotImplementedError(f"reduce with {prim}")
    return impl, (op.inputs[2],)
Python
def pytorch_array_getitem(op):
    """Implementation of array_getitem for pytorch."""

    def _impl(array, begin, end, strides):
        # Translate the begin/end/stride triples into slice objects.
        slices = tuple(map(slice, begin, end, strides))
        return (array[slices],)

    return _impl, op.inputs[1:]
Python
def pytorch_array_setitem(op):
    """Implementation of array_setitem for pytorch."""

    def _impl(array, begin, end, strides, value):
        slices = tuple(map(slice, begin, end, strides))
        # array_setitem is functional: work on a copy so the input is
        # not mutated.
        updated = array.clone()
        updated[slices] = value
        return (updated,)

    return _impl, op.inputs[1:]
Python
def pytorch_argmax(op):
    """Implementation of argmax for pytorch."""

    def _impl(x, dim):
        dim = tuple(sorted(dim))
        # Move all reduced dimensions to the end...
        n = ()
        for _s in range(len(x.shape)):
            if _s not in dim:
                n = n + (_s,)
        n = n + dim
        x = x.permute(n)
        # ...flatten them into one trailing axis and reduce over it.
        # NOTE(review): the returned indices therefore refer to
        # positions within the flattened reduced axes.
        ns = x.shape[0 : -len(dim)] + (-1,)
        r = torch.argmax(x.reshape(ns), -1, keepdim=False)
        # Re-insert the reduced dimensions as size-1 axes (keepdim
        # semantics).
        rl = list(r.shape)
        for _sd in dim:
            rl.insert(_sd, 1)
        rf = tuple(rl)
        return (torch.reshape(r, rf),)

    return _impl, op.inputs[1:]
Python
def pytorch_array_max(op):
    """Implementation of array_max for pytorch."""

    def _impl(x, dim):
        dim = tuple(sorted(dim))
        # Move all reduced dimensions to the end...
        n = ()
        for _s in range(len(x.shape)):
            if _s not in dim:
                n = n + (_s,)
        n = n + dim
        x = x.permute(n)
        # ...flatten them into one trailing axis and take the max over
        # it ([0] selects the values, discarding the indices).
        ns = x.shape[0 : -len(dim)] + (-1,)
        r = torch.max(x.reshape(ns), -1, keepdim=False)[0]
        # Re-insert the reduced dimensions as size-1 axes (keepdim
        # semantics).
        rl = list(r.shape)
        for _sd in dim:
            rl.insert(_sd, 1)
        rf = tuple(rl)
        return (torch.reshape(r, rf),)

    return _impl, op.inputs[1:]
Python
def pytorch_gather(op):
    """Implementation of gather for pytorch."""

    def _impl(x, dim, index):
        # `dim` arrives as a scalar tensor; torch wants a python int.
        return (torch.gather(x, dim.item(), index),)

    return _impl, op.inputs[1:]
Python
def pytorch_scatter(op):
    """Implementation of scatter for pytorch."""

    def _impl(x, dim, index, src):
        # `dim` arrives as a scalar tensor; torch wants a python int.
        return (torch.scatter(x, dim.item(), index, src),)

    return _impl, op.inputs[1:]
Python
def pytorch_scatter_add(op):
    """Implementation of scatter_add for pytorch."""

    def _impl(x, dim, index, src):
        # `dim` arrives as a scalar tensor; torch wants a python int.
        return (torch.scatter_add(x, dim.item(), index, src),)

    return _impl, op.inputs[1:]
Python
def pytorch_concat(op):
    """Implementation of concat for pytorch."""

    def _impl(x, dim):
        # `dim` arrives as a scalar tensor; torch wants a python int.
        return (torch.cat(x, dim.item()),)

    return _impl, op.inputs[1:]
Python
def pytorch_split(op):
    """Implementation of split for pytorch."""

    def _impl(x, sections, dim):
        # `dim` arrives as a scalar tensor; torch wants a python int.
        return (torch.split(x, sections, dim.item()),)

    return _impl, op.inputs[1:]
Python
def pytorch_conv2d(op):
    """Implementation of conv2d for pytorch."""

    def _impl(input, weight, stride, padding, dilation, groups):
        # No bias (None); `groups` arrives as a scalar tensor.
        conv = torch.nn.functional.conv2d(
            input, weight, None, stride, padding, dilation, groups.item()
        )
        return (conv,)

    return _impl, op.inputs[1:]
Python
def pytorch_conv2d_weight_grad(op):
    """Implementation of conv2d_weight_grad for pytorch."""

    def _impl(
        input, weight_size, grad_output, stride, padding, dilation, groups
    ):
        # All shape/stride parameters arrive as tensors; convert them
        # to the python ints/tuples that conv2d_weight expects.
        grad = conv2d_weight(
            input,
            tuple(w.item() for w in weight_size),
            grad_output,
            tuple(s.item() for s in stride),
            tuple(p.item() for p in padding),
            tuple(d.item() for d in dilation),
            groups.item(),
        )
        return (grad,)

    return _impl, op.inputs[1:]
Python
def pytorch_max_pool2d(op):
    """Implementation of max_pool2d for pytorch."""

    def _impl(input, kernel_size, stride, padding, dilation, ceil_mode):
        pooled = torch.nn.functional.max_pool2d(
            input,
            kernel_size,
            stride,
            padding,
            dilation,
            ceil_mode.item(),
            False,  # return_indices
        )
        return (pooled,)

    return _impl, op.inputs[1:]
Python
def pytorch_max_pool2d_grad(op):
    """Implementation of max_pool2d grad for pytorch."""

    def _impl(input, kernel_size, stride, padding, dilation, ceil_mode, dout):
        # NOTE(review): this flips requires_grad on the caller's
        # tensor in place -- assumed safe within this backend; confirm.
        input.requires_grad_(requires_grad=True)
        # Re-run the forward pass, then backprop `dout` through it to
        # obtain the gradient w.r.t. the input.
        output = torch.nn.functional.max_pool2d(
            input,
            kernel_size,
            stride,
            padding,
            dilation,
            ceil_mode.item(),
            False,
        )
        grads = torch.autograd.grad(output, input, dout, allow_unused=True)
        return (grads[0],)

    return _impl, op.inputs[1:]
Python
def to_numpy(self, v):
    """Make a numpy array from a torch tensor."""
    tensor = v
    if tensor.is_cuda:
        # GPU tensors must be copied to host memory first.
        with untested():
            tensor = tensor.cpu()
    return tensor.detach().numpy()
Python
def to_scalar(self, v):
    """Convert a torch tensor to a scalar."""
    # None, booleans and strings are already backend-independent.
    if v is None or v is True or v is False or isinstance(v, str):
        return v
    return v.item()
Python
def from_backend_value(self, v, t):
    """Convert a backend value to an intermediate value.

    Dispatches on the abstract type `t`; containers are converted
    recursively.
    """
    if isinstance(t, abstract.AbstractScalar):
        return self.to_scalar(v)
    elif isinstance(t, abstract.AbstractArray):
        # Convert torch tensor to numpy tensor.
        output = self.to_numpy(v)
        # If possible and necessary, cast numpy tensor to expected tensor.
        array_type = t.element.xtype()
        if array_type and array_type not in _type_map:
            # Probably u16, u32 or u64. Let's cast.
            output = output.astype(type_to_np_dtype(array_type))
        return output
    elif isinstance(t, abstract.AbstractTuple):
        return tuple(
            self.from_backend_value(ve, te) for ve, te in zip(v, t.elements)
        )
    elif isinstance(t, abstract.AbstractTaggedUnion):
        # Convert the payload according to the option matching the tag.
        return TaggedValue(
            v.tag, self.from_backend_value(v.value, t.options.get(v.tag))
        )
    elif isinstance(t, abstract.AbstractRandomState):
        return RandomStateWrapper(self.to_numpy(v))
    elif isinstance(t, abstract.AbstractType):
        if isinstance(t.element, abstract.AbstractHandle):
            return HandleInstance
        else:
            myia_type = t.element.xtype()
            if myia_type in _type_map:
                # Scalar types come back as their numpy dtype class.
                return getattr(np, type_to_np_dtype(myia_type))
            else:
                return v
    else:
        raise NotImplementedError(f"Don't know what to do for {t}")
Python
def to_backend_value(self, v, t):
    """Convert an intermediate value to a backend value.

    Dispatches on the abstract type `t`; containers are converted
    recursively.
    """
    if isinstance(t, abstract.AbstractError) or v is abstract.DEAD:
        # Dead or erroneous values have no backend representation.
        return None
    elif isinstance(t, abstract.AbstractType):
        # Handle abstract types.
        # Return v if type does not match any torch type.
        myia_type = t.element.xtype()
        if myia_type is xtype.Tuple:
            return tuple
        return _type_map.get(myia_type, v)
    elif isinstance(t, abstract.AbstractArray):
        return self.from_numpy(v)
    elif isinstance(t, abstract.AbstractScalar):
        if issubclass(
            t.values[abstract.TYPE], (xtype.Number, xtype.Bool, xtype.Nil)
        ):
            return self.from_scalar(v, t.values[abstract.TYPE])
        elif issubclass(t.values[abstract.TYPE], xtype.EnvType):
            # Only empty environments can be transferred.
            assert len(v._contents) == 0
            return ()
        else:
            raise NotImplementedError(f"to_backend_value for {t}")
    elif isinstance(t, abstract.AbstractTuple):
        return tuple(
            self.to_backend_value(v, t) for v, t in zip(v, t.elements)
        )
    elif isinstance(t, abstract.AbstractTaggedUnion):
        # Convert the payload according to the option matching the tag.
        real_t = t.options.get(v.tag)
        return TaggedValue(v.tag, self.to_backend_value(v.value, real_t))
    elif isinstance(t, abstract.AbstractRandomState):
        # The state array is copied before the transfer.
        return self.from_numpy(v.state.copy())
    else:
        raise NotImplementedError(f"to_backend_value for {t}")
Python
async def infer_universe_setitem(
    self,
    engine,
    universe: xtype.UniverseType,
    handle: lib.AbstractHandle,
    value,
):
    """Infer the return type of primitive `universe_setitem`.

    Merges the stored value's abstract type into the handle's element
    type and returns a universe.
    """
    engine.abstract_merge(handle.element, value)
    return AbstractScalar({VALUE: ANYTHING, TYPE: xtype.UniverseType})
Python
def remove(self, e):
    """Remove an element that is present.

    Raise KeyError if not present.
    """
    # dict.pop without a default raises KeyError for a missing key,
    # exactly like `del`.
    self._d.pop(e)

def remove(self, e):
    """Remove an element that is present.

    Raise KeyError if not present.
    """
    # dict.pop without a default raises KeyError for a missing key,
    # exactly like `del`.
    self._d.pop(e)
Python
def pop(self):
    """Remove and return an element.

    Raise KeyError if empty.
    """
    # popitem removes the most recently inserted entry (LIFO).
    element, _ = self._d.popitem()
    return element

def pop(self):
    """Remove and return an element.

    Raise KeyError if empty.
    """
    # popitem removes the most recently inserted entry (LIFO).
    element, _ = self._d.popitem()
    return element
Python
def union(self, *others):
    """Return a new set with elements from the set and all others."""
    merged = self.copy()
    for group in others:
        merged.update(group)
    return merged

def union(self, *others):
    """Return a new set with elements from the set and all others."""
    merged = self.copy()
    for group in others:
        merged.update(group)
    return merged
Python
def intersection(self, *others):
    """Return a new set with elements common to the set and all others."""
    kept = self.copy()
    for group in others:
        kept.intersection_update(group)
    return kept

def intersection(self, *others):
    """Return a new set with elements common to the set and all others."""
    kept = self.copy()
    for group in others:
        kept.intersection_update(group)
    return kept
Python
def difference(self, *others):
    """Return a new set with elements that are not in the others."""
    remaining = self.copy()
    for group in others:
        remaining.difference_update(group)
    return remaining

def difference(self, *others):
    """Return a new set with elements that are not in the others."""
    remaining = self.copy()
    for group in others:
        remaining.difference_update(group)
    return remaining
Python
def symmetric_difference(self, other):
    """Return a new set with the elements in exactly one of the sets."""
    result = self.copy()
    result.symmetric_difference_update(other)
    return result

def symmetric_difference(self, other):
    """Return a new set with the elements in exactly one of the sets."""
    result = self.copy()
    result.symmetric_difference_update(other)
    return result
Python
def update(self, *others):
    """Update the set, adding elements from all others."""
    # Flatten all the iterables and add each element in order.
    for element in (e for group in others for e in group):
        self.add(element)
    return self

def update(self, *others):
    """Update the set, adding elements from all others."""
    # Flatten all the iterables and add each element in order.
    for element in (e for group in others for e in group):
        self.add(element)
    return self
Python
def intersection_update(self, *others):
    """Update the set, keeping only elements found in it and all others."""
    for group in others:
        # Snapshot the elements to drop before mutating the set.
        dropped = [e for e in self if e not in group]
        for e in dropped:
            self.discard(e)
    return self

def intersection_update(self, *others):
    """Update the set, keeping only elements found in it and all others."""
    for group in others:
        # Snapshot the elements to drop before mutating the set.
        dropped = [e for e in self if e not in group]
        for e in dropped:
            self.discard(e)
    return self
Python
def difference_update(self, *others):
    """Update the set, removing elements found in others."""
    # Flatten all the iterables and discard each element in order.
    for element in (e for group in others for e in group):
        self.discard(element)
    return self

def difference_update(self, *others):
    """Update the set, removing elements found in others."""
    # Flatten all the iterables and discard each element in order.
    for element in (e for group in others for e in group):
        self.discard(element)
    return self
Python
def symmetric_difference_update(self, other):
    """Update the set, keeping only the difference from both sets."""
    for element in other:
        # Toggle membership: absent elements get added, present ones
        # get removed.
        if element not in self:
            self.add(element)
        else:
            self.remove(element)

def symmetric_difference_update(self, other):
    """Update the set, keeping only the difference from both sets."""
    for element in other:
        # Toggle membership: absent elements get added, present ones
        # get removed.
        if element not in self:
            self.add(element)
        else:
            self.remove(element)
Python
def _color(color, text): """Wrap the text with the given color. If Buche is active, the color is not applied. """ if os.environ.get("BUCHE"): return text else: return f"{color}{text}{Fore.RESET}"
def _color(color, text): """Wrap the text with the given color. If Buche is active, the color is not applied. """ if os.environ.get("BUCHE"): return text else: return f"{color}{text}{Fore.RESET}"
Python
def _pgraph(path): """Print a graph using Buche.""" def _p(graph, **_): bucheg(graph) return lambda: DoTrace({path: _p})
def _pgraph(path): """Print a graph using Buche.""" def _p(graph, **_): bucheg(graph) return lambda: DoTrace({path: _p})
Python
def log(path=None, *fields, **kwfields):
    """Log fields of interest on the given path.

    The breakword module is used for logging, thus it is possible to set
    a word upon which to enter a breakpoint (using the BREAKWORD
    environment variable).

    * When no path is given, show all events.
    * The "help" field shows all possible fields.
    """
    getters = Getters(fields, kwfields)

    def _printer(**kwargs):
        # Extract the matched values and display them at the event path.
        _display(kwargs["_curpath"], getters(kwargs))

    # Every resolved path gets the same printer callback.
    return DoTrace(dict.fromkeys(_resolve_path(path), _printer))

def log(path=None, *fields, **kwfields):
    """Log fields of interest on the given path.

    The breakword module is used for logging, thus it is possible to set
    a word upon which to enter a breakpoint (using the BREAKWORD
    environment variable).

    * When no path is given, show all events.
    * The "help" field shows all possible fields.
    """
    getters = Getters(fields, kwfields)

    def _printer(**kwargs):
        # Extract the matched values and display them at the event path.
        _display(kwargs["_curpath"], getters(kwargs))

    # Every resolved path gets the same printer callback.
    return DoTrace(dict.fromkeys(_resolve_path(path), _printer))
Python
def stat(path=None, *fields, **kwfields):
    """Collect and display statistics about certain fields.

    * Numeric fields will display min/max/avg
    * String/other fields will count occurrences, sorted descending
    """
    accumulator = StatAccumulator(path, fields, kwfields)
    return accumulator

def stat(path=None, *fields, **kwfields):
    """Collect and display statistics about certain fields.

    * Numeric fields will display min/max/avg
    * String/other fields will count occurrences, sorted descending
    """
    accumulator = StatAccumulator(path, fields, kwfields)
    return accumulator
Python
async def make_list(info, *elems):
    """Create a list using Cons and Empty.

    The list is built back-to-front by wrapping each argument node in a
    Cons cell, then cast to a list of the merged element type.
    """
    g = info.graph
    lst = g.apply(Empty)
    abstracts = await info.abstracts()
    if not abstracts:
        # No elements: the bare Empty node needs no cast.
        return lst
    # All elements must merge into a single common element type.
    restype = info.engine.abstract_merge(*abstracts)
    for arg in reversed(info.nodes()):
        lst = g.apply(Cons, arg, lst)
    return g.apply(P.unsafe_static_cast, lst, listof(restype))

async def make_list(info, *elems):
    """Create a list using Cons and Empty.

    The list is built back-to-front by wrapping each argument node in a
    Cons cell, then cast to a list of the merged element type.
    """
    g = info.graph
    lst = g.apply(Empty)
    abstracts = await info.abstracts()
    if not abstracts:
        # No elements: the bare Empty node needs no cast.
        return lst
    # All elements must merge into a single common element type.
    restype = info.engine.abstract_merge(*abstracts)
    for arg in reversed(info.nodes()):
        lst = g.apply(Cons, arg, lst)
    return g.apply(P.unsafe_static_cast, lst, listof(restype))
Python
async def infer_array_setitem(
    self,
    engine,
    a: lib.AbstractArray,
    begin: lib.u64tup_typecheck,
    end: lib.u64tup_typecheck,
    strides: lib.i64tup_typecheck,
    value: lib.AbstractArray,
):
    """Infer the return type of primitive `array_setitem`.

    The result has the same abstract type as the input array `a`; the
    slice bounds and the value only need to pass the type checks in the
    signature.
    """
    return a

async def infer_array_setitem(
    self,
    engine,
    a: lib.AbstractArray,
    begin: lib.u64tup_typecheck,
    end: lib.u64tup_typecheck,
    strides: lib.i64tup_typecheck,
    value: lib.AbstractArray,
):
    """Infer the return type of primitive `array_setitem`.

    The result has the same abstract type as the input array `a`; the
    slice bounds and the value only need to pass the type checks in the
    signature.
    """
    return a
Python
async def infer_composite_full(
    self,
    engine,
    shape: u64tup_typecheck,
    fill_value: AbstractScalar,
    dtype: AbstractType,
):
    """Infer the return type of primitive `composite_full`.

    Builds an array type whose element type comes from `dtype`, whose
    scalar value is `fill_value` and whose shape is taken from `shape`,
    which must be a tuple of compile-time constants.
    """
    return AbstractArray(
        AbstractScalar(
            {
                # The dtype may still be pending from inference; force it.
                TYPE: await force_pending(dtype.element.xtype()),
                VALUE: fill_value.xvalue(),
            }
        ),
        {
            SHAPE: tuple(
                # Each dimension must be statically known; require_constant
                # reports the offending argument position otherwise.
                self.require_constant(e, argnum=f'"0:shape[{edx}]"')
                for edx, e in enumerate(shape.elements)
            ),
            TYPE: NDArray,
        },
    )

async def infer_composite_full(
    self,
    engine,
    shape: u64tup_typecheck,
    fill_value: AbstractScalar,
    dtype: AbstractType,
):
    """Infer the return type of primitive `composite_full`.

    Builds an array type whose element type comes from `dtype`, whose
    scalar value is `fill_value` and whose shape is taken from `shape`,
    which must be a tuple of compile-time constants.
    """
    return AbstractArray(
        AbstractScalar(
            {
                # The dtype may still be pending from inference; force it.
                TYPE: await force_pending(dtype.element.xtype()),
                VALUE: fill_value.xvalue(),
            }
        ),
        {
            SHAPE: tuple(
                # Each dimension must be statically known; require_constant
                # reports the offending argument position otherwise.
                self.require_constant(e, argnum=f'"0:shape[{edx}]"')
                for edx, e in enumerate(shape.elements)
            ),
            TYPE: NDArray,
        },
    )
Python
def link_apply(self, link):
    """Link generated nodes to their inputs.

    x = a(b, c) => A:x = (B:a)(B:b, B:c)
    """
    # Replace each input by its counterpart from the grad_fprop remapper
    # for the same graph.
    new_inputs = [
        self.remappers["grad_fprop"].get(link.graph, inp)
        for inp in link.node.inputs
    ]
    link.new_node.inputs = new_inputs

def link_apply(self, link):
    """Link generated nodes to their inputs.

    x = a(b, c) => A:x = (B:a)(B:b, B:c)
    """
    # Replace each input by its counterpart from the grad_fprop remapper
    # for the same graph.
    new_inputs = [
        self.remappers["grad_fprop"].get(link.graph, inp)
        for inp in link.node.inputs
    ]
    link.new_node.inputs = new_inputs
Python
def link_apply(self, link):
    """Link generated nodes to their inputs.

    x = a(b, c) => B:x = (A:x)[0]
    """
    # Parameters are handled elsewhere; only applications reach here.
    assert not link.node.is_parameter()
    app = self.remappers["grad_fprop_app"].get(link.graph, link.node)
    # Take the first element of the pair produced by the fprop application.
    link.new_node.inputs = sexp_to_node(
        (P.tuple_getitem, app, 0), link.new_graph
    ).inputs

def link_apply(self, link):
    """Link generated nodes to their inputs.

    x = a(b, c) => B:x = (A:x)[0]
    """
    # Parameters are handled elsewhere; only applications reach here.
    assert not link.node.is_parameter()
    app = self.remappers["grad_fprop_app"].get(link.graph, link.node)
    # Take the first element of the pair produced by the fprop application.
    link.new_node.inputs = sexp_to_node(
        (P.tuple_getitem, app, 0), link.new_graph
    ).inputs
Python
def link_apply(self, link):
    """Link generated nodes to their inputs.

    x = a(b, c) => C:x = (A:x)[1]
    """
    app = self.remappers["grad_fprop_app"].get(link.graph, link.node)
    # Take the second element of the pair produced by the fprop
    # application (the backpropagator).
    link.new_node.inputs = sexp_to_node(
        (P.tuple_getitem, app, 1), link.new_graph
    ).inputs

def link_apply(self, link):
    """Link generated nodes to their inputs.

    x = a(b, c) => C:x = (A:x)[1]
    """
    app = self.remappers["grad_fprop_app"].get(link.graph, link.node)
    # Take the second element of the pair produced by the fprop
    # application (the backpropagator).
    link.new_node.inputs = sexp_to_node(
        (P.tuple_getitem, app, 1), link.new_graph
    ).inputs
Python
def link_apply(self, link):
    """Link generated nodes to their inputs.

    x = a(b, c) => D:x = (C:x)(E:x)
    """
    g = link.graph
    node = link.node
    assert not node.is_parameter()
    # Call the node's backpropagator (C:x) on its sensitivity (E:x).
    fn = self.remappers["grad_bprop"].get(g, node)
    arg = self.remappers["grad_sens"].get(g, node)
    link.new_node.inputs = [fn, arg]

def link_apply(self, link):
    """Link generated nodes to their inputs.

    x = a(b, c) => D:x = (C:x)(E:x)
    """
    g = link.graph
    node = link.node
    assert not node.is_parameter()
    # Call the node's backpropagator (C:x) on its sensitivity (E:x).
    fn = self.remappers["grad_bprop"].get(g, node)
    arg = self.remappers["grad_sens"].get(g, node)
    link.new_node.inputs = [fn, arg]
Python
def gen_apply(self, g, ng, node): """Generate sensitivities for applications. * The output node's sensitivity is ng's sole parameter. * If a node is used in multiple graphs, each graph has a corresponding sensitivity node. """ with About(node.debug, self.relation): if node is g.output: new_node = ng.add_parameter() else: new_node = ng.apply() # NOTE: First parameter to remap_node is (g, node) instead of just # node. This lets us dispatch to a different node depending on whether # it belongs to the graph that uses it, or is a free variable. self.remap_node((g, node), g, node, ng, new_node)
def gen_apply(self, g, ng, node): """Generate sensitivities for applications. * The output node's sensitivity is ng's sole parameter. * If a node is used in multiple graphs, each graph has a corresponding sensitivity node. """ with About(node.debug, self.relation): if node is g.output: new_node = ng.add_parameter() else: new_node = ng.apply() # NOTE: First parameter to remap_node is (g, node) instead of just # node. This lets us dispatch to a different node depending on whether # it belongs to the graph that uses it, or is a free variable. self.remap_node((g, node), g, node, ng, new_node)