'Collates the join inputs prior to delegating to the superclass.'
def _run_command(self, execute, copyfiles=True):
    self._collate_join_field_inputs()
    return super(JoinNode, self)._run_command(execute, copyfiles)
'Collects each override join item field into the interface join field input.'
def _collate_join_field_inputs(self):
    for field in self.inputs.copyable_trait_names():
        if field in self.joinfield:
            val = self._collate_input_value(field)
            try:
                setattr(self._interface.inputs, field, val)
            except Exception as e:
                raise ValueError(u'>>JN %s %s %s %s %s: %s' % (
                    self, field, val, self.inputs.copyable_trait_names(),
                    self.joinfield, e))
        elif hasattr(self._interface.inputs, field):
            val = getattr(self._inputs, field)
            if isdefined(val):
                setattr(self._interface.inputs, field, val)
    logger.debug(u'Collated %d inputs into the %s node join fields',
                 self._next_slot_index, self)
'Collects the join item field values into a list or set value for the given field, as follows: - If the field trait is a Set, then the values are collected into a set. - Otherwise, the values are collected into a list which preserves the iterables order. If the ``unique`` flag is set, then duplicate values are removed but the iterables order is preserved.'
def _collate_input_value(self, field):
    val = [self._slot_value(field, idx) for idx in range(self._next_slot_index)]
    basetrait = self._interface.inputs.trait(field)
    if isinstance(basetrait.trait_type, traits.Set):
        return set(val)
    elif self._unique:
        return list(OrderedDict.fromkeys(val))
    else:
        return val
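A minimal usage sketch of a JoinNode collating iterated results back into a single list input; the IdentityInterface fields, node names, and subject ids are illustrative, not taken from the code above.

import nipype.pipeline.engine as pe
from nipype.interfaces.utility import IdentityInterface

# iterate over subject ids, then join the per-subject results into one list
subjects = pe.Node(IdentityInterface(fields=['subject_id']), name='subjects')
subjects.iterables = ('subject_id', ['s1', 's2', 's3'])

join = pe.JoinNode(IdentityInterface(fields=['subject_id']),
                   joinsource='subjects', joinfield='subject_id',
                   unique=True, name='join')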
'Parameters interface : interface object node specific interface (fsl.Bet(), spm.Coregister()) iterfield : string or list of strings name(s) of input fields that will receive a list of whatever kind of input they take. The node will be run separately for each value in these lists. For more than one input, the values are paired (i.e. it does not compute a combinatorial product). name : alphanumeric string node specific name serial : boolean flag to enforce executing the jobs of the mapnode in a serial manner rather than parallel nested : boolean support for nested lists; if set, the input list will be flattened before running and the nested list structure of the outputs will be restored. See Node docstring for additional keyword arguments.'
def __init__(self, interface, iterfield, name, serial=False, nested=False, **kwargs):
    super(MapNode, self).__init__(interface, name, **kwargs)
    if isinstance(iterfield, (str, bytes)):
        iterfield = [iterfield]
    self.iterfield = iterfield
    self.nested = nested
    self._inputs = self._create_dynamic_traits(self._interface.inputs,
                                               fields=self.iterfield)
    self._inputs.on_trait_change(self._set_mapnode_input)
    self._got_inputs = False
    self._serial = serial
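A minimal usage sketch of a MapNode iterating an interface over a list of files; the fsl.BET interface and the file names are illustrative assumptions.

import nipype.pipeline.engine as pe
import nipype.interfaces.fsl as fsl

# run BET once per input file; 'in_file' receives a list instead of a single path
bet = pe.MapNode(fsl.BET(), iterfield=['in_file'], name='bet')
bet.inputs.in_file = ['sub-01_T1w.nii.gz', 'sub-02_T1w.nii.gz']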
'Convert specific fields of a trait to accept multiple inputs'
def _create_dynamic_traits(self, basetraits, fields=None, nitems=None):
    output = DynamicTraitedSpec()
    if fields is None:
        fields = basetraits.copyable_trait_names()
    for name, spec in list(basetraits.items()):
        if name in fields and ((nitems is None) or (nitems > 1)):
            logger.debug(u'adding multipath trait: %s', name)
            if self.nested:
                output.add_trait(name, InputMultiPath(traits.Any()))
            else:
                output.add_trait(name, InputMultiPath(spec.trait_type))
        else:
            output.add_trait(name, traits.Trait(spec))
        setattr(output, name, Undefined)
        value = getattr(basetraits, name)
        if isdefined(value):
            setattr(output, name, value)
        value = getattr(output, name)
    return output
'Set interface input value or nodewrapper attribute Priority goes to interface.'
def set_input(self, parameter, val):
    logger.debug(u'setting nodelevel(%s) input %s = %s',
                 to_str(self), parameter, to_str(val))
    self._set_mapnode_input(self.inputs, parameter, deepcopy(val))
'Compute hash including iterfield lists.'
def _get_hashval(self):
if (not self._got_inputs): self._get_inputs() self._got_inputs = True self._check_iterfield() hashinputs = deepcopy(self._interface.inputs) for name in self.iterfield: hashinputs.remove_trait(name) hashinputs.add_trait(name, InputMultiPath(self._interface.inputs.traits()[name].trait_type)) logger.debug(u'setting hashinput %s-> %s', name, getattr(self._inputs, name)) if self.nested: setattr(hashinputs, name, flatten(getattr(self._inputs, name))) else: setattr(hashinputs, name, getattr(self._inputs, name)) (hashed_inputs, hashvalue) = hashinputs.get_hashval(hash_method=self.config[u'execution'][u'hash_method']) rm_extra = self.config[u'execution'][u'remove_unnecessary_outputs'] if (str2bool(rm_extra) and self.needed_outputs): hashobject = md5() hashobject.update(hashvalue.encode()) sorted_outputs = sorted(self.needed_outputs) hashobject.update(str(sorted_outputs).encode()) hashvalue = hashobject.hexdigest() hashed_inputs.append((u'needed_outputs', sorted_outputs)) return (hashed_inputs, hashvalue)
'Checks iterfield * iterfield must be in inputs * number of elements must match across iterfield'
def _check_iterfield(self):
    for iterfield in self.iterfield:
        if not isdefined(getattr(self.inputs, iterfield)):
            raise ValueError(u'Input %s was not set but it is listed in iterfields.' % iterfield)
    if len(self.iterfield) > 1:
        first_len = len(filename_to_list(getattr(self.inputs, self.iterfield[0])))
        for iterfield in self.iterfield[1:]:
            if first_len != len(filename_to_list(getattr(self.inputs, iterfield))):
                raise ValueError(u'All iterfields of a MapNode have to have the same length. %s' % str(self.inputs))
'Run the mapnode interface This is primarily intended for serial execution of mapnode. A parallel execution requires creation of new nodes that can be spawned'
def _run_interface(self, execute=True, updatehash=False):
old_cwd = os.getcwd() cwd = self.output_dir() os.chdir(cwd) self._check_iterfield() if execute: if self.nested: nitems = len(filename_to_list(flatten(getattr(self.inputs, self.iterfield[0])))) else: nitems = len(filename_to_list(getattr(self.inputs, self.iterfield[0]))) nodenames = [((u'_' + self.name) + str(i)) for i in range(nitems)] self._collate_results(self._node_runner(self._make_nodes(cwd), updatehash=updatehash)) self._save_results(self._result, cwd) dirs2remove = [] for path in glob(op.join(cwd, u'mapflow', u'*')): if op.isdir(path): if (path.split(op.sep)[(-1)] not in nodenames): dirs2remove.append(path) for path in dirs2remove: shutil.rmtree(path) else: self._result = self._load_results(cwd) os.chdir(old_cwd)
'Create a workflow object. Parameters name : alphanumeric string unique identifier for the workflow base_dir : string, optional path to workflow storage'
def __init__(self, name, base_dir=None):
    super(Workflow, self).__init__(name, base_dir)
    self._graph = nx.DiGraph()
    self.config = deepcopy(config._sections)
'Clone a workflow .. note:: Will reset attributes used for executing workflow. See _init_runtime_fields. Parameters name: alphanumeric name unique name for the workflow'
def clone(self, name):
    clone = super(Workflow, self).clone(name)
    clone._reset_hierarchy()
    return clone
'Connect nodes in the pipeline. This routine also checks if inputs and outputs are actually provided by the nodes that are being connected. Creates edges in the directed graph using the nodes and edges specified in the `connection_list`. Uses the NetworkX method DiGraph.add_edges_from. Parameters args : list or a set of four positional arguments Four positional arguments of the form:: connect(source, sourceoutput, dest, destinput) source : nodewrapper node sourceoutput : string (must be in source.outputs) dest : nodewrapper node destinput : string (must be in dest.inputs) A list of 3-tuples of the following form:: [(source, target, [(\'sourceoutput/attribute\', \'targetinput\')]), ...] Or:: [(source, target, [((\'sourceoutput1\', func, arg2, ...), \'targetinput\'), ...]), ...] sourceoutput1 will always be the first argument to func, and func will be evaluated and the results sent to targetinput. Currently func needs to define all of its needed imports within the function body, as we use the inspect module to get at the source code and execute it remotely.'
def connect(self, *args, **kwargs):
    if len(args) == 1:
        connection_list = args[0]
    elif len(args) == 4:
        connection_list = [(args[0], args[2], [(args[1], args[3])])]
    else:
        raise TypeError(u'connect() takes either 4 arguments, or 1 list of connection tuples (%d args given)' % len(args))
    disconnect = False
    if kwargs:
        disconnect = kwargs.get(u'disconnect', False)
    if disconnect:
        self.disconnect(connection_list)
        return
    newnodes = []
    for srcnode, destnode, _ in connection_list:
        if self in [srcnode, destnode]:
            msg = (u'Workflow connect cannot contain itself as node: src[%s] dest[%s] workflow[%s]' % (srcnode, destnode, self.name))
            raise IOError(msg)
        if (srcnode not in newnodes) and (not self._has_node(srcnode)):
            newnodes.append(srcnode)
        if (destnode not in newnodes) and (not self._has_node(destnode)):
            newnodes.append(destnode)
    if newnodes:
        self._check_nodes(newnodes)
        for node in newnodes:
            if node._hierarchy is None:
                node._hierarchy = self.name
    not_found = []
    connected_ports = {}
    for srcnode, destnode, connects in connection_list:
        if destnode not in connected_ports:
            connected_ports[destnode] = []
        if (not disconnect) and (destnode in self._graph.nodes()):
            for edge in self._graph.in_edges_iter(destnode):
                data = self._graph.get_edge_data(*edge)
                for sourceinfo, destname in data[u'connect']:
                    if destname not in connected_ports[destnode]:
                        connected_ports[destnode] += [destname]
        for source, dest in connects:
            if dest in connected_ports[destnode]:
                raise Exception(u"\nTrying to connect %s:%s to %s:%s but input '%s' of node '%s' is already\nconnected.\n" % (srcnode, source, destnode, dest, dest, destnode))
            if not (hasattr(destnode, u'_interface') and
                    (u'.io' in str(destnode._interface.__class__) or
                     any([u'.io' in str(val) for val in destnode._interface.__class__.__bases__]))):
                if not destnode._check_inputs(dest):
                    not_found.append([u'in', destnode.name, dest])
            if not (hasattr(srcnode, u'_interface') and
                    (u'.io' in str(srcnode._interface.__class__) or
                     any([u'.io' in str(val) for val in srcnode._interface.__class__.__bases__]))):
                if isinstance(source, tuple):
                    sourcename = source[0]
                elif isinstance(source, (str, bytes)):
                    sourcename = source
                else:
                    raise Exception(u'Unknown source specification in connection from output of %s' % srcnode.name)
                if sourcename and (not srcnode._check_outputs(sourcename)):
                    not_found.append([u'out', srcnode.name, sourcename])
            connected_ports[destnode] += [dest]
    infostr = []
    for info in not_found:
        infostr += [u'Module %s has no %sput called %s\n' % (info[1], info[0], info[2])]
    if not_found:
        raise Exception(u'\n'.join([u'Some connections were not found'] + infostr))
    for srcnode, destnode, connects in connection_list:
        for idx, (src, dest) in enumerate(connects):
            if isinstance(src, tuple) and (not isinstance(src[1], (str, bytes))):
                function_source = getsource(src[1])
                connects[idx] = ((src[0], function_source, src[2:]), dest)
    for srcnode, destnode, connects in connection_list:
        edge_data = self._graph.get_edge_data(srcnode, destnode, None)
        if edge_data:
            logger.debug(u'(%s, %s): Edge data exists: %s', srcnode, destnode, to_str(edge_data))
            for data in connects:
                if data not in edge_data[u'connect']:
                    edge_data[u'connect'].append(data)
                if disconnect:
                    logger.debug(u'Removing connection: %s', to_str(data))
                    edge_data[u'connect'].remove(data)
            if edge_data[u'connect']:
                self._graph.add_edges_from([(srcnode, destnode, edge_data)])
            else:
                logger.debug(u'Removing connection: %s->%s', srcnode, destnode)
                self._graph.remove_edges_from([(srcnode, destnode)])
        elif not disconnect:
            logger.debug(u'(%s, %s): No edge data', srcnode, destnode)
            self._graph.add_edges_from([(srcnode, destnode, {u'connect': connects})])
        edge_data = self._graph.get_edge_data(srcnode, destnode)
        logger.debug(u'(%s, %s): new edge data: %s', srcnode, destnode, to_str(edge_data))
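A minimal sketch of the two call forms described in the docstring; the workflow variable, the 'realign' and 'smooth' nodes, and their port names are illustrative.

# simple form: one output wired to one input
wf.connect(realign, 'realigned_files', smooth, 'in_files')

# list form, including a connection routed through a helper function
def pick_first(files):
    return files[0]

wf.connect([(realign, smooth, [(('realigned_files', pick_first), 'in_files')])])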
'Disconnect nodes See the docstring for connect for format.'
def disconnect(self, *args):
    if len(args) == 1:
        connection_list = args[0]
    elif len(args) == 4:
        connection_list = [(args[0], args[2], [(args[1], args[3])])]
    else:
        raise TypeError(u'disconnect() takes either 4 arguments, or 1 list of connection tuples (%d args given)' % len(args))
    for srcnode, dstnode, conn in connection_list:
        logger.debug(u'disconnect(): %s->%s %s', srcnode, dstnode, to_str(conn))
        if self in [srcnode, dstnode]:
            raise IOError(u'Workflow connect cannot contain itself as node: src[%s] dest[%s] workflow[%s]' % (srcnode, dstnode, self.name))
        if (not self._has_node(srcnode)) or (not self._has_node(dstnode)):
            continue
        edge_data = self._graph.get_edge_data(srcnode, dstnode, {u'connect': []})
        ed_conns = [(c[0], c[1]) for c in edge_data[u'connect']]
        remove = []
        for edge in conn:
            if edge in ed_conns:
                idx = ed_conns.index(edge)
                remove.append((edge[0], edge[1]))
        logger.debug(u'disconnect(): remove list %s', to_str(remove))
        for el in remove:
            edge_data[u'connect'].remove(el)
            logger.debug(u'disconnect(): removed connection %s', to_str(el))
        if not edge_data[u'connect']:
            self._graph.remove_edge(srcnode, dstnode)
        else:
            self._graph.add_edges_from([(srcnode, dstnode, edge_data)])
'Add nodes to a workflow Parameters nodes : list A list of EngineBase-based objects'
def add_nodes(self, nodes):
    newnodes = []
    all_nodes = self._get_all_nodes()
    for node in nodes:
        if self._has_node(node):
            raise IOError(u'Node %s already exists in the workflow' % node)
        if isinstance(node, Workflow):
            for subnode in node._get_all_nodes():
                if subnode in all_nodes:
                    raise IOError(u'Subnode %s of node %s already exists in the workflow' % (subnode, node))
        newnodes.append(node)
    if not newnodes:
        logger.debug(u'no new nodes to add')
        return
    for node in newnodes:
        if not issubclass(node.__class__, EngineBase):
            raise Exception(u'Node %s must be a subclass of EngineBase', node)
    self._check_nodes(newnodes)
    for node in newnodes:
        if node._hierarchy is None:
            node._hierarchy = self.name
    self._graph.add_nodes_from(newnodes)
'Remove nodes from a workflow Parameters nodes : list A list of EngineBase-based objects'
def remove_nodes(self, nodes):
self._graph.remove_nodes_from(nodes)
'Return an internal node by name'
def get_node(self, name):
    nodenames = name.split(u'.')
    nodename = nodenames[0]
    outnode = [node for node in self._graph.nodes()
               if str(node).endswith(u'.' + nodename)]
    if outnode:
        outnode = outnode[0]
        if nodenames[1:] and issubclass(outnode.__class__, Workflow):
            outnode = outnode.get_node(u'.'.join(nodenames[1:]))
    else:
        outnode = None
    return outnode
'List names of all nodes in a workflow'
def list_node_names(self):
    outlist = []
    for node in nx.topological_sort(self._graph):
        if isinstance(node, Workflow):
            outlist.extend([u'.'.join((node.name, nodename))
                            for nodename in node.list_node_names()])
        else:
            outlist.append(node.name)
    return sorted(outlist)
'Generates a graphviz dot file and a png file Parameters graph2use: \'orig\', \'hierarchical\' (default), \'flat\', \'exec\', \'colored\' orig - creates a top level graph without expanding internal workflow nodes; flat - expands workflow nodes recursively; hierarchical - expands workflow nodes recursively with a notion of hierarchy; colored - expands workflow nodes recursively with a notion of hierarchy in color; exec - expands workflows to depict iterables format: \'png\', \'svg\' simple_form: boolean (default: True) Determines if the node name used in the graph should be of the form \'nodename (package)\' when True or \'nodename.Class.package\' when False.'
def write_graph(self, dotfilename=u'graph.dot', graph2use=u'hierarchical', format=u'png', simple_form=True):
graphtypes = [u'orig', u'flat', u'hierarchical', u'exec', u'colored'] if (graph2use not in graphtypes): raise ValueError((u'Unknown graph2use keyword. Must be one of: ' + str(graphtypes))) (base_dir, dotfilename) = op.split(dotfilename) if (base_dir == u''): if self.base_dir: base_dir = self.base_dir if self.name: base_dir = op.join(base_dir, self.name) else: base_dir = os.getcwd() base_dir = make_output_dir(base_dir) if (graph2use in [u'hierarchical', u'colored']): if self.name[:1].isdigit(): raise ValueError(u'{} graph failed, workflow name cannot begin with a number'.format(graph2use)) dotfilename = op.join(base_dir, dotfilename) self.write_hierarchical_dotfile(dotfilename=dotfilename, colored=(graph2use == u'colored'), simple_form=simple_form) outfname = format_dot(dotfilename, format=format) else: graph = self._graph if (graph2use in [u'flat', u'exec']): graph = self._create_flat_graph() if (graph2use == u'exec'): graph = generate_expanded_graph(deepcopy(graph)) outfname = export_graph(graph, base_dir, dotfilename=dotfilename, format=format, simple_form=simple_form) logger.info((u'Generated workflow graph: %s (graph2use=%s, simple_form=%s).' % (outfname, graph2use, simple_form))) return outfname
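A minimal usage sketch; the workflow variable and output filename are illustrative.

# render the nested workflow as a colored, hierarchical SVG
wf.write_graph(dotfilename='graph.dot', graph2use='colored', format='svg',
               simple_form=True)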
'Export object into a different format Parameters filename: string file to save the code to; overrides prefix prefix: string prefix to use for output file format: string one of "python" include_config: boolean whether to include node and workflow config values'
def export(self, filename=None, prefix=u'output', format=u'python', include_config=False):
formats = [u'python'] if (format not in formats): raise ValueError((u'format must be one of: %s' % u'|'.join(formats))) flatgraph = self._create_flat_graph() nodes = nx.topological_sort(flatgraph) lines = [u'# Workflow'] importlines = [u'from nipype.pipeline.engine import Workflow, Node, MapNode'] functions = {} if (format == u'python'): connect_template = (u'%s.connect(%%s, %%s, %%s, "%%s")' % self.name) connect_template2 = (u'%s.connect(%%s, "%%s", %%s, "%%s")' % self.name) wfdef = (u'%s = Workflow("%s")' % (self.name, self.name)) lines.append(wfdef) if include_config: lines.append((u'%s.config = %s' % (self.name, self.config))) for (idx, node) in enumerate(nodes): nodename = node.fullname.replace(u'.', u'_') nodelines = format_node(node, format=u'python', include_config=include_config) for line in nodelines: if line.startswith(u'from'): if (line not in importlines): importlines.append(line) else: lines.append(line) for (u, _, d) in flatgraph.in_edges_iter(nbunch=node, data=True): for cd in d[u'connect']: if isinstance(cd[0], tuple): args = list(cd[0]) if (args[1] in functions): funcname = functions[args[1]] else: func = create_function_from_source(args[1]) funcname = [name for name in func.__globals__ if (name != u'__builtins__')][0] functions[args[1]] = funcname args[1] = funcname args = tuple([arg for arg in args if arg]) line_args = (u.fullname.replace(u'.', u'_'), args, nodename, cd[1]) line = (connect_template % line_args) line = line.replace((u"'%s'" % funcname), funcname) lines.append(line) else: line_args = (u.fullname.replace(u'.', u'_'), cd[0], nodename, cd[1]) lines.append((connect_template2 % line_args)) functionlines = [u'# Functions'] for function in functions: functionlines.append(pickle.loads(function).rstrip()) all_lines = ((importlines + functionlines) + lines) if (not filename): filename = (u'%s%s.py' % (prefix, self.name)) with open(filename, u'wt') as fp: fp.writelines(u'\n'.join(all_lines)) return all_lines
'Execute the workflow Parameters plugin: plugin name or object Plugin to use for execution. You can create your own plugins for execution. plugin_args : dictionary containing arguments to be sent to plugin constructor. see individual plugin doc strings for details.'
def run(self, plugin=None, plugin_args=None, updatehash=False):
if (plugin is None): plugin = config.get(u'execution', u'plugin') if (not isinstance(plugin, (str, bytes))): runner = plugin else: name = u'.'.join((__name__.split(u'.')[:(-2)] + [u'plugins'])) try: __import__(name) except ImportError: msg = (u'Could not import plugin module: %s' % name) logger.error(msg) raise ImportError(msg) else: plugin_mod = getattr(sys.modules[name], (u'%sPlugin' % plugin)) runner = plugin_mod(plugin_args=plugin_args) flatgraph = self._create_flat_graph() self.config = merge_dict(deepcopy(config._sections), self.config) if (u'crashdump_dir' in self.config): warn(u"Deprecated: workflow.config['crashdump_dir']\nPlease use config['execution']['crashdump_dir']") crash_dir = self.config[u'crashdump_dir'] self.config[u'execution'][u'crashdump_dir'] = crash_dir del self.config[u'crashdump_dir'] logger.info(u'Workflow %s settings: %s', self.name, to_str(sorted(self.config))) self._set_needed_outputs(flatgraph) execgraph = generate_expanded_graph(deepcopy(flatgraph)) for (index, node) in enumerate(execgraph.nodes()): node.config = merge_dict(deepcopy(self.config), node.config) node.base_dir = self.base_dir node.index = index if isinstance(node, MapNode): node.use_plugin = (plugin, plugin_args) self._configure_exec_nodes(execgraph) if str2bool(self.config[u'execution'][u'create_report']): self._write_report_info(self.base_dir, self.name, execgraph) runner.run(execgraph, updatehash=updatehash, config=self.config) datestr = datetime.utcnow().strftime(u'%Y%m%dT%H%M%S') if str2bool(self.config[u'execution'][u'write_provenance']): prov_base = op.join(self.base_dir, (u'workflow_provenance_%s' % datestr)) logger.info((u'Provenance file prefix: %s' % prov_base)) write_workflow_prov(execgraph, prov_base, format=u'all') return execgraph
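A minimal usage sketch of executing a workflow with a named plugin; the workflow variable and the number of processes are illustrative.

# execute locally with the multiprocessing plugin; plugin_args are passed
# through to the plugin constructor
execgraph = wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})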
'Initialize node with list of which outputs are needed.'
def _set_needed_outputs(self, graph):
    rm_outputs = self.config[u'execution'][u'remove_unnecessary_outputs']
    if not str2bool(rm_outputs):
        return
    for node in graph.nodes():
        node.needed_outputs = []
        for edge in graph.out_edges_iter(node):
            data = graph.get_edge_data(*edge)
            sourceinfo = [v1[0] if isinstance(v1, tuple) else v1
                          for v1, v2 in data[u'connect']]
            node.needed_outputs += [v for v in sourceinfo
                                    if v not in node.needed_outputs]
        if node.needed_outputs:
            node.needed_outputs = sorted(node.needed_outputs)
'Ensure that each node knows where to get inputs from'
def _configure_exec_nodes(self, graph):
    for node in graph.nodes():
        node.input_source = {}
        for edge in graph.in_edges_iter(node):
            data = graph.get_edge_data(*edge)
            for sourceinfo, field in data[u'connect']:
                node.input_source[field] = (
                    op.join(edge[0].output_dir(), u'result_%s.pklz' % edge[0].name),
                    sourceinfo)
'Checks if any of the nodes are already in the graph'
def _check_nodes(self, nodes):
    node_names = [node.name for node in self._graph.nodes()]
    node_lineage = [node._hierarchy for node in self._graph.nodes()]
    for node in nodes:
        if node.name in node_names:
            idx = node_names.index(node.name)
            if node_lineage[idx] in [node._hierarchy, self.name]:
                raise IOError(u'Duplicate node name %s found.' % node.name)
        else:
            node_names.append(node.name)
'Checks if a parameter is available as an input or output'
def _has_attr(self, parameter, subtype=u'in'):
    if subtype == u'in':
        subobject = self.inputs
    else:
        subobject = self.outputs
    attrlist = parameter.split(u'.')
    cur_out = subobject
    for attr in attrlist:
        if not hasattr(cur_out, attr):
            return False
        cur_out = getattr(cur_out, attr)
    return True
'Returns the underlying node corresponding to an input or output parameter'
def _get_parameter_node(self, parameter, subtype=u'in'):
    if subtype == u'in':
        subobject = self.inputs
    else:
        subobject = self.outputs
    attrlist = parameter.split(u'.')
    cur_out = subobject
    for attr in attrlist[:-1]:
        cur_out = getattr(cur_out, attr)
    return cur_out.traits()[attrlist[-1]].node
'Returns the inputs of a workflow This function does not return any input ports that are already connected'
def _get_inputs(self):
inputdict = TraitedSpec() for node in self._graph.nodes(): inputdict.add_trait(node.name, traits.Instance(TraitedSpec)) if isinstance(node, Workflow): setattr(inputdict, node.name, node.inputs) else: taken_inputs = [] for (_, _, d) in self._graph.in_edges_iter(nbunch=node, data=True): for cd in d[u'connect']: taken_inputs.append(cd[1]) unconnectedinputs = TraitedSpec() for (key, trait) in list(node.inputs.items()): if (key not in taken_inputs): unconnectedinputs.add_trait(key, traits.Trait(trait, node=node)) value = getattr(node.inputs, key) setattr(unconnectedinputs, key, value) setattr(inputdict, node.name, unconnectedinputs) getattr(inputdict, node.name).on_trait_change(self._set_input) return inputdict
'Returns all possible output ports that are not already connected'
def _get_outputs(self):
outputdict = TraitedSpec() for node in self._graph.nodes(): outputdict.add_trait(node.name, traits.Instance(TraitedSpec)) if isinstance(node, Workflow): setattr(outputdict, node.name, node.outputs) elif node.outputs: outputs = TraitedSpec() for (key, _) in list(node.outputs.items()): outputs.add_trait(key, traits.Any(node=node)) setattr(outputs, key, None) setattr(outputdict, node.name, outputs) return outputdict
'Trait callback function to update a node input'
def _set_input(self, object, name, newvalue):
object.traits()[name].node.set_input(name, newvalue)
'Set inputs of a node given the edge connection'
def _set_node_input(self, node, param, source, sourceinfo):
    if isinstance(sourceinfo, (str, bytes)):
        val = source.get_output(sourceinfo)
    elif isinstance(sourceinfo, tuple):
        if callable(sourceinfo[1]):
            val = sourceinfo[1](source.get_output(sourceinfo[0]), *sourceinfo[2:])
    newval = val
    if isinstance(val, TraitDictObject):
        newval = dict(val)
    if isinstance(val, TraitListObject):
        newval = val[:]
    logger.debug(u'setting node input: %s->%s', param, to_str(newval))
    node.set_input(param, deepcopy(newval))
'Make a simple DAG where no node is a workflow.'
def _create_flat_graph(self):
    logger.debug(u'Creating flat graph for workflow: %s', self.name)
    workflowcopy = deepcopy(self)
    workflowcopy._generate_flatgraph()
    return workflowcopy._graph
'Reset the hierarchy on a graph'
def _reset_hierarchy(self):
    for node in self._graph.nodes():
        if isinstance(node, Workflow):
            node._reset_hierarchy()
            for innernode in node._graph.nodes():
                innernode._hierarchy = u'.'.join((self.name, innernode._hierarchy))
        else:
            node._hierarchy = self.name
'Generate a graph containing only Nodes or MapNodes'
def _generate_flatgraph(self):
logger.debug(u'expanding workflow: %s', self) nodes2remove = [] if (not nx.is_directed_acyclic_graph(self._graph)): raise Exception((u'Workflow: %s is not a directed acyclic graph (DAG)' % self.name)) nodes = nx.topological_sort(self._graph) for node in nodes: logger.debug(u'processing node: %s', node) if isinstance(node, Workflow): nodes2remove.append(node) for (u, _, d) in self._graph.in_edges(nbunch=node, data=True): logger.debug(u'in: connections-> %s', to_str(d[u'connect'])) for cd in deepcopy(d[u'connect']): logger.debug(u'in: %s', to_str(cd)) dstnode = node._get_parameter_node(cd[1], subtype=u'in') srcnode = u srcout = cd[0] dstin = cd[1].split(u'.')[(-1)] logger.debug(u'in edges: %s %s %s %s', srcnode, srcout, dstnode, dstin) self.disconnect(u, cd[0], node, cd[1]) self.connect(srcnode, srcout, dstnode, dstin) for (_, v, d) in self._graph.out_edges(nbunch=node, data=True): logger.debug(u'out: connections-> %s', to_str(d[u'connect'])) for cd in deepcopy(d[u'connect']): logger.debug(u'out: %s', to_str(cd)) dstnode = v if isinstance(cd[0], tuple): parameter = cd[0][0] else: parameter = cd[0] srcnode = node._get_parameter_node(parameter, subtype=u'out') if isinstance(cd[0], tuple): srcout = list(cd[0]) srcout[0] = parameter.split(u'.')[(-1)] srcout = tuple(srcout) else: srcout = parameter.split(u'.')[(-1)] dstin = cd[1] logger.debug(u'out edges: %s %s %s %s', srcnode, srcout, dstnode, dstin) self.disconnect(node, cd[0], v, cd[1]) self.connect(srcnode, srcout, dstnode, dstin) node._generate_flatgraph() for innernode in node._graph.nodes(): innernode._hierarchy = u'.'.join((self.name, innernode._hierarchy)) self._graph.add_nodes_from(node._graph.nodes()) self._graph.add_edges_from(node._graph.edges(data=True)) if nodes2remove: self._graph.remove_nodes_from(nodes2remove) logger.debug(u'finished expanding workflow: %s', self)
'Create a dot file with connection info'
def _get_dot(self, prefix=None, hierarchy=None, colored=False, simple_form=True, level=0):
if (prefix is None): prefix = u' ' if (hierarchy is None): hierarchy = [] colorset = [u'#FFFFC8', u'#0000FF', u'#B4B4FF', u'#E6E6FF', u'#FF0000', u'#FFB4B4', u'#FFE6E6', u'#00A300', u'#B4FFB4', u'#E6FFE6', u'#0000FF', u'#B4B4FF'] if (level > (len(colorset) - 2)): level = 3 dotlist = [(u'%slabel="%s";' % (prefix, self.name))] for node in nx.topological_sort(self._graph): fullname = u'.'.join((hierarchy + [node.fullname])) nodename = fullname.replace(u'.', u'_') if (not isinstance(node, Workflow)): node_class_name = get_print_name(node, simple_form=simple_form) if (not simple_form): node_class_name = u'.'.join(node_class_name.split(u'.')[1:]) if (hasattr(node, u'iterables') and node.iterables): dotlist.append((u'%s[label="%s", shape=box3d,style=filled, color=black, colorscheme=greys7 fillcolor=2];' % (nodename, node_class_name))) elif colored: dotlist.append((u'%s[label="%s", style=filled, fillcolor="%s"];' % (nodename, node_class_name, colorset[level]))) else: dotlist.append((u'%s[label="%s"];' % (nodename, node_class_name))) for node in nx.topological_sort(self._graph): if isinstance(node, Workflow): fullname = u'.'.join((hierarchy + [node.fullname])) nodename = fullname.replace(u'.', u'_') dotlist.append((u'subgraph cluster_%s {' % nodename)) if colored: dotlist.append(((prefix + prefix) + (u'edge [color="%s"];' % colorset[(level + 1)]))) dotlist.append(((prefix + prefix) + u'style=filled;')) dotlist.append(((prefix + prefix) + (u'fillcolor="%s";' % colorset[(level + 2)]))) dotlist.append(node._get_dot(prefix=(prefix + prefix), hierarchy=(hierarchy + [self.name]), colored=colored, simple_form=simple_form, level=(level + 3))) dotlist.append(u'}') else: for subnode in self._graph.successors_iter(node): if (node._hierarchy != subnode._hierarchy): continue if (not isinstance(subnode, Workflow)): nodefullname = u'.'.join((hierarchy + [node.fullname])) subnodefullname = u'.'.join((hierarchy + [subnode.fullname])) nodename = nodefullname.replace(u'.', u'_') subnodename = subnodefullname.replace(u'.', u'_') for _ in self._graph.get_edge_data(node, subnode)[u'connect']: dotlist.append((u'%s -> %s;' % (nodename, subnodename))) logger.debug(u'connection: %s', dotlist[(-1)]) for (u, v, d) in self._graph.edges_iter(data=True): uname = u'.'.join((hierarchy + [u.fullname])) vname = u'.'.join((hierarchy + [v.fullname])) for (src, dest) in d[u'connect']: uname1 = uname vname1 = vname if isinstance(src, tuple): srcname = src[0] else: srcname = src if (u'.' in srcname): uname1 += (u'.' + u'.'.join(srcname.split(u'.')[:(-1)])) if ((u'.' in dest) and (u'@' not in dest)): if (not isinstance(v, Workflow)): if (u'datasink' not in str(v._interface.__class__).lower()): vname1 += (u'.' + u'.'.join(dest.split(u'.')[:(-1)])) else: vname1 += (u'.' + u'.'.join(dest.split(u'.')[:(-1)])) if (uname1.split(u'.')[:(-1)] != vname1.split(u'.')[:(-1)]): dotlist.append((u'%s -> %s;' % (uname1.replace(u'.', u'_'), vname1.replace(u'.', u'_')))) logger.debug(u'cross connection: %s', dotlist[(-1)]) return (u'\n' + prefix).join(dotlist)
'Open the specified file and use it as the stream for logging. By default, the file grows indefinitely. You can specify particular values of maxBytes and backupCount to allow the file to rollover at a predetermined size. Rollover occurs whenever the current log file is nearly maxBytes in length. If backupCount is >= 1, the system will successively create new files with the same pathname as the base file, but with extensions ".1", ".2" etc. appended to it. For example, with a backupCount of 5 and a base file name of "app.log", you would get "app.log", "app.log.1", "app.log.2", ... through to "app.log.5". The file being written to is always "app.log" - when it gets filled up, it is closed and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. exist, then they are renamed to "app.log.2", "app.log.3" etc. respectively. If maxBytes is zero, rollover never occurs. On Windows, it is not possible to rename a file that is currently opened by another process. This means that it is not possible to rotate the log files if multiple processes are using the same log file. In this case, the current log file will continue to grow until the rotation can be completed successfully. In order for rotation to be possible, all of the other processes need to close the file first. A mechanism, called "degraded" mode, has been created for this scenario. In degraded mode, the log file is closed after each log message is written. So once all processes have entered degraded mode, the next log rotation attempt should be successful and then normal logging can be resumed. This log handler assumes that all concurrent processes logging to a single file are using only this class, and that the exact same parameters are provided to each instance of this class. If, for example, two different processes are using this class, but with different values for \'maxBytes\' or \'backupCount\', then odd behavior is expected. The same is true if this class is used by one application, but the RotatingFileHandler is used by another. NOTE: You should always provide \'filename\' as an absolute path, since this class will need to re-open the file during rotation. If your application calls os.chdir(), then subsequent log files could be created in the wrong directory.'
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, debug=True, supress_abs_warn=False):
    if not os.path.isabs(filename):
        if FORCE_ABSOLUTE_PATH or not os.path.split(filename)[0]:
            filename = os.path.abspath(filename)
        elif not supress_abs_warn:
            from warnings import warn
            warn("The given 'filename' should be an absolute path. If your application calls os.chdir(), your logs may get messed up. Use 'supress_abs_warn=True' to hide this message.")
    try:
        BaseRotatingHandler.__init__(self, filename, mode, encoding)
    except TypeError:
        BaseRotatingHandler.__init__(self, filename, mode)
        self.encoding = encoding
    self._rotateFailed = False
    self.maxBytes = maxBytes
    self.backupCount = backupCount
    if filename.endswith('.log'):
        lock_file = filename[:-4]
    else:
        lock_file = filename
    self.stream_lock = open(lock_file + '.lock', 'w')
    if debug:
        self._degrade = self._degrade_debug
'Acquire thread and file locks. Also re-opening log file when running in \'degraded\' mode.'
def acquire(self):
    Handler.acquire(self)
    lock(self.stream_lock, LOCK_EX)
    if self.stream.closed:
        self._openFile(self.mode)
'Release file and thread locks. Flush stream and take care of closing stream in \'degraded\' mode.'
def release(self):
    try:
        if not self.stream.closed:
            self.stream.flush()
            if self._rotateFailed:
                self.stream.close()
    except IOError:
        if self._rotateFailed:
            self.stream.close()
    finally:
        try:
            unlock(self.stream_lock)
        finally:
            Handler.release(self)
'Closes the stream.'
def close(self):
    if not self.stream.closed:
        self.stream.flush()
        self.stream.close()
    Handler.close(self)
'flush(): Do nothing. Since a flush is issued in release(), we don\'t do it here. To do a flush here, it would be necessary to re-lock everything, and it is just easier and cleaner to do it all in release(), rather than requiring two lock ops per handle() call. Doing a flush() here would also introduce a window of opportunity for another process to write to the log file in between calling stream.write() and stream.flush(), which seems like a bad thing.'
def flush(self):
pass
'Set degrade mode or not. Ignore msg.'
def _degrade(self, degrade, msg, *args):
    self._rotateFailed = degrade
    del msg, args
'A more colorful version of _degrade(). (This is enabled by passing "debug=True" at initialization).'
def _degrade_debug(self, degrade, msg, *args):
    if degrade:
        if not self._rotateFailed:
            sys.stderr.write('Degrade mode - ENTERING - (pid=%d) %s\n' % (os.getpid(), msg % args))
            self._rotateFailed = True
    elif self._rotateFailed:
        sys.stderr.write('Degrade mode - EXITING - (pid=%d) %s\n' % (os.getpid(), msg % args))
        self._rotateFailed = False
'Do a rollover, as described in __init__().'
def doRollover(self):
if (self.backupCount <= 0): self.stream.close() self._openFile('w') return self.stream.close() try: tmpname = None while ((not tmpname) or os.path.exists(tmpname)): tmpname = ('%s.rotate.%08d' % (self.baseFilename, randint(0, 99999999))) try: os.rename(self.baseFilename, tmpname) except (IOError, OSError): exc_value = sys.exc_info()[1] self._degrade(True, 'rename failed. File in use? exception=%s', exc_value) return for i in range((self.backupCount - 1), 0, (-1)): sfn = ('%s.%d' % (self.baseFilename, i)) dfn = ('%s.%d' % (self.baseFilename, (i + 1))) if os.path.exists(sfn): if os.path.exists(dfn): os.remove(dfn) os.rename(sfn, dfn) dfn = (self.baseFilename + '.1') if os.path.exists(dfn): os.remove(dfn) os.rename(tmpname, dfn) self._degrade(False, 'Rotation completed') finally: self._openFile(self.mode)
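A usage sketch of the rollover behaviour described above; the import path and class name (ConcurrentRotatingFileHandler from cloghandler) are assumptions here, and the log path is illustrative.

import logging
# hypothetical import location for the handler class shown above
from cloghandler import ConcurrentRotatingFileHandler

log = logging.getLogger('app')
# keep at most 5 backups of roughly 1 MiB each: app.log, app.log.1, ..., app.log.5
handler = ConcurrentRotatingFileHandler('/var/tmp/app.log', mode='a',
                                        maxBytes=1 << 20, backupCount=5)
log.addHandler(handler)
log.warning('this line is appended to app.log until it rolls over')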
'Determine if rollover should occur. For those that are keeping track, this differs from the standard library\'s RotatingFileHandler class: because there is no promise to keep the file size under maxBytes, we ignore the length of the current record.'
def shouldRollover(self, record):
    del record
    if self._shouldRollover():
        self.stream.close()
        self._openFile(self.mode)
        return self._shouldRollover()
    return False
'Perform no good and no bad'
def _donothing(self, *args, **kwargs):
pass
'If I could cite I would'
def dcite(self, *args, **kwargs):
    def nondecorating_decorator(func):
        return func
    return nondecorating_decorator
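A short self-contained sketch of how such a no-op citation decorator behaves; the _InactiveCollector class, the `due` name, and the DOI string are stand-ins introduced only for illustration.

class _InactiveCollector(object):
    # minimal stand-in for the object that owns the dcite() stub above
    def dcite(self, *args, **kwargs):
        def nondecorating_decorator(func):
            return func
        return nondecorating_decorator

due = _InactiveCollector()

@due.dcite('10.1000/xyz123', description='citation recorded only if duecredit is installed')
def fit_model(data):
    return sum(data) / len(data)

print(fit_model([1, 2, 3]))  # the stub decorator leaves the function untouched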
'Temporary filesystem for testing non-POSIX filesystems on a POSIX system. with TempFATFS() as fatdir: target = os.path.join(fatdir, \'target\') copyfile(file1, target, copy=False) assert not os.path.islink(target) Arguments size_in_mbytes : int Size (in MiB) of filesystem to create delay : float Time (in seconds) to wait for fusefat to start, stop'
def __init__(self, size_in_mbytes=8, delay=0.5):
    self.delay = delay
    self.tmpdir = mkdtemp()
    self.dev_null = open(os.devnull, u'wb')
    vfatfile = os.path.join(self.tmpdir, u'vfatblock')
    self.vfatmount = os.path.join(self.tmpdir, u'vfatmount')
    self.canary = os.path.join(self.vfatmount, u'.canary')
    with open(vfatfile, u'wb') as fobj:
        fobj.write('\x00' * (int(size_in_mbytes) << 20))
    os.mkdir(self.vfatmount)
    mkfs_args = [u'mkfs.vfat', vfatfile]
    mount_args = [u'fusefat', u'-o', u'rw+', u'-f', vfatfile, self.vfatmount]
    try:
        subprocess.check_call(args=mkfs_args, stdout=self.dev_null,
                              stderr=self.dev_null)
    except CalledProcessError as e:
        raise_from(IOError(u'mkfs.vfat failed'), e)
    try:
        self.fusefat = subprocess.Popen(args=mount_args, stdout=self.dev_null,
                                        stderr=self.dev_null)
    except OSError as e:
        raise_from(IOError(u'fusefat is not installed'), e)
    time.sleep(self.delay)
    if self.fusefat.poll() is not None:
        raise IOError(u'fusefat terminated too soon')
    open(self.canary, u'wb').close()
'test that usage of misc.TSNR trips a warning to use confounds.TSNR instead'
@mock.patch('warnings.warn')
def test_warning(self, mock_warn):
    misc.TSNR(in_file=self.in_filenames['in_file'])
    assert True in [args[0].count('confounds') > 0
                    for _, args, _ in mock_warn.mock_calls]
'exactly the same as compcor except the header'
def __init__(self, *args, **kwargs):
    super(CompCor, self).__init__(*args, **kwargs)
    self._header = u'CompCor'
'exactly the same as compcor except the header'
def __init__(self, *args, **kwargs):
    super(ACompCor, self).__init__(*args, **kwargs)
    self._header = u'aCompCor'
'exactly the same as compcor except the header'
def __init__(self, *args, **kwargs):
    super(TCompCor, self).__init__(*args, **kwargs)
    self._header = u'tCompCor'
    self._mask_files = []
'Generates a standard design matrix paradigm given information about each run'
def _generate_standard_design(self, infolist, functional_runs=None, realignment_parameters=None, outliers=None):
sessinfo = [] output_units = u'secs' if (u'output_units' in self.inputs.traits()): output_units = self.inputs.output_units for (i, info) in enumerate(infolist): sessinfo.insert(i, dict(cond=[])) if isdefined(self.inputs.high_pass_filter_cutoff): sessinfo[i][u'hpf'] = np.float(self.inputs.high_pass_filter_cutoff) if (hasattr(info, u'conditions') and (info.conditions is not None)): for (cid, cond) in enumerate(info.conditions): sessinfo[i][u'cond'].insert(cid, dict()) sessinfo[i][u'cond'][cid][u'name'] = info.conditions[cid] scaled_onset = scale_timings(info.onsets[cid], self.inputs.input_units, output_units, self.inputs.time_repetition) sessinfo[i][u'cond'][cid][u'onset'] = scaled_onset scaled_duration = scale_timings(info.durations[cid], self.inputs.input_units, output_units, self.inputs.time_repetition) sessinfo[i][u'cond'][cid][u'duration'] = scaled_duration if (hasattr(info, u'amplitudes') and info.amplitudes): sessinfo[i][u'cond'][cid][u'amplitudes'] = info.amplitudes[cid] if (hasattr(info, u'tmod') and info.tmod and (len(info.tmod) > cid)): sessinfo[i][u'cond'][cid][u'tmod'] = info.tmod[cid] if (hasattr(info, u'pmod') and info.pmod and (len(info.pmod) > cid)): if info.pmod[cid]: sessinfo[i][u'cond'][cid][u'pmod'] = [] for (j, name) in enumerate(info.pmod[cid].name): sessinfo[i][u'cond'][cid][u'pmod'].insert(j, {}) sessinfo[i][u'cond'][cid][u'pmod'][j][u'name'] = name sessinfo[i][u'cond'][cid][u'pmod'][j][u'poly'] = info.pmod[cid].poly[j] sessinfo[i][u'cond'][cid][u'pmod'][j][u'param'] = info.pmod[cid].param[j] sessinfo[i][u'regress'] = [] if (hasattr(info, u'regressors') and (info.regressors is not None)): for (j, r) in enumerate(info.regressors): sessinfo[i][u'regress'].insert(j, dict(name=u'', val=[])) if (hasattr(info, u'regressor_names') and (info.regressor_names is not None)): sessinfo[i][u'regress'][j][u'name'] = info.regressor_names[j] else: sessinfo[i][u'regress'][j][u'name'] = (u'UR%d' % (j + 1)) sessinfo[i][u'regress'][j][u'val'] = info.regressors[j] sessinfo[i][u'scans'] = functional_runs[i] if (realignment_parameters is not None): for (i, rp) in enumerate(realignment_parameters): mc = realignment_parameters[i] for col in range(mc.shape[1]): colidx = len(sessinfo[i][u'regress']) sessinfo[i][u'regress'].insert(colidx, dict(name=u'', val=[])) sessinfo[i][u'regress'][colidx][u'name'] = (u'Realign%d' % (col + 1)) sessinfo[i][u'regress'][colidx][u'val'] = mc[:, col].tolist() if (outliers is not None): for (i, out) in enumerate(outliers): numscans = 0 for f in filename_to_list(sessinfo[i][u'scans']): shape = load(f, mmap=NUMPY_MMAP).shape if ((len(shape) == 3) or (shape[3] == 1)): iflogger.warning(u'You are using 3D instead of 4D files. Are you sure this was intended?') numscans += 1 else: numscans += shape[3] for (j, scanno) in enumerate(out): colidx = len(sessinfo[i][u'regress']) sessinfo[i][u'regress'].insert(colidx, dict(name=u'', val=[])) sessinfo[i][u'regress'][colidx][u'name'] = (u'Outlier%d' % (j + 1)) sessinfo[i][u'regress'][colidx][u'val'] = np.zeros((1, numscans))[0].tolist() sessinfo[i][u'regress'][colidx][u'val'][int(scanno)] = 1 return sessinfo
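A minimal sketch of feeding such a design specification through nipype's SpecifyModel interface; the file names, timings, and condition names are illustrative.

from nipype.algorithms.modelgen import SpecifyModel
from nipype.interfaces.base import Bunch

info = [Bunch(conditions=['task'], onsets=[[0, 30, 60]], durations=[[15]])]

spec = SpecifyModel()
spec.inputs.subject_info = info
spec.inputs.functional_runs = ['run1.nii.gz']
spec.inputs.input_units = 'secs'
spec.inputs.time_repetition = 2.0
spec.inputs.high_pass_filter_cutoff = 128.0
# spec.run() would return the session_info used to build the design matrix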
'Generate design specification for a typical fmri paradigm'
def _generate_design(self, infolist=None):
realignment_parameters = [] if isdefined(self.inputs.realignment_parameters): for parfile in self.inputs.realignment_parameters: realignment_parameters.append(np.apply_along_axis(func1d=normalize_mc_params, axis=1, arr=np.loadtxt(parfile), source=self.inputs.parameter_source)) outliers = [] if isdefined(self.inputs.outlier_files): for filename in self.inputs.outlier_files: try: outindices = np.loadtxt(filename, dtype=int) except IOError: outliers.append([]) else: if (outindices.size == 1): outliers.append([outindices.tolist()]) else: outliers.append(outindices.tolist()) if (infolist is None): if isdefined(self.inputs.subject_info): infolist = self.inputs.subject_info else: infolist = gen_info(self.inputs.event_files) self._sessinfo = self._generate_standard_design(infolist, functional_runs=self.inputs.functional_runs, realignment_parameters=realignment_parameters, outliers=outliers)
''
def _run_interface(self, runtime):
    self._sessioninfo = None
    self._generate_design()
    return runtime
'Generates a regressor for a sparse/clustered-sparse acquisition'
def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans):
    bplot = False
    if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
        bplot = True
        import matplotlib
        matplotlib.use(config.get(u'execution', u'matplotlib_backend'))
        import matplotlib.pyplot as plt
    TR = np.round(self.inputs.time_repetition * 1000)
    if self.inputs.time_acquisition:
        TA = np.round(self.inputs.time_acquisition * 1000)
    else:
        TA = TR
    nvol = self.inputs.volumes_in_cluster
    SCANONSET = np.round(self.inputs.scan_onset * 1000)
    total_time = ((TR * (nscans - nvol)) / nvol) + (TA * nvol) + SCANONSET
    SILENCE = TR - (TA * nvol)
    dt = TA / 10.0
    durations = np.round(np.array(i_durations) * 1000)
    if len(durations) == 1:
        durations = durations * np.ones(len(i_onsets))
    onsets = np.round(np.array(i_onsets) * 1000)
    dttemp = gcd(TA, gcd(SILENCE, TR))
    if dt < dttemp:
        if (dttemp % dt) != 0:
            dt = float(gcd(dttemp, dt))
    if dt < 1:
        raise Exception(u'Time multiple less than 1 ms')
    iflogger.info(u'Setting dt = %d ms\n' % dt)
    npts = int(np.ceil(total_time / dt))
    times = np.arange(0, total_time, dt) * 0.001
    timeline = np.zeros(npts)
    timeline2 = np.zeros(npts)
    if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf:
        hrf = spm_hrf(dt * 0.001)
    reg_scale = 1.0
    if self.inputs.scale_regressors:
        boxcar = np.zeros(int((50.0 * 1000.0) / dt))
        if self.inputs.stimuli_as_impulses:
            boxcar[int((1.0 * 1000.0) / dt)] = 1.0
            reg_scale = float(TA / dt)
        else:
            boxcar[int((1.0 * 1000.0) / dt):int((2.0 * 1000.0) / dt)] = 1.0
        if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf:
            response = np.convolve(boxcar, hrf)
            reg_scale = 1.0 / response.max()
            iflogger.info(u'response sum: %.4f max: %.4f' % (response.sum(), response.max()))
        iflogger.info(u'reg_scale: %.4f' % reg_scale)
    for i, t in enumerate(onsets):
        idx = int(np.round(t / dt))
        if i_amplitudes:
            if len(i_amplitudes) > 1:
                timeline2[idx] = i_amplitudes[i]
            else:
                timeline2[idx] = i_amplitudes[0]
        else:
            timeline2[idx] = 1
        if bplot:
            plt.subplot(4, 1, 1)
            plt.plot(times, timeline2)
        if not self.inputs.stimuli_as_impulses:
            if durations[i] == 0:
                durations[i] = TA * nvol
            stimdur = np.ones(int(durations[i] / dt))
            timeline2 = np.convolve(timeline2, stimdur)[0:len(timeline2)]
        timeline += timeline2
        timeline2[:] = 0
    if bplot:
        plt.subplot(4, 1, 2)
        plt.plot(times, timeline)
    if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf:
        timeline = np.convolve(timeline, hrf)[0:len(timeline)]
        if isdefined(self.inputs.use_temporal_deriv) and self.inputs.use_temporal_deriv:
            timederiv = np.concatenate(([0], np.diff(timeline)))
    if bplot:
        plt.subplot(4, 1, 3)
        plt.plot(times, timeline)
        if isdefined(self.inputs.use_temporal_deriv) and self.inputs.use_temporal_deriv:
            plt.plot(times, timederiv)
    timeline2 = np.zeros(npts)
    reg = []
    regderiv = []
    for i, trial in enumerate(np.arange(nscans) / nvol):
        scanstart = int((SCANONSET + (trial * TR) + ((i % nvol) * TA)) / dt)
        scanidx = scanstart + np.arange(int(TA / dt))
        timeline2[scanidx] = np.max(timeline)
        reg.insert(i, np.mean(timeline[scanidx]) * reg_scale)
        if isdefined(self.inputs.use_temporal_deriv) and self.inputs.use_temporal_deriv:
            regderiv.insert(i, np.mean(timederiv[scanidx]) * reg_scale)
    if isdefined(self.inputs.use_temporal_deriv) and self.inputs.use_temporal_deriv:
        iflogger.info(u'orthogonalizing derivative w.r.t. main regressor')
        regderiv = orth(reg, regderiv)
    if bplot:
        plt.subplot(4, 1, 3)
        plt.plot(times, timeline2)
        plt.subplot(4, 1, 4)
        plt.bar(np.arange(len(reg)), reg, width=0.5)
        plt.savefig(u'sparse.png')
        plt.savefig(u'sparse.svg')
    if regderiv:
        return [reg, regderiv]
    else:
        return reg
'Converts condition information to full regressors'
def _cond_to_regress(self, info, nscans):
reg = [] regnames = [] for (i, cond) in enumerate(info.conditions): if (hasattr(info, u'amplitudes') and info.amplitudes): amplitudes = info.amplitudes[i] else: amplitudes = None regnames.insert(len(regnames), cond) scaled_onsets = scale_timings(info.onsets[i], self.inputs.input_units, u'secs', self.inputs.time_repetition) scaled_durations = scale_timings(info.durations[i], self.inputs.input_units, u'secs', self.inputs.time_repetition) regressor = self._gen_regress(scaled_onsets, scaled_durations, amplitudes, nscans) if (isdefined(self.inputs.use_temporal_deriv) and self.inputs.use_temporal_deriv): reg.insert(len(reg), regressor[0]) regnames.insert(len(regnames), (cond + u'_D')) reg.insert(len(reg), regressor[1]) else: reg.insert(len(reg), regressor) nvol = self.inputs.volumes_in_cluster if (nvol > 1): for i in range((nvol - 1)): treg = np.zeros(((nscans / nvol), nvol)) treg[:, i] = 1 reg.insert(len(reg), treg.ravel().tolist()) regnames.insert(len(regnames), (u'T1effect_%d' % i)) return (reg, regnames)
'Generates condition information for sparse-clustered designs.'
def _generate_clustered_design(self, infolist):
infoout = deepcopy(infolist) for (i, info) in enumerate(infolist): infoout[i].conditions = None infoout[i].onsets = None infoout[i].durations = None if info.conditions: img = load(self.inputs.functional_runs[i], mmap=NUMPY_MMAP) nscans = img.shape[3] (reg, regnames) = self._cond_to_regress(info, nscans) if (hasattr(infoout[i], u'regressors') and infoout[i].regressors): if (not infoout[i].regressor_names): infoout[i].regressor_names = [(u'R%d' % j) for j in range(len(infoout[i].regressors))] else: infoout[i].regressors = [] infoout[i].regressor_names = [] for (j, r) in enumerate(reg): regidx = len(infoout[i].regressors) infoout[i].regressor_names.insert(regidx, regnames[j]) infoout[i].regressors.insert(regidx, r) return infoout
'Generate output files based on motion filenames Parameters motionfile: file/string Filename for motion parameter file output_dir: string output directory in which the files will be generated'
def _get_output_filenames(self, motionfile, output_dir):
    if isinstance(motionfile, (str, bytes)):
        infile = motionfile
    elif isinstance(motionfile, list):
        infile = motionfile[0]
    else:
        raise Exception(u'Unknown type of file')
    _, filename, ext = split_filename(infile)
    artifactfile = os.path.join(output_dir, u''.join((u'art.', filename, u'_outliers.txt')))
    intensityfile = os.path.join(output_dir, u''.join((u'global_intensity.', filename, u'.txt')))
    statsfile = os.path.join(output_dir, u''.join((u'stats.', filename, u'.txt')))
    normfile = os.path.join(output_dir, u''.join((u'norm.', filename, u'.txt')))
    plotfile = os.path.join(output_dir, u''.join((u'plot.', filename, u'.', self.inputs.plot_type)))
    displacementfile = os.path.join(output_dir, u''.join((u'disp.', filename, ext)))
    maskfile = os.path.join(output_dir, u''.join((u'mask.', filename, ext)))
    return (artifactfile, intensityfile, statsfile, normfile, plotfile,
            displacementfile, maskfile)
'Core routine for detecting outliers'
def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None):
    if not cwd:
        cwd = os.getcwd()
    if isinstance(imgfile, (str, bytes)):
        nim = load(imgfile, mmap=NUMPY_MMAP)
    elif isinstance(imgfile, list):
        if len(imgfile) == 1:
            nim = load(imgfile[0], mmap=NUMPY_MMAP)
        else:
            images = [load(f, mmap=NUMPY_MMAP) for f in imgfile]
            nim = funcs.concat_images(images)
    (x, y, z, timepoints) = nim.shape
    data = nim.get_data()
    affine = nim.affine
    g = np.zeros((timepoints, 1))
    masktype = self.inputs.mask_type
    if masktype == u'spm_global':
        iflogger.debug(u'art: using spm global')
        intersect_mask = self.inputs.intersect_mask
        if intersect_mask:
            mask = np.ones((x, y, z), dtype=bool)
            for t0 in range(timepoints):
                vol = data[:, :, :, t0]
                mask_tmp = vol > (_nanmean(vol) / self.inputs.global_threshold)
                mask = mask * mask_tmp
            for t0 in range(timepoints):
                vol = data[:, :, :, t0]
                g[t0] = _nanmean(vol[mask])
            if len(find_indices(mask)) < (np.prod((x, y, z)) / 10):
                intersect_mask = False
                g = np.zeros((timepoints, 1))
        if not intersect_mask:
            iflogger.info(u'not intersect_mask is True')
            mask = np.zeros((x, y, z, timepoints))
            for t0 in range(timepoints):
                vol = data[:, :, :, t0]
                mask_tmp = vol > (_nanmean(vol) / self.inputs.global_threshold)
                mask[:, :, :, t0] = mask_tmp
                g[t0] = np.nansum(vol * mask_tmp) / np.nansum(mask_tmp)
    elif masktype == u'file':
        maskimg = load(self.inputs.mask_file, mmap=NUMPY_MMAP)
        mask = maskimg.get_data()
        affine = maskimg.affine
        mask = mask > 0.5
        for t0 in range(timepoints):
            vol = data[:, :, :, t0]
            g[t0] = _nanmean(vol[mask])
    elif masktype == u'thresh':
        for t0 in range(timepoints):
            vol = data[:, :, :, t0]
            mask = vol > self.inputs.mask_threshold
            g[t0] = _nanmean(vol[mask])
    else:
        mask = np.ones((x, y, z))
        g = _nanmean(data[mask > 0, :], 1)
    gz = signal.detrend(g, axis=0)
    if self.inputs.use_differences[1]:
        gz = np.concatenate((np.zeros((1, 1)), np.diff(gz, n=1, axis=0)), axis=0)
    gz = (gz - np.mean(gz)) / np.std(gz)
    iidx = find_indices(abs(gz) > self.inputs.zintensity_threshold)
    mc_in = np.loadtxt(motionfile)
    mc = deepcopy(mc_in)
    (artifactfile, intensityfile, statsfile, normfile, plotfile,
     displacementfile, maskfile) = self._get_output_filenames(imgfile, cwd)
    mask_img = Nifti1Image(mask.astype(np.uint8), affine)
    mask_img.to_filename(maskfile)
    if self.inputs.use_norm:
        brain_pts = None
        if self.inputs.bound_by_brainmask:
            voxel_coords = np.nonzero(mask)
            coords = np.vstack((voxel_coords[0],
                                np.vstack((voxel_coords[1], voxel_coords[2])))).T
            brain_pts = np.dot(affine,
                               np.hstack((coords, np.ones((coords.shape[0], 1)))).T)
        (normval, displacement) = _calc_norm(mc, self.inputs.use_differences[0],
                                             self.inputs.parameter_source,
                                             brain_pts=brain_pts)
        tidx = find_indices(normval > self.inputs.norm_threshold)
        ridx = find_indices(normval < 0)
        if displacement is not None:
            dmap = np.zeros((x, y, z, timepoints), dtype=np.float)
            for i in range(timepoints):
                dmap[voxel_coords[0], voxel_coords[1], voxel_coords[2], i] = displacement[i, :]
            dimg = Nifti1Image(dmap, affine)
            dimg.to_filename(displacementfile)
    else:
        if self.inputs.use_differences[0]:
            mc = np.concatenate((np.zeros((1, 6)), np.diff(mc_in, n=1, axis=0)), axis=0)
        traval = mc[:, 0:3]
        rotval = mc[:, 3:6]
        tidx = find_indices(np.sum(abs(traval) > self.inputs.translation_threshold, 1) > 0)
        ridx = find_indices(np.sum(abs(rotval) > self.inputs.rotation_threshold, 1) > 0)
    outliers = np.unique(np.union1d(iidx, np.union1d(tidx, ridx)))
    np.savetxt(artifactfile, outliers, fmt='%d', delimiter=u' ')
    np.savetxt(intensityfile, g, fmt='%.2f', delimiter=u' ')
    if self.inputs.use_norm:
        np.savetxt(normfile, normval, fmt='%.4f', delimiter=u' ')
    if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
        import matplotlib
        matplotlib.use(config.get(u'execution', u'matplotlib_backend'))
        import matplotlib.pyplot as plt
        fig = plt.figure()
        if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
            plt.subplot(211)
        else:
            plt.subplot(311)
        self._plot_outliers_with_wave(gz, iidx, u'Intensity')
        if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
            plt.subplot(212)
            self._plot_outliers_with_wave(normval, np.union1d(tidx, ridx), u'Norm (mm)')
        else:
            diff = u''
            if self.inputs.use_differences[0]:
                diff = u'diff'
            plt.subplot(312)
            self._plot_outliers_with_wave(traval, tidx, u'Translation (mm)' + diff)
            plt.subplot(313)
            self._plot_outliers_with_wave(rotval, ridx, u'Rotation (rad)' + diff)
        plt.savefig(plotfile)
        plt.close(fig)
    motion_outliers = np.union1d(tidx, ridx)
    stats = [
        {u'motion_file': motionfile, u'functional_file': imgfile},
        {u'common_outliers': len(np.intersect1d(iidx, motion_outliers)),
         u'intensity_outliers': len(np.setdiff1d(iidx, motion_outliers)),
         u'motion_outliers': len(np.setdiff1d(motion_outliers, iidx))},
        {u'motion': [
            {u'using differences': self.inputs.use_differences[0]},
            {u'mean': np.mean(mc_in, axis=0).tolist(),
             u'min': np.min(mc_in, axis=0).tolist(),
             u'max': np.max(mc_in, axis=0).tolist(),
             u'std': np.std(mc_in, axis=0).tolist()}]},
        {u'intensity': [
            {u'using differences': self.inputs.use_differences[1]},
            {u'mean': np.mean(gz, axis=0).tolist(),
             u'min': np.min(gz, axis=0).tolist(),
             u'max': np.max(gz, axis=0).tolist(),
             u'std': np.std(gz, axis=0).tolist()}]},
    ]
    if self.inputs.use_norm:
        stats.insert(3, {u'motion_norm': {
            u'mean': np.mean(normval, axis=0).tolist(),
            u'min': np.min(normval, axis=0).tolist(),
            u'max': np.max(normval, axis=0).tolist(),
            u'std': np.std(normval, axis=0).tolist()}})
    save_json(statsfile, stats)
'Execute this module.'
def _run_interface(self, runtime):
    funcfilelist = filename_to_list(self.inputs.realigned_files)
    motparamlist = filename_to_list(self.inputs.realignment_parameters)
    for i, imgf in enumerate(funcfilelist):
        self._detect_outliers_core(imgf, motparamlist[i], i, cwd=os.getcwd())
    return runtime
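A minimal usage sketch of this outlier-detection interface, assuming it is exposed as nipype.algorithms.rapidart.ArtifactDetect; the input file names are illustrative.

from nipype.algorithms.rapidart import ArtifactDetect

ad = ArtifactDetect()
ad.inputs.realigned_files = 'func_realigned.nii.gz'
ad.inputs.realignment_parameters = 'rp_func.txt'
ad.inputs.parameter_source = 'SPM'
ad.inputs.mask_type = 'spm_global'
ad.inputs.use_norm = True
ad.inputs.norm_threshold = 1.0
ad.inputs.zintensity_threshold = 3.0
ad.inputs.use_differences = [True, False]
# ad.run() would write art.*_outliers.txt, global_intensity.*, stats.*, etc.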
'Generate output files based on motion filenames Parameters motionfile: file/string Filename for motion parameter file output_dir: string output directory in which the files will be generated'
def _get_output_filenames(self, motionfile, output_dir):
(_, filename) = os.path.split(motionfile) (filename, _) = os.path.splitext(filename) corrfile = os.path.join(output_dir, u''.join((u'qa.', filename, u'_stimcorr.txt'))) return corrfile
'Core routine for determining stimulus correlation'
def _stimcorr_core(self, motionfile, intensityfile, designmatrix, cwd=None):
if (not cwd): cwd = os.getcwd() mc_in = np.loadtxt(motionfile) g_in = np.loadtxt(intensityfile) g_in.shape = (g_in.shape[0], 1) dcol = designmatrix.shape[1] mccol = mc_in.shape[1] concat_matrix = np.hstack((np.hstack((designmatrix, mc_in)), g_in)) cm = np.corrcoef(concat_matrix, rowvar=0) corrfile = self._get_output_filenames(motionfile, cwd) file = open(corrfile, u'w') file.write(u'Stats for:\n') file.write((u'Stimulus correlated motion:\n%s\n' % motionfile)) for i in range(dcol): file.write((u'SCM.%d:' % i)) for v in cm[(i, (dcol + np.arange(mccol)))]: file.write((u' %.2f' % v)) file.write(u'\n') file.write((u'Stimulus correlated intensity:\n%s\n' % intensityfile)) for i in range(dcol): file.write((u'SCI.%d: %.2f\n' % (i, cm[(i, (-1))]))) file.close()
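Stimulus-correlated motion here is simply np.corrcoef over a matrix whose columns are the design regressors, the realignment parameters, and the global intensity trace; the design-vs-motion block and the last column give the SCM/SCI values written to the text file. A self-contained sketch with synthetic data (array shapes are illustrative assumptions):

    import numpy as np

    timepoints = 120
    design = np.random.randn(timepoints, 2)      # two task regressors (illustrative)
    motion = np.random.randn(timepoints, 6)      # six realignment parameters
    intensity = np.random.randn(timepoints, 1)   # global intensity trace

    # columns: [design | motion | intensity]; rowvar=0 correlates columns
    cm = np.corrcoef(np.hstack((design, motion, intensity)), rowvar=0)

    dcol, mccol = design.shape[1], motion.shape[1]
    for i in range(dcol):
        scm = cm[i, dcol:dcol + mccol]           # regressor i vs each motion parameter
        sci = cm[i, -1]                          # regressor i vs global intensity
        print('SCM.%d:' % i, np.round(scm, 2), 'SCI.%d: %.2f' % (i, sci))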
'Parameters spmmat: scipy matlab object full SPM.mat file loaded into a scipy object sessidx: int index to session that needs to be extracted.'
def _get_spm_submatrix(self, spmmat, sessidx, rows=None):
designmatrix = spmmat[u'SPM'][0][0].xX[0][0].X U = spmmat[u'SPM'][0][0].Sess[0][sessidx].U[0] if (rows is None): rows = (spmmat[u'SPM'][0][0].Sess[0][sessidx].row[0] - 1) cols = (spmmat[u'SPM'][0][0].Sess[0][sessidx].col[0][list(range(len(U)))] - 1) outmatrix = designmatrix.take(rows.tolist(), axis=0).take(cols.tolist(), axis=1) return outmatrix
'Execute this module.'
def _run_interface(self, runtime):
motparamlist = self.inputs.realignment_parameters intensityfiles = self.inputs.intensity_values spmmat = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False) nrows = [] for i in range(len(motparamlist)): sessidx = i rows = None if self.inputs.concatenated_design: sessidx = 0 mc_in = np.loadtxt(motparamlist[i]) rows = (np.sum(nrows) + np.arange(mc_in.shape[0])) nrows.append(mc_in.shape[0]) matrix = self._get_spm_submatrix(spmmat, sessidx, rows) self._stimcorr_core(motparamlist[i], intensityfiles[i], matrix, os.getcwd()) return runtime
'Generates a copy of an image with a given amount of added Gaussian (or Rician) noise, using a Rayleigh distribution for the background outside the mask'
def gen_noise(self, image, mask=None, snr_db=10.0, dist=u'normal', bg_dist=u'normal'):
from math import sqrt snr = sqrt(np.power(10.0, (snr_db / 10.0))) if (mask is None): mask = np.ones_like(image) else: mask[(mask > 0)] = 1 mask[(mask < 1)] = 0 if (mask.ndim < image.ndim): mask = np.rollaxis(np.array(([mask] * image.shape[3])), 0, 4) signal = image[(mask > 0)].reshape((-1)) if (dist == u'normal'): signal = (signal - signal.mean()) sigma_n = sqrt((signal.var() / snr)) noise = np.random.normal(size=image.shape, scale=sigma_n) if (np.any((mask == 0)) and (bg_dist == u'rayleigh')): bg_noise = np.random.rayleigh(size=image.shape, scale=sigma_n) noise[(mask == 0)] = bg_noise[(mask == 0)] im_noise = (image + noise) elif (dist == u'rician'): sigma_n = (signal.mean() / snr) n_1 = np.random.normal(size=image.shape, scale=sigma_n) n_2 = np.random.normal(size=image.shape, scale=sigma_n) stde_1 = (n_1 / sqrt(2.0)) stde_2 = (n_2 / sqrt(2.0)) im_noise = np.sqrt((((image + stde_1) ** 2) + (stde_2 ** 2))) else: raise NotImplementedError(u'Only normal and rician distributions are supported') return im_noise
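For the Gaussian branch, the noise scale follows directly from the requested SNR in decibels: snr = sqrt(10 ** (snr_db / 10)) and sigma_n = sqrt(var(signal) / snr). A minimal standalone sketch of that relationship on a synthetic image (not the interface itself):

    import numpy as np
    from math import sqrt

    image = np.random.rand(16, 16, 8) * 100.0    # synthetic "signal" image
    snr_db = 10.0

    snr = sqrt(np.power(10.0, snr_db / 10.0))    # dB -> linear SNR, as above
    demeaned = image.ravel() - image.mean()
    sigma_n = sqrt(demeaned.var() / snr)         # Gaussian noise standard deviation

    noisy = image + np.random.normal(size=image.shape, scale=sigma_n)
    print('sigma_n = %.3f' % sigma_n)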
'Parameters interface: a nipype interface class The interface class to wrap base_dir: a string The directory in which the computation will be stored callback: a callable An optional callable called each time after the function is called.'
def __init__(self, interface, base_dir, callback=None):
if (not (isinstance(interface, type) and issubclass(interface, BaseInterface))): raise ValueError((u'the interface argument should be a nipype interface class, but %s (type %s) was passed.' % (interface, type(interface)))) self.interface = interface base_dir = os.path.abspath(base_dir) if (not (os.path.exists(base_dir) and os.path.isdir(base_dir))): raise ValueError(u'base_dir should be an existing directory') self.base_dir = base_dir doc = (u'%s\n%s' % (self.interface.__doc__, self.interface.help(returnhelp=True))) self.__doc__ = doc self.callback = callback
'Returns a callable that caches the output of an interface Parameters interface: nipype interface The nipype interface class to be wrapped and cached Returns pipe_func: a PipeFunc callable object An object that can be used as a function to apply the interface to arguments. Inputs of the interface are given as keyword arguments, bearing the same name as the name in the inputs specs of the interface. Examples >>> from tempfile import mkdtemp >>> mem = Memory(mkdtemp()) >>> from nipype.interfaces import fsl Here we create a callable that can be used to apply an fsl.Merge interface to files >>> fsl_merge = mem.cache(fsl.Merge) Now we apply it to a list of files. We need to specify the list of input files and the dimension along which the files should be merged. >>> results = fsl_merge(in_files=[\'a.nii\', \'b.nii\'], ... dimension=\'t\') # doctest: +SKIP We can retrieve the resulting file from the outputs: >>> results.outputs.merged_file # doctest: +SKIP'
def cache(self, interface):
return PipeFunc(interface, self.base_dir, _MemoryCallback(self))
'Log which cached functions get executed, both to log.current and to a per-day rotating log file.'
def _log_name(self, dir_name, job_name):
base_dir = self.base_dir with open(os.path.join(base_dir, u'log.current'), u'a') as currentlog: currentlog.write((u'%s/%s\n' % (dir_name, job_name))) t = time.localtime() year_dir = os.path.join(base_dir, (u'log.%i' % t.tm_year)) try: os.mkdir(year_dir) except OSError: u'Dir exists' month_dir = os.path.join(year_dir, (u'%02i' % t.tm_mon)) try: os.mkdir(month_dir) except OSError: u'Dir exists' with open(os.path.join(month_dir, (u'%02i.log' % t.tm_mday)), u'a') as rotatefile: rotatefile.write((u'%s/%s\n' % (dir_name, job_name)))
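Each cached call thus appends one 'dir_name/job_name' line to log.current and to a per-day file, so the cache directory ends up with a layout roughly like the following (dates are illustrative):

    base_dir/
        log.current            # one 'dir_name/job_name' line per cached execution
        log.2017/
            03/
                28.log         # same lines, rotated by year/month/day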
'Remove all the cache that were not used in the latest run of the memory object: i.e. since the corresponding Python object was created. Parameters warn: boolean, optional If true, echoes warning messages for each directory removed'
def clear_previous_runs(self, warn=True):
base_dir = self.base_dir latest_runs = read_log(os.path.join(base_dir, u'log.current')) self._clear_all_but(latest_runs, warn=warn)
'Remove all the cache that were not used since the given date Parameters day, month, year: integers, optional The integers specifying the latest day (in localtime) that a node should have been accessed to be kept. If not given, the current date is used. warn: boolean, optional If true, echoes warning messages for each directory removed'
def clear_runs_since(self, day=None, month=None, year=None, warn=True):
t = time.localtime() day = (day if (day is not None) else t.tm_mday) month = (month if (month is not None) else t.tm_mon) year = (year if (year is not None) else t.tm_year) base_dir = self.base_dir cut_off_file = (u'%s/log.%i/%02i/%02i.log' % (base_dir, year, month, day)) logs_to_flush = list() recent_runs = dict() for log_name in glob.glob((u'%s/log.*/*/*.log' % base_dir)): if (log_name < cut_off_file): logs_to_flush.append(log_name) else: recent_runs = read_log(log_name, recent_runs) self._clear_all_but(recent_runs, warn=warn) for log_name in logs_to_flush: os.remove(log_name)
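Together with clear_previous_runs, a typical (hypothetical) cleanup session might look like this; the cache directory and cutoff date are placeholders, and the directory must already exist:

    from nipype.caching import Memory

    mem = Memory('/tmp/nipype_cache')        # hypothetical, pre-existing cache directory
    # ... run cached interfaces via mem.cache(SomeInterface)(...) ...

    # drop everything not touched since the given local date (defaults to today)
    mem.clear_runs_since(year=2017, month=1, day=1)

    # or keep only what this Memory object itself has executed
    mem.clear_previous_runs(warn=True)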
'Remove all the runs apart from those given to the function input.'
def _clear_all_but(self, runs, warn=True):
rm_all_but(self.base_dir, set(runs.keys()), warn=warn) for (dir_name, job_names) in list(runs.items()): rm_all_but(os.path.join(self.base_dir, dir_name), job_names, warn=warn)
'Convert input to appropriate format for seg_LabFusion.'
def _format_arg(self, opt, spec, val):
if ((opt in ['proportion', 'prob_update_flag', 'set_pq', 'mrf_value', 'max_iter', 'unc_thresh', 'conv']) and (self.inputs.classifier_type not in ['STAPLE', 'STEPS'])): return '' if (opt == 'sm_ranking'): return self.get_staple_args(val) if ((opt == 'classifier_type') and (val == 'STEPS')): return self.get_steps_args() return super(LabelFusion, self)._format_arg(opt, spec, val)
'Convert input to appropriate format for seg_EM.'
def _format_arg(self, opt, spec, val):
if (opt == 'priors'): _nb_priors = len(self.inputs.priors) return ('-priors %d %s' % (_nb_priors, ' '.join(self.inputs.priors))) else: return super(EM, self)._format_arg(opt, spec, val)
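With two prior maps, for example, the option expands to a single flag listing the count followed by the file names (file names hypothetical):

    priors = ['prior_gm.nii.gz', 'prior_wm.nii.gz']            # hypothetical priors
    flag = '-priors %d %s' % (len(priors), ' '.join(priors))
    print(flag)                                                  # -priors 2 prior_gm.nii.gz prior_wm.nii.gz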
'Convert input to appropriate format for seg_maths.'
def _format_arg(self, opt, spec, val):
if ((opt == 'operand_str') and (self.inputs.operation != 'splitinter')): err = 'operand_str set but with an operation different than "splitinter"' raise NipypeInterfaceError(err) if (opt == 'operation'): if (val in ['pow', 'thr', 'uthr', 'smo', 'edge', 'sobel3', 'sobel5', 'smol']): if (not isdefined(self.inputs.operand_value)): err = 'operand_value not set for {0}.'.format(val) raise NipypeInterfaceError(err) elif (val in ['min', 'llsnorm', 'masknan', 'hdr_copy']): if (not isdefined(self.inputs.operand_file)): err = 'operand_file not set for {0}.'.format(val) raise NipypeInterfaceError(err) elif (val == 'splitinter'): if (not isdefined(self.inputs.operand_str)): err = 'operand_str not set for splitinter.' raise NipypeInterfaceError(err) if ((opt == 'operand_value') and (float(val) == 0.0)): return '0' return super(BinaryMaths, self)._format_arg(opt, spec, val)
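The checks above mean every operation has to be paired with the matching operand trait. A hedged usage sketch (the in_file value is a placeholder; the trait names are the ones referenced above):

    from nipype.interfaces import niftyseg

    node = niftyseg.BinaryMaths()
    node.inputs.in_file = 'im1.nii.gz'      # placeholder input image
    node.inputs.operation = 'thr'           # one of the value-operand operations
    node.inputs.operand_value = 0.5         # required for 'thr'; _format_arg raises
                                            # NipypeInterfaceError if unset when the
                                            # command line is built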
'Convert input to appropriate format for seg_maths.'
def _format_arg(self, opt, spec, val):
if (opt == 'merge_files'): return ('-merge %d %d %s' % (len(val), self.inputs.dimension, ' '.join(val))) return super(Merge, self)._format_arg(opt, spec, val)
'update existing attribute, or create new attribute Note: update is very much like HasTraits.set'
def update(self, *args, **kwargs):
self.__dict__.update(*args, **kwargs)
'iterates over bunch attributes as key, value pairs'
def items(self):
return list(self.__dict__.items())
'iterates over bunch attributes as key, value pairs'
def iteritems(self):
warn(u'iteritems is deprecated, use items instead') return list(self.items())
'Support dictionary get() functionality'
def get(self, *args):
return self.__dict__.get(*args)
'Support dictionary get() functionality'
def set(self, **kwargs):
return self.__dict__.update(**kwargs)
'returns a deep copy of existing Bunch as a dictionary'
def dictcopy(self):
return deepcopy(self.__dict__)
'representation of the sorted Bunch as a string Currently, this string representation of the `inputs` Bunch of interfaces is hashed to determine if the process\' dirty-bit needs setting or not. Till that mechanism changes, only alter this after careful consideration.'
def __repr__(self):
outstr = [u'Bunch('] first = True for (k, v) in sorted(self.items()): if (not first): outstr.append(u', ') if isinstance(v, dict): pairs = [] for (key, value) in sorted(v.items()): pairs.append((u"'%s': %s" % (key, value))) v = ((u'{' + u', '.join(pairs)) + u'}') outstr.append((u'%s=%s' % (k, v))) else: outstr.append((u'%s=%r' % (k, v))) first = False outstr.append(u')') return u''.join(outstr)
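In practice Bunch is mostly used as a lightweight attribute container, e.g. for per-run condition information handed to the model-specification interfaces; a small hedged example:

    from nipype.interfaces.base import Bunch

    info = Bunch(conditions=['task'], onsets=[[10, 40, 70]], durations=[[5]])
    info.update(amplitudes=[[1.0]])          # add or overwrite attributes in place
    print(info.get('conditions'))            # dictionary-style access -> ['task']
    print(repr(info))                        # sorted, stable representation used for hashing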
'Return a dictionary of our items with hashes for each file. Searches through dictionary items and if an item is a file, it calculates the md5 hash of the file contents and stores the file name and hash value as the new key value. However, the overall bunch hash is calculated only on the hash value of a file. The path and name of the file are not used in the overall hash calculation. Returns dict_withhash : dict Copy of our dictionary with the new file hashes included with each file. hashvalue : str The md5 hash value of the `dict_withhash`'
def _get_bunch_hash(self):
infile_list = [] for (key, val) in list(self.items()): if is_container(val): if isinstance(val, dict): item = None else: if (len(val) == 0): raise AttributeError((u'%s attribute is empty' % key)) item = val[0] else: item = val try: if (isinstance(item, str) and os.path.isfile(item)): infile_list.append(key) except TypeError: continue dict_withhash = self.dictcopy() dict_nofilename = self.dictcopy() for item in infile_list: dict_withhash[item] = self._hash_infile(dict_withhash, item) dict_nofilename[item] = [val[1] for val in dict_withhash[item]] sorted_dict = to_str(sorted(dict_nofilename.items())) return (dict_withhash, md5(sorted_dict.encode()).hexdigest())
'Support for the pretty module pretty is included in ipython.externals for ipython > 0.10'
def __pretty__(self, p, cycle):
if cycle: p.text(u'Bunch(...)') else: p.begin_group(6, u'Bunch(') first = True for (k, v) in sorted(self.items()): if (not first): p.text(u',') p.breakable() p.text((k + u'=')) p.pretty(v) first = False p.end_group(6, u')')
'Initialize handlers and inputs'
def __init__(self, **kwargs):
super(BaseTraitedSpec, self).__init__(**kwargs) traits.push_exception_handler(reraise_exceptions=True) undefined_traits = {} for trait in self.copyable_trait_names(): if (not self.traits()[trait].usedefault): undefined_traits[trait] = Undefined self.trait_set(trait_change_notify=False, **undefined_traits) self._generate_handlers() self.trait_set(**kwargs)
'Name, trait generator for user modifiable traits'
def items(self):
for name in sorted(self.copyable_trait_names()): (yield (name, self.traits()[name]))
'Return a well-formatted representation of the traits'
def __repr__(self):
outstr = [] for (name, value) in sorted(self.trait_get().items()): outstr.append((u'%s = %s' % (name, value))) return u'\n{}\n'.format(u'\n'.join(outstr))
'Find all traits with the \'xor\' metadata and attach an event handler to them.'
def _generate_handlers(self):
has_xor = dict(xor=(lambda t: (t is not None))) xors = self.trait_names(**has_xor) for elem in xors: self.on_trait_change(self._xor_warn, elem) has_deprecation = dict(deprecated=(lambda t: (t is not None))) deprecated = self.trait_names(**has_deprecation) for elem in deprecated: self.on_trait_change(self._deprecated_warn, elem)
'Generates warnings for xor traits'
def _xor_warn(self, obj, name, old, new):
if isdefined(new): trait_spec = self.traits()[name] for trait_name in trait_spec.xor: if (trait_name == name): continue if isdefined(getattr(self, trait_name)): self.trait_set(trait_change_notify=False, **{(u'%s' % name): Undefined}) msg = (u'Input "%s" is mutually exclusive with input "%s", which is already set' % (name, trait_name)) raise IOError(msg)
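A minimal sketch of the behavior this handler enforces, on a made-up spec: setting one member of an xor pair is fine while the other is undefined, but setting the second member unsets the offending value and raises:

    from nipype.interfaces.base import TraitedSpec, traits

    class XorSpec(TraitedSpec):
        a = traits.Int(xor=['b'])            # hypothetical mutually exclusive inputs
        b = traits.Int(xor=['a'])

    spec = XorSpec()
    spec.a = 1                               # ok: 'b' is still undefined
    try:
        spec.b = 2                           # 'a' is already set -> IOError from _xor_warn
    except IOError as err:
        print(err)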
'Generates warnings for traits whose requires metadata is not satisfied'
def _requires_warn(self, obj, name, old, new):
if isdefined(new): trait_spec = self.traits()[name] msg = None for trait_name in trait_spec.requires: if (not isdefined(getattr(self, trait_name))): if (not msg): msg = (u'Input %s requires inputs: %s' % (name, u', '.join(trait_spec.requires))) if msg: warn(msg)
'Checks if a user assigns a value to a deprecated trait'
def _deprecated_warn(self, obj, name, old, new):
if isdefined(new): trait_spec = self.traits()[name] msg1 = (u'Input %s in interface %s is deprecated.' % (name, self.__class__.__name__.split(u'InputSpec')[0])) msg2 = (u'Will be removed or raise an error as of release %s' % trait_spec.deprecated) if trait_spec.new_name: if (trait_spec.new_name not in self.copyable_trait_names()): raise TraitError((msg1 + (u' Replacement trait %s not found' % trait_spec.new_name))) msg3 = (u'It has been replaced by %s.' % trait_spec.new_name) else: msg3 = u'' msg = u' '.join((msg1, msg2, msg3)) if (Version(str(trait_spec.deprecated)) < self.package_version): raise TraitError(msg) else: if trait_spec.new_name: msg += (u'Unsetting old value %s; setting new value %s.' % (name, trait_spec.new_name)) warn(msg) if trait_spec.new_name: self.trait_set(trait_change_notify=False, **{(u'%s' % name): Undefined, (u'%s' % trait_spec.new_name): new})
'Inject file hashes into adict[key]'
def _hash_infile(self, adict, key):
stuff = adict[key] if (not is_container(stuff)): stuff = [stuff] file_list = [] for afile in stuff: if is_container(afile): hashlist = self._hash_infile({u'infiles': afile}, u'infiles') hash = [val[1] for val in hashlist] elif (config.get(u'execution', u'hash_method').lower() == u'timestamp'): hash = hash_timestamp(afile) elif (config.get(u'execution', u'hash_method').lower() == u'content'): hash = hash_infile(afile) else: raise Exception((u'Unknown hash method: %s' % config.get(u'execution', u'hash_method'))) file_list.append((afile, hash)) return file_list
'Returns traited class as a dict Augments the trait get function to return a dictionary without notification handles'
def get(self, **kwargs):
out = super(BaseTraitedSpec, self).get(**kwargs) out = self._clean_container(out, Undefined) return out
'Returns traited class as a dict Augments the trait get function to return a dictionary without any traits. The dictionary does not contain any attributes that were Undefined'
def get_traitsfree(self, **kwargs):
out = super(BaseTraitedSpec, self).get(**kwargs) out = self._clean_container(out, skipundefined=True) return out
'Convert a traited object into a pure Python representation.'
def _clean_container(self, object, undefinedval=None, skipundefined=False):
if (isinstance(object, TraitDictObject) or isinstance(object, dict)): out = {} for (key, val) in list(object.items()): if isdefined(val): out[key] = self._clean_container(val, undefinedval) elif (not skipundefined): out[key] = undefinedval elif (isinstance(object, TraitListObject) or isinstance(object, list) or isinstance(object, tuple)): out = [] for val in object: if isdefined(val): out.append(self._clean_container(val, undefinedval)) elif (not skipundefined): out.append(undefinedval) else: out.append(None) if isinstance(object, tuple): out = tuple(out) elif isdefined(object): out = object elif (not skipundefined): out = undefinedval return out
'Return has_metadata for the requested trait name in this interface'
def has_metadata(self, name, metadata, value=None, recursive=True):
return has_metadata(self.trait(name).trait_type, metadata, value, recursive)
'Return a dictionary of our items with hashes for each file. Searches through dictionary items and if an item is a file, it calculates the md5 hash of the file contents and stores the file name and hash value as the new key value. However, the overall bunch hash is calculated only on the hash value of a file. The path and name of the file are not used in the overall hash calculation. Returns dict_withhash : dict Copy of our dictionary with the new file hashes included with each file. hashvalue : str The md5 hash value of the traited spec'
def get_hashval(self, hash_method=None):
dict_withhash = [] dict_nofilename = [] for (name, val) in sorted(self.get().items()): if ((not isdefined(val)) or self.has_metadata(name, u'nohash', True)): continue hash_files = ((not self.has_metadata(name, u'hash_files', False)) and (not self.has_metadata(name, u'name_source'))) dict_nofilename.append((name, self._get_sorteddict(val, hash_method=hash_method, hash_files=hash_files))) dict_withhash.append((name, self._get_sorteddict(val, True, hash_method=hash_method, hash_files=hash_files))) return (dict_withhash, md5(to_str(dict_nofilename).encode()).hexdigest())
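Because this lives on the inputs spec of every interface, the overall input hash can be inspected directly; a short hedged example (interface choice arbitrary, no file inputs set so only the plain traits contribute):

    from nipype.interfaces import fsl

    bet = fsl.BET(frac=0.4)                  # any interface works; BET chosen arbitrarily
    hashed_inputs, hashvalue = bet.inputs.get_hashval()
    print(hashvalue)                         # md5 over the sorted, file-hash-aware input list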
'bug in deepcopy for HasTraits results in weird cloning behavior for added traits'
def __deepcopy__(self, memo):
id_self = id(self) if (id_self in memo): return memo[id_self] dup_dict = deepcopy(self.get(), memo) for key in self.copyable_trait_names(): if (key in self.__dict__.keys()): _ = getattr(self, key) dup = self.clone_traits(memo=memo) for key in self.copyable_trait_names(): try: _ = getattr(dup, key) except: pass dup = self.clone_traits(memo=memo) dup.trait_set(**dup_dict) return dup
'Initialize command with given args and inputs.'
def __init__(self, **inputs):
raise NotImplementedError
'Prints class help'
@classmethod def help(cls):
raise NotImplementedError
'Prints inputs help'
@classmethod def _inputs_help(cls):
raise NotImplementedError
'Prints outputs help'
@classmethod def _outputs_help(cls):
raise NotImplementedError