'A container for layer\'s parameters. Parameters init : str, default \'glorot_uniform\'. The name of the weight initialization function. scale : float, default 0.5 bias : float, default 1.0 Initial values for bias. regularizers : dict Weight regularizers. >>> {\'W\' : L2()} constraints : dict Weight constraints. >>> {\'b\' : MaxNorm()}'
def __init__(self, init='glorot_uniform', scale=0.5, bias=1.0, regularizers=None, constraints=None):
if constraints is None:
    self.constraints = {}
else:
    self.constraints = constraints

if regularizers is None:
    self.regularizers = {}
else:
    self.regularizers = regularizers

self.initial_bias = bias
self.scale = scale
self.init = get_initializer(init)

self._params = {}
self._grads = {}
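A minimal usage sketch of this container, following the access patterns the layer code further down relies on (item assignment, init, initial_bias, init_grad, update_grad, step, n_params). The import of Parameters is omitted because this excerpt does not show its module path, and item assignment is assumed to be supported since the layer setup code uses it:

import numpy as np

params = Parameters(init='glorot_uniform', bias=1.0)   # assumes Parameters is importable from its (unshown) module
params['W'] = params.init((4, 3))                      # weight matrix created by the chosen initializer
params['b'] = np.full((3,), params.initial_bias)       # bias vector filled with the initial bias value
params.init_grad()                                     # allocate zero gradient buffers for 'W' and 'b'
params.update_grad('W', np.ones((4, 3)))               # store a gradient; a configured regularizer would be added here
params.step('W', -0.01 * np.ones((4, 3)))              # apply an update; a configured constraint would clip the result
print(params.n_params)                                 # 4*3 + 3 = 15 trainable values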
'Init gradient arrays corresponding to each weight array.'
def init_grad(self):
for key in self._params.keys():
    if key not in self._grads:
        self._grads[key] = np.zeros_like(self._params[key])
'Increase specific weight by amount of the step parameter.'
def step(self, name, step):
self._params[name] += step
if name in self.constraints:
    self._params[name] = self.constraints[name].clip(self._params[name])
'Update gradient values.'
def update_grad(self, name, value):
self._grads[name] = value
if name in self.regularizers:
    self._grads[name] += self.regularizers[name](self._params[name])
'Count the number of parameters in this layer.'
@property
def n_params(self):
return sum([np.prod(self._params[x].shape) for x in self._params.keys()])
'A 2D convolutional layer. Input shape: (n_images, n_channels, height, width) Parameters n_filters : int, default 8 The number of filters (kernels). filter_shape : tuple(int, int), default (3, 3) The shape of the filters. (height, width) parameters : Parameters instance, default None stride : tuple(int, int), default (1, 1) The step of the convolution. (height, width). padding : tuple(int, int), default (0, 0) The number of pixels to add to each side of the input. (height, width)'
def __init__(self, n_filters=8, filter_shape=(3, 3), padding=(0, 0), stride=(1, 1), parameters=None):
self.padding = padding
self._params = parameters
self.stride = stride
self.filter_shape = filter_shape
self.n_filters = n_filters

if self._params is None:
    self._params = Parameters()
'Max pooling layer. Input shape: (n_images, n_channels, height, width) Parameters pool_shape : tuple(int, int), default (2, 2) stride : tuple(int, int), default (1,1) padding : tuple(int, int), default (0,0)'
def __init__(self, pool_shape=(2, 2), stride=(1, 1), padding=(0, 0)):
self.pool_shape = pool_shape
self.stride = stride
self.padding = padding
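A hedged construction example for the two layers above; the class names Convolution and MaxPooling are guesses, only the constructor signatures come from the code in this excerpt:

conv = Convolution(n_filters=8, filter_shape=(3, 3), stride=(1, 1), padding=(1, 1))
pool = MaxPooling(pool_shape=(2, 2), stride=(2, 2))

# Worked shape example for an input of (n_images, n_channels, height, width) = (32, 1, 28, 28):
# 3x3 filters with padding 1 and stride 1 keep the 28x28 spatial size and produce 8 channels,
# so the conv output is (32, 8, 28, 28); 2x2 pooling with stride 2 then halves it to (32, 8, 14, 14).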
'Allocates initial weights.'
def setup(self, X_shape):
pass
'Returns shape of the current layer.'
def shape(self, x_shape):
raise NotImplementedError()
'A fully connected layer. Parameters output_dim : int'
def __init__(self, output_dim, parameters=None):
self._params = parameters
self.output_dim = output_dim
self.last_input = None

if parameters is None:
    self._params = Parameters()
'Naming convention: i : input gate f : forget gate c : cell o : output gate Parameters x_shape : np.array(batch size, time steps, input shape)'
def setup(self, x_shape):
self.input_dim = x_shape[2]

W_params = ['W_i', 'W_f', 'W_o', 'W_c']
U_params = ['U_i', 'U_f', 'U_o', 'U_c']
b_params = ['b_i', 'b_f', 'b_o', 'b_c']

for param in W_params:
    self._params[param] = self._params.init((self.input_dim, self.hidden_dim))
for param in U_params:
    self._params[param] = self.inner_init((self.hidden_dim, self.hidden_dim))
for param in b_params:
    self._params[param] = np.full((self.hidden_dim,), self._params.initial_bias)

self.W = [self._params[param] for param in W_params]
self.U = [self._params[param] for param in U_params]

self._params.init_grad()

self.hprev = np.zeros((x_shape[0], self.hidden_dim))
self.oprev = np.zeros((x_shape[0], self.hidden_dim))
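The forward pass is not part of this excerpt, but the parameter shapes allocated above match the standard LSTM gate equations. A self-contained NumPy sketch of a single input-gate activation, assuming a sigmoid nonlinearity:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

batch_size, input_dim, hidden_dim = 4, 10, 16
x_t = np.random.randn(batch_size, input_dim)    # input at one time step
h_prev = np.zeros((batch_size, hidden_dim))     # plays the role of self.hprev above

W_i = np.random.randn(input_dim, hidden_dim)    # same shape as _params['W_i']
U_i = np.random.randn(hidden_dim, hidden_dim)   # same shape as _params['U_i']
b_i = np.full((hidden_dim,), 1.0)               # same shape as _params['b_i']

i_t = sigmoid(x_t @ W_i + h_prev @ U_i + b_i)   # input gate, shape (batch_size, hidden_dim)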
'Parameters x_shape : np.array(batch size, time steps, input shape)'
def setup(self, x_shape):
self.input_dim = x_shape[2]

self._params['W'] = self._params.init((self.input_dim, self.hidden_dim))
self._params['b'] = np.full((self.hidden_dim,), self._params.initial_bias)
self._params['U'] = self.inner_init((self.hidden_dim, self.hidden_dim))

self._params.init_grad()

self.hprev = np.zeros((x_shape[0], self.hidden_dim))
'Initialize model\'s layers.'
def _setup_layers(self, x_shape):
x_shape = list(x_shape)
x_shape[0] = self.batch_size

for layer in self.layers:
    layer.setup(x_shape)
    x_shape = layer.shape(x_shape)

self._n_layers = len(self.layers)
self.optimizer.setup(self)
self._initialized = True
logging.info('Total parameters: %s' % self.n_params)
'Find entry layer for back propagation.'
def _find_bprop_entry(self):
if len(self.layers) > 0 and not hasattr(self.layers[-1], 'parameters'):
    return -1
return len(self.layers)
'Forward propagation.'
def fprop(self, X):
for layer in self.layers:
    X = layer.forward_pass(X)
return X
'Returns a list of all parameters.'
@property
def parameters(self):
params = []
for layer in self.parametric_layers:
    params.append(layer.parameters)
return params
'Calculate an error for given examples.'
def error(self, X=None, y=None):
training_phase = self.is_training
if training_phase:
    self.is_training = False

if X is None and y is None:
    y_pred = self._predict(self.X)
    score = self.metric(self.y, y_pred)
else:
    y_pred = self._predict(X)
    score = self.metric(y, y_pred)

if training_phase:
    self.is_training = True
return score
'Shuffle rows in the dataset.'
def shuffle_dataset(self):
n_samples = self.X.shape[0]
indices = np.arange(n_samples)
np.random.shuffle(indices)
self.X = self.X.take(indices, axis=0)
self.y = self.y.take(indices, axis=0)
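The same row-aligned shuffle can be reproduced on plain arrays; a small standalone sketch using np.random.permutation, which is equivalent to the arange-plus-shuffle above:

import numpy as np

X = np.arange(10).reshape(5, 2)
y = np.array([0, 1, 0, 1, 0])

indices = np.random.permutation(X.shape[0])               # one permutation applied to both arrays
X_shuffled = X.take(indices, axis=0)
y_shuffled = y.take(indices, axis=0)
assert set(map(tuple, X_shuffled)) == set(map(tuple, X))  # same rows, new order, labels stay aligned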
'Returns the number of layers.'
@property
def n_layers(self):
return self._n_layers
'Return the number of trainable parameters.'
@property
def n_params(self):
return sum([layer.parameters.n_params for layer in self.parametric_layers])
'Initialize package for parsing Parameters package_name : string Name of the top-level package. *package_name* must be the name of an importable package package_skip_patterns : None or sequence of {strings, regexps} Sequence of strings giving URIs of packages to be excluded Operates on the package path, starting at (including) the first dot in the package path, after *package_name* - so, if *package_name* is ``sphinx``, then ``sphinx.util`` will result in ``.util`` being passed for earching by these regexps. If is None, gives default. Default is: [\'\.tests$\'] module_skip_patterns : None or sequence Sequence of strings giving URIs of modules to be excluded Operates on the module name including preceding URI path, back to the first dot after *package_name*. For example ``sphinx.util.console`` results in the string to search of ``.util.console`` If is None, gives default. Default is: [\'\.setup$\', \'\._\'] class_skip_patterns : None or sequence Sequence of strings giving classes to be excluded Default is: None'
def __init__(self, package_name, package_skip_patterns=None, module_skip_patterns=None, class_skip_patterns=None):
if package_skip_patterns is None:
    package_skip_patterns = [u'\\.tests$']
if module_skip_patterns is None:
    module_skip_patterns = [u'\\.setup$', u'\\._']
if class_skip_patterns:
    self.class_skip_patterns = class_skip_patterns
else:
    self.class_skip_patterns = []

self.package_name = package_name
self.package_skip_patterns = package_skip_patterns
self.module_skip_patterns = module_skip_patterns
'Set package_name'
def set_package_name(self, package_name):
self._package_name = package_name
self.root_module = __import__(package_name)
self.root_path = self.root_module.__path__[0]
'Convert uri to absolute filepath Parameters uri : string URI of python module to return path for Returns path : None or string Returns None if there is no valid path for this URI Otherwise returns absolute file system path for URI'
def _uri2path(self, uri):
if uri == self.package_name:
    return os.path.join(self.root_path, u'__init__.py')

path = uri.replace(u'.', os.path.sep)
path = path.replace(self.package_name + os.path.sep, u'')
path = os.path.join(self.root_path, path)

if os.path.exists(path + u'.py'):
    path += u'.py'
elif os.path.exists(os.path.join(path, u'__init__.py')):
    path = os.path.join(path, u'__init__.py')
else:
    return None
return path
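Illustrative mappings implemented by the method above, for a package rooted at a hypothetical /site-packages/sphinx (actual results depend on what exists on disk):

# 'sphinx'                -> /site-packages/sphinx/__init__.py
# 'sphinx.builder'        -> /site-packages/sphinx/builder.py
# 'sphinx.util'           -> /site-packages/sphinx/util/__init__.py   (a subpackage)
# 'sphinx.does_not_exist' -> None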
'Convert directory path to uri'
def _path2uri(self, dirpath):
relpath = dirpath.replace(self.root_path, self.package_name)
if relpath.startswith(os.path.sep):
    relpath = relpath[1:]
return relpath.replace(os.path.sep, u'.')
'Parse module defined in *uri*'
def _parse_module(self, uri):
filename = self._uri2path(uri)
if filename is None:
    return ([], [])

f = open(filename, u'rt')
(functions, classes) = self._parse_lines(f, uri)
f.close()
return (functions, classes)
'Parse lines of text for functions and classes'
def _parse_lines(self, linesource, module):
functions = []
classes = []
for line in linesource:
    if line.startswith(u'def ') and line.count(u'('):
        name = self._get_object_name(line)
        if not name.startswith(u'_'):
            functions.append(name)
    elif line.startswith(u'class '):
        name = self._get_object_name(line)
        if (not name.startswith(u'_')) and self._survives_exclude(u'.'.join((module, name)), u'class'):
            classes.append(name)
    else:
        pass
functions.sort()
classes.sort()
return (functions, classes)
'Check input and output specs in an uri Parameters uri : string python location of module - e.g \'sphinx.builder\' Returns'
def test_specs(self, uri):
(_, classes) = self._parse_module(uri) if (not classes): return None uri_short = re.sub((u'^%s\\.' % self.package_name), u'', uri) allowed_keys = [u'desc', u'genfile', u'xor', u'requires', u'desc', u'nohash', u'argstr', u'position', u'mandatory', u'copyfile', u'usedefault', u'sep', u'hash_files', u'deprecated', u'new_name', u'min_ver', u'max_ver', u'name_source', u'name_template', u'keep_extension', u'units', u'output_name'] in_built = [u'type', u'copy', u'parent', u'instance_handler', u'comparison_mode', u'array', u'default', u'editor'] bad_specs = [] for c in classes: __import__(uri) try: with warnings.catch_warnings(): warnings.simplefilter(u'ignore') classinst = sys.modules[uri].__dict__[c] except Exception as inst: continue if (not issubclass(classinst, BaseInterface)): continue testdir = os.path.join(*(uri.split(u'.')[:(-1)] + [u'tests'])) if (not os.path.exists(testdir)): os.makedirs(testdir) nonautotest = os.path.join(testdir, (u'test_%s.py' % c)) testfile = os.path.join(testdir, (u'test_auto_%s.py' % c)) if os.path.exists(testfile): os.unlink(testfile) if (not os.path.exists(nonautotest)): with open(testfile, u'wt') as fp: cmd = [u'# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT', u'from __future__ import unicode_literals', (u'from ..%s import %s' % (uri.split(u'.')[(-1)], c)), u''] cmd.append((u'\ndef test_%s_inputs():' % c)) input_fields = u'' for (traitname, trait) in sorted(classinst.input_spec().traits(transient=None).items()): input_fields += (u'%s=dict(' % traitname) for (key, value) in sorted(trait.__dict__.items()): if ((key in in_built) or (key == u'desc')): continue input_fields += (u'%s=%s,\n ' % (key, self._normalize_repr(value))) input_fields += u'),\n ' cmd += [(u' input_map = dict(%s)' % input_fields)] cmd += [(u' inputs = %s.input_spec()' % c)] cmd += [u'\n for key, metadata in list(input_map.items()):\n for metakey, value in list(metadata.items()):\n assert getattr(inputs.traits()[key], metakey) == value'] fp.writelines((u'\n'.join(cmd) + u'\n\n')) else: print((u'%s has nonautotest' % c)) for (traitname, trait) in sorted(classinst.input_spec().traits(transient=None).items()): for key in sorted(trait.__dict__): if (key in in_built): continue parent_metadata = [] if (u'parent' in trait.__dict__): parent_metadata = list(getattr(trait, u'parent').__dict__.keys()) if (key not in ((allowed_keys + classinst._additional_metadata) + parent_metadata)): bad_specs.append([uri, c, u'Inputs', traitname, key]) if ((key == u'mandatory') and (trait.mandatory is not None) and (not trait.mandatory)): bad_specs.append([uri, c, u'Inputs', traitname, u'mandatory=False']) if (not classinst.output_spec): continue if (not os.path.exists(nonautotest)): with open(testfile, u'at') as fp: cmd = [(u'\ndef test_%s_outputs():' % c)] input_fields = u'' for (traitname, trait) in sorted(classinst.output_spec().traits(transient=None).items()): input_fields += (u'%s=dict(' % traitname) for (key, value) in sorted(trait.__dict__.items()): if ((key in in_built) or (key == u'desc')): continue input_fields += (u'%s=%s,\n ' % (key, self._normalize_repr(value))) input_fields += u'),\n ' cmd += [(u' output_map = dict(%s)' % input_fields)] cmd += [(u' outputs = %s.output_spec()' % c)] cmd += [u'\n for key, metadata in list(output_map.items()):\n for metakey, value in list(metadata.items()):\n assert getattr(outputs.traits()[key], metakey) == value'] fp.writelines((u'\n'.join(cmd) + u'\n')) for (traitname, trait) in sorted(classinst.output_spec().traits(transient=None).items()): for key in 
sorted(trait.__dict__): if (key in in_built): continue parent_metadata = [] if (u'parent' in trait.__dict__): parent_metadata = list(getattr(trait, u'parent').__dict__.keys()) if (key not in ((allowed_keys + classinst._additional_metadata) + parent_metadata)): bad_specs.append([uri, c, u'Outputs', traitname, key]) return bad_specs
'Returns True if *matchstr* does not match patterns ``self.package_name`` removed from front of string if present Examples >>> dw = ApiDocWriter(\'sphinx\') >>> dw._survives_exclude(\'sphinx.okpkg\', \'package\') True >>> dw.package_skip_patterns.append(\'^\.badpkg$\') >>> dw._survives_exclude(\'sphinx.badpkg\', \'package\') False >>> dw._survives_exclude(\'sphinx.badpkg\', \'module\') True >>> dw._survives_exclude(\'sphinx.badmod\', \'module\') True >>> dw.module_skip_patterns.append(\'^\.badmod$\') >>> dw._survives_exclude(\'sphinx.badmod\', \'module\') False'
def _survives_exclude(self, matchstr, match_type):
if match_type == u'module':
    patterns = self.module_skip_patterns
elif match_type == u'package':
    patterns = self.package_skip_patterns
elif match_type == u'class':
    patterns = self.class_skip_patterns
else:
    raise ValueError(u'Cannot interpret match type "%s"' % match_type)

L = len(self.package_name)
if matchstr[:L] == self.package_name:
    matchstr = matchstr[L:]

for pat in patterns:
    try:
        pat.search
    except AttributeError:
        pat = re.compile(pat)
    if pat.search(matchstr):
        return False
return True
'Return module sequence discovered from ``self.package_name`` Parameters None Returns mods : sequence Sequence of module names within ``self.package_name`` Examples'
def discover_modules(self):
modules = [self.package_name]
for (dirpath, dirnames, filenames) in os.walk(self.root_path):
    root_uri = self._path2uri(os.path.join(self.root_path, dirpath))
    for dirname in dirnames[:]:
        package_uri = u'.'.join((root_uri, dirname))
        if self._uri2path(package_uri) and self._survives_exclude(package_uri, u'package'):
            modules.append(package_uri)
        else:
            dirnames.remove(dirname)
    for filename in filenames:
        module_name = filename[:-3]
        module_uri = u'.'.join((root_uri, module_name))
        if self._uri2path(module_uri) and self._survives_exclude(module_uri, u'module'):
            modules.append(module_uri)
return sorted(modules)
'Initialize package for parsing Parameters package_name : string Name of the top-level package. *package_name* must be the name of an importable package rst_extension : string, optional Extension for reST files, default \'.rst\' package_skip_patterns : None or sequence of {strings, regexps} Sequence of strings giving URIs of packages to be excluded Operates on the package path, starting at (including) the first dot in the package path, after *package_name* - so, if *package_name* is ``sphinx``, then ``sphinx.util`` will result in ``.util`` being passed for earching by these regexps. If is None, gives default. Default is: [\'\.tests$\'] module_skip_patterns : None or sequence Sequence of strings giving URIs of modules to be excluded Operates on the module name including preceding URI path, back to the first dot after *package_name*. For example ``sphinx.util.console`` results in the string to search of ``.util.console`` If is None, gives default. Default is: [\'\.setup$\', \'\._\']'
def __init__(self, package_name, rst_extension=u'.rst', package_skip_patterns=None, module_skip_patterns=None):
if package_skip_patterns is None:
    package_skip_patterns = [u'\\.tests$']
if module_skip_patterns is None:
    module_skip_patterns = [u'\\.setup$', u'\\._']

self.package_name = package_name
self.rst_extension = rst_extension
self.package_skip_patterns = package_skip_patterns
self.module_skip_patterns = module_skip_patterns
'Set package_name >>> docwriter = ApiDocWriter(\'sphinx\') >>> import sphinx >>> docwriter.root_path == sphinx.__path__[0] True >>> docwriter.package_name = \'docutils\' >>> import docutils >>> docwriter.root_path == docutils.__path__[0] True'
def set_package_name(self, package_name):
self._package_name = package_name
self.root_module = __import__(package_name)
self.root_path = self.root_module.__path__[0]
self.written_modules = None
'Get second token in line >>> docwriter = ApiDocWriter(\'sphinx\') >>> docwriter._get_object_name(" def func(): ") # doctest: +ALLOW_UNICODE u\'func\' >>> docwriter._get_object_name(" class Klass(object): ") # doctest: +ALLOW_UNICODE \'Klass\' >>> docwriter._get_object_name(" class Klass: ") # doctest: +ALLOW_UNICODE \'Klass\''
def _get_object_name(self, line):
name = line.split()[1].split(u'(')[0].strip()
return name.rstrip(u':')
'Convert uri to absolute filepath Parameters uri : string URI of python module to return path for Returns path : None or string Returns None if there is no valid path for this URI Otherwise returns absolute file system path for URI Examples >>> docwriter = ApiDocWriter(\'sphinx\') >>> import sphinx >>> modpath = sphinx.__path__[0] >>> res = docwriter._uri2path(\'sphinx.builder\') >>> res == os.path.join(modpath, \'builder.py\') True >>> res = docwriter._uri2path(\'sphinx\') >>> res == os.path.join(modpath, \'__init__.py\') True >>> docwriter._uri2path(\'sphinx.does_not_exist\')'
def _uri2path(self, uri):
if uri == self.package_name:
    return os.path.join(self.root_path, u'__init__.py')

path = uri.replace(u'.', os.path.sep)
path = path.replace(self.package_name + os.path.sep, u'')
path = os.path.join(self.root_path, path)

if os.path.exists(path + u'.py'):
    path += u'.py'
elif os.path.exists(os.path.join(path, u'__init__.py')):
    path = os.path.join(path, u'__init__.py')
else:
    return None
return path
'Convert directory path to uri'
def _path2uri(self, dirpath):
relpath = dirpath.replace(self.root_path, self.package_name)
if relpath.startswith(os.path.sep):
    relpath = relpath[1:]
return relpath.replace(os.path.sep, u'.')
'Parse module defined in *uri*'
def _parse_module(self, uri):
filename = self._uri2path(uri)
if filename is None:
    return ([], [])

f = open(filename, u'rt')
(functions, classes) = self._parse_lines(f)
f.close()
return (functions, classes)
'Parse lines of text for functions and classes'
def _parse_lines(self, linesource):
functions = []
classes = []
for line in linesource:
    if line.startswith(u'def ') and line.count(u'('):
        name = self._get_object_name(line)
        if not name.startswith(u'_'):
            functions.append(name)
    elif line.startswith(u'class '):
        name = self._get_object_name(line)
        if not name.startswith(u'_'):
            classes.append(name)
    else:
        pass
functions.sort()
classes.sort()
return (functions, classes)
'Make autodoc documentation template string for a module Parameters uri : string python location of module - e.g \'sphinx.builder\' Returns S : string Contents of API doc'
def generate_api_doc(self, uri):
(functions, classes) = self._parse_module(uri) if ((not len(functions)) and (not len(classes))): print((u'WARNING: Empty -', uri)) return u'' uri_short = re.sub((u'^%s\\.' % self.package_name), u'', uri) ad = u'.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n' chap_title = uri_short ad += (((chap_title + u'\n') + (self.rst_section_levels[1] * len(chap_title))) + u'\n\n') if (u'.' in uri): title = ((u'Module: :mod:`' + uri_short) + u'`') else: title = ((u':mod:`' + uri_short) + u'`') ad += ((title + u'\n') + (self.rst_section_levels[2] * len(title))) if len(classes): ad += (u'\nInheritance diagram for ``%s``:\n\n' % uri) ad += (u'.. inheritance-diagram:: %s \n' % uri) ad += u' :parts: 2\n' ad += ((u'\n.. automodule:: ' + uri) + u'\n') ad += ((u'\n.. currentmodule:: ' + uri) + u'\n') multi_class = (len(classes) > 1) multi_fx = (len(functions) > 1) if multi_class: ad += ((((u'\n' + u'Classes') + u'\n') + (self.rst_section_levels[2] * 7)) + u'\n') elif (len(classes) and multi_fx): ad += ((((u'\n' + u'Class') + u'\n') + (self.rst_section_levels[2] * 5)) + u'\n') for c in classes: ad += ((((u'\n:class:`' + c) + u'`\n') + (self.rst_section_levels[(multi_class + 2)] * (len(c) + 9))) + u'\n\n') ad += ((u'\n.. autoclass:: ' + c) + u'\n') ad += u' :members:\n :undoc-members:\n :show-inheritance:\n :inherited-members:\n\n .. automethod:: __init__\n' if multi_fx: ad += ((((u'\n' + u'Functions') + u'\n') + (self.rst_section_levels[2] * 9)) + u'\n\n') elif (len(functions) and multi_class): ad += ((((u'\n' + u'Function') + u'\n') + (self.rst_section_levels[2] * 8)) + u'\n\n') for f in functions: ad += ((((u'\n.. autofunction:: ' + uri) + u'.') + f) + u'\n\n') return ad
'Returns True if *matchstr* does not match patterns ``self.package_name`` removed from front of string if present Examples >>> dw = ApiDocWriter(\'sphinx\') >>> dw._survives_exclude(\'sphinx.okpkg\', \'package\') True >>> dw.package_skip_patterns.append(\'^\.badpkg$\') >>> dw._survives_exclude(\'sphinx.badpkg\', \'package\') False >>> dw._survives_exclude(\'sphinx.badpkg\', \'module\') True >>> dw._survives_exclude(\'sphinx.badmod\', \'module\') True >>> dw.module_skip_patterns.append(\'^\.badmod$\') >>> dw._survives_exclude(\'sphinx.badmod\', \'module\') False'
def _survives_exclude(self, matchstr, match_type):
if match_type == u'module':
    patterns = self.module_skip_patterns
elif match_type == u'package':
    patterns = self.package_skip_patterns
else:
    raise ValueError(u'Cannot interpret match type "%s"' % match_type)

L = len(self.package_name)
if matchstr[:L] == self.package_name:
    matchstr = matchstr[L:]

for pat in patterns:
    try:
        pat.search
    except AttributeError:
        pat = re.compile(pat)
    if pat.search(matchstr):
        return False
return True
'Return module sequence discovered from ``self.package_name`` Parameters None Returns mods : sequence Sequence of module names within ``self.package_name`` Examples >>> dw = ApiDocWriter(\'sphinx\') >>> mods = dw.discover_modules() >>> \'sphinx.util\' in mods True >>> dw.package_skip_patterns.append(\'\.util$\') >>> \'sphinx.util\' in dw.discover_modules() False'
def discover_modules(self):
modules = []
for (dirpath, dirnames, filenames) in os.walk(self.root_path):
    root_uri = self._path2uri(os.path.join(self.root_path, dirpath))
    for dirname in dirnames[:]:
        package_uri = u'.'.join((root_uri, dirname))
        if self._uri2path(package_uri) and self._survives_exclude(package_uri, u'package'):
            modules.append(package_uri)
        else:
            dirnames.remove(dirname)
    for filename in filenames:
        module_name = filename[:-3]
        module_uri = u'.'.join((root_uri, module_name))
        if self._uri2path(module_uri) and self._survives_exclude(module_uri, u'module'):
            modules.append(module_uri)
return sorted(modules)
'Generate API reST files. Parameters outdir : string Directory name in which to store files We create automatic filenames for each module Returns None Notes Sets self.written_modules to list of written modules'
def write_api_docs(self, outdir):
if not os.path.exists(outdir):
    os.mkdir(outdir)
modules = self.discover_modules()
self.write_modules_api(modules, outdir)
'Make a reST API index file from written files Parameters path : string Filename to write index to outdir : string Directory to which to write generated index file froot : string, optional root (filename without extension) of filename to write to Defaults to \'gen\'. We add ``self.rst_extension``. relative_to : string path to which written filenames are relative. This component of the written file path will be removed from outdir, in the generated index. Default is None, meaning, leave path as it is.'
def write_index(self, outdir, froot=u'gen', relative_to=None):
if self.written_modules is None:
    raise ValueError(u'No modules written')

path = os.path.join(outdir, froot + self.rst_extension)
if relative_to is not None:
    relpath = outdir.replace(relative_to + os.path.sep, u'')
else:
    relpath = outdir

idx = open(path, u'wt')
w = idx.write
w(u'.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')
w(u'.. toctree::\n\n')
for f in self.written_modules:
    w(u' %s\n' % os.path.join(relpath, f))
idx.close()
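Putting the pieces together, the apparent call sequence is: discover modules, write one reST file per module, then write a toctree index. A hedged usage sketch (write_modules_api is called by write_api_docs but is not included in this excerpt, and the import path of ApiDocWriter is not shown):

docwriter = ApiDocWriter('sphinx', rst_extension='.rst')
docwriter.package_skip_patterns.append('\\.sandbox$')   # hypothetical extra exclusion
docwriter.write_api_docs('api')                         # one .rst per discovered module
docwriter.write_index('api', froot='gen')               # api/gen.rst containing the toctree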
'Initialize package for parsing Parameters package_name : string Name of the top-level package. *package_name* must be the name of an importable package rst_extension : string, optional Extension for reST files, default \'.rst\' package_skip_patterns : None or sequence of {strings, regexps} Sequence of strings giving URIs of packages to be excluded Operates on the package path, starting at (including) the first dot in the package path, after *package_name* - so, if *package_name* is ``sphinx``, then ``sphinx.util`` will result in ``.util`` being passed for earching by these regexps. If is None, gives default. Default is: [\'\.tests$\'] module_skip_patterns : None or sequence Sequence of strings giving URIs of modules to be excluded Operates on the module name including preceding URI path, back to the first dot after *package_name*. For example ``sphinx.util.console`` results in the string to search of ``.util.console`` If is None, gives default. Default is: [\'\.setup$\', \'\._\'] class_skip_patterns : None or sequence Sequence of strings giving classes to be excluded Default is: None'
def __init__(self, package_name, rst_extension=u'.rst', package_skip_patterns=None, module_skip_patterns=None, class_skip_patterns=None):
if package_skip_patterns is None:
    package_skip_patterns = [u'\\.tests$']
if module_skip_patterns is None:
    module_skip_patterns = [u'\\.setup$', u'\\._']
if class_skip_patterns:
    self.class_skip_patterns = class_skip_patterns
else:
    self.class_skip_patterns = []

self.package_name = package_name
self.rst_extension = rst_extension
self.package_skip_patterns = package_skip_patterns
self.module_skip_patterns = module_skip_patterns
'Set package_name >>> docwriter = ApiDocWriter(\'sphinx\') >>> import sphinx >>> docwriter.root_path == sphinx.__path__[0] True >>> docwriter.package_name = \'docutils\' >>> import docutils >>> docwriter.root_path == docutils.__path__[0] True'
def set_package_name(self, package_name):
self._package_name = package_name
self.root_module = __import__(package_name)
self.root_path = self.root_module.__path__[0]
self.written_modules = None
'Get second token in line >>> docwriter = ApiDocWriter(\'sphinx\') >>> docwriter._get_object_name(" def func(): ") # doctest: +ALLOW_UNICODE u\'func\' >>> docwriter._get_object_name(" class Klass(object): ") # doctest: +ALLOW_UNICODE \'Klass\' >>> docwriter._get_object_name(" class Klass: ") # doctest: +ALLOW_UNICODE \'Klass\''
def _get_object_name(self, line):
name = line.split()[1].split(u'(')[0].strip()
return name.rstrip(u':')
'Convert uri to absolute filepath Parameters uri : string URI of python module to return path for Returns path : None or string Returns None if there is no valid path for this URI Otherwise returns absolute file system path for URI Examples >>> docwriter = ApiDocWriter(\'sphinx\') >>> import sphinx >>> modpath = sphinx.__path__[0] >>> res = docwriter._uri2path(\'sphinx.builder\') >>> res == os.path.join(modpath, \'builder.py\') True >>> res = docwriter._uri2path(\'sphinx\') >>> res == os.path.join(modpath, \'__init__.py\') True >>> docwriter._uri2path(\'sphinx.does_not_exist\')'
def _uri2path(self, uri):
if uri == self.package_name:
    return os.path.join(self.root_path, u'__init__.py')

path = uri.replace(u'.', os.path.sep)
path = path.replace(self.package_name + os.path.sep, u'')
path = os.path.join(self.root_path, path)

if os.path.exists(path + u'.py'):
    path += u'.py'
elif os.path.exists(os.path.join(path, u'__init__.py')):
    path = os.path.join(path, u'__init__.py')
else:
    return None
return path
'Convert directory path to uri'
def _path2uri(self, dirpath):
relpath = dirpath.replace(self.root_path, self.package_name)
if relpath.startswith(os.path.sep):
    relpath = relpath[1:]
return relpath.replace(os.path.sep, u'.')
'Parse module defined in *uri*'
def _parse_module(self, uri):
filename = self._uri2path(uri)
if filename is None:
    return ([], [])

f = open(filename, u'rt')
(functions, classes) = self._parse_lines(f, uri)
f.close()
return (functions, classes)
'Parse lines of text for functions and classes'
def _parse_lines(self, linesource, module):
functions = []
classes = []
for line in linesource:
    if line.startswith(u'def ') and line.count(u'('):
        name = self._get_object_name(line)
        if not name.startswith(u'_'):
            functions.append(name)
    elif line.startswith(u'class '):
        name = self._get_object_name(line)
        if (not name.startswith(u'_')) and self._survives_exclude(u'.'.join((module, name)), u'class'):
            classes.append(name)
    else:
        pass
functions.sort()
classes.sort()
return (functions, classes)
'Make autodoc documentation template string for a module Parameters uri : string python location of module - e.g \'sphinx.builder\' Returns S : string Contents of API doc'
def generate_api_doc(self, uri):
(functions, classes) = self._parse_module(uri) workflows = [] helper_functions = [] for function in functions: try: __import__(uri) finst = sys.modules[uri].__dict__[function] except TypeError: continue try: workflow = finst() except Exception: helper_functions.append((function, finst)) continue if isinstance(workflow, Workflow): workflows.append((workflow, function, finst)) if ((not classes) and (not workflows) and (not helper_functions)): print(u'WARNING: Empty -', uri) return u'' uri_short = re.sub((u'^%s\\.' % self.package_name), u'', uri) ad = u'.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n' chap_title = uri_short ad += (((chap_title + u'\n') + (self.rst_section_levels[1] * len(chap_title))) + u'\n\n') for c in classes: __import__(uri) print(c) try: with warnings.catch_warnings(): warnings.simplefilter(u'ignore') classinst = sys.modules[uri].__dict__[c] except Exception as inst: print(inst) continue if (not issubclass(classinst, BaseInterface)): continue label = (((uri + u'.') + c) + u':') ad += (u'\n.. _%s\n\n' % label) ad += (u'\n.. index:: %s\n\n' % c) ad += (((c + u'\n') + (self.rst_section_levels[2] * len(c))) + u'\n\n') ad += (u'`Link to code <%s>`__\n\n' % get_file_url(classinst)) ad += (trim(classinst.help(returnhelp=True), self.rst_section_levels[3]) + u'\n') if (workflows or helper_functions): ad += (u'\n.. module:: %s\n\n' % uri) for (workflow, name, finst) in workflows: label = ((u':func:`' + name) + u'`') ad += (u'\n.. _%s:\n\n' % ((uri + u'.') + name)) ad += u'\n'.join((label, (self.rst_section_levels[2] * len(label)))) ad += (u'\n\n`Link to code <%s>`__\n\n' % get_file_url(finst)) helpstr = trim(finst.__doc__, self.rst_section_levels[3]) ad += ((u'\n\n' + helpstr) + u'\n\n') u"\n # use sphinx autodoc for function signature\n ad += '\n.. _%s:\n\n' % (uri + '.' + name)\n ad += '.. autofunction:: %s\n\n' % name\n " (_, fname) = tempfile.mkstemp(suffix=u'.dot') workflow.write_graph(dotfilename=fname, graph2use=u'hierarchical') ad += (self._write_graph_section(fname, u'Graph') + u'\n') for (name, finst) in helper_functions: label = ((u':func:`' + name) + u'`') ad += (u'\n.. _%s:\n\n' % ((uri + u'.') + name)) ad += u'\n'.join((label, (self.rst_section_levels[2] * len(label)))) ad += (u'\n\n`Link to code <%s>`__\n\n' % get_file_url(finst)) helpstr = trim(finst.__doc__, self.rst_section_levels[3]) ad += ((u'\n\n' + helpstr) + u'\n\n') return ad
'Returns True if *matchstr* does not match patterns ``self.package_name`` removed from front of string if present Examples >>> dw = ApiDocWriter(\'sphinx\') >>> dw._survives_exclude(\'sphinx.okpkg\', \'package\') True >>> dw.package_skip_patterns.append(\'^\.badpkg$\') >>> dw._survives_exclude(\'sphinx.badpkg\', \'package\') False >>> dw._survives_exclude(\'sphinx.badpkg\', \'module\') True >>> dw._survives_exclude(\'sphinx.badmod\', \'module\') True >>> dw.module_skip_patterns.append(\'^\.badmod$\') >>> dw._survives_exclude(\'sphinx.badmod\', \'module\') False'
def _survives_exclude(self, matchstr, match_type):
if match_type == u'module':
    patterns = self.module_skip_patterns
elif match_type == u'package':
    patterns = self.package_skip_patterns
elif match_type == u'class':
    patterns = self.class_skip_patterns
else:
    raise ValueError(u'Cannot interpret match type "%s"' % match_type)

L = len(self.package_name)
if matchstr[:L] == self.package_name:
    matchstr = matchstr[L:]

for pat in patterns:
    try:
        pat.search
    except AttributeError:
        pat = re.compile(pat)
    if pat.search(matchstr):
        return False
return True
'Return module sequence discovered from ``self.package_name`` Parameters None Returns mods : sequence Sequence of module names within ``self.package_name`` Examples >>> dw = ApiDocWriter(\'sphinx\') >>> mods = dw.discover_modules() >>> \'sphinx.util\' in mods True >>> dw.package_skip_patterns.append(\'\.util$\') >>> \'sphinx.util\' in dw.discover_modules() False'
def discover_modules(self):
modules = [self.package_name]
for (dirpath, dirnames, filenames) in os.walk(self.root_path):
    root_uri = self._path2uri(os.path.join(self.root_path, dirpath))
    for dirname in dirnames[:]:
        package_uri = u'.'.join((root_uri, dirname))
        if self._uri2path(package_uri) and self._survives_exclude(package_uri, u'package'):
            modules.append(package_uri)
        else:
            dirnames.remove(dirname)
    for filename in filenames:
        module_name = filename[:-3]
        module_uri = u'.'.join((root_uri, module_name))
        if self._uri2path(module_uri) and self._survives_exclude(module_uri, u'module'):
            modules.append(module_uri)
return sorted(modules)
'Generate API reST files. Parameters outdir : string Directory name in which to store files We create automatic filenames for each module Returns None Notes Sets self.written_modules to list of written modules'
def write_api_docs(self, outdir):
if not os.path.exists(outdir):
    os.mkdir(outdir)
modules = self.discover_modules()
self.write_modules_api(modules, outdir)
'Make a reST API index file from written files Parameters path : string Filename to write index to outdir : string Directory to which to write generated index file froot : string, optional root (filename without extension) of filename to write to Defaults to \'gen\'. We add ``self.rst_extension``. relative_to : string path to which written filenames are relative. This component of the written file path will be removed from outdir, in the generated index. Default is None, meaning, leave path as it is.'
def write_index(self, outdir, froot=u'gen', relative_to=None):
if self.written_modules is None:
    raise ValueError(u'No modules written')

path = os.path.join(outdir, froot + self.rst_extension)
if relative_to is not None:
    relpath = outdir.replace(relative_to + os.path.sep, u'')
else:
    relpath = outdir

idx = open(path, u'wt')
w = idx.write
w(u'.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')
w(u'.. toctree::\n')
w(u' :maxdepth: 2\n\n')
for f in self.written_modules:
    w(u' %s\n' % os.path.join(relpath, f))
idx.close()
'Executes a pre-defined pipeline in a distributed manner using IPython\'s ipyparallel processing interface'
def run(self, graph, config, updatehash=False):
try:
    name = u'ipyparallel'
    __import__(name)
    self.iparallel = sys.modules[name]
except ImportError as e:
    raise_from(ImportError(u'ipyparallel not found. Parallel execution will be unavailable'), e)

try:
    self.taskclient = self.iparallel.Client(**self.client_args)
except Exception as e:
    if isinstance(e, TimeoutError):
        raise_from(Exception(u'No IPython clients found.'), e)
    if isinstance(e, IOError):
        raise_from(Exception(u'ipcluster/ipcontroller has not been started'), e)
    if isinstance(e, ValueError):
        raise_from(Exception(u'Ipython kernel not installed'), e)
    else:
        raise e

return super(IPythonPlugin, self).run(graph, config, updatehash=updatehash)
'Executes a pre-defined pipeline in a serial order. Parameters graph : networkx digraph defines order of execution'
def run(self, graph, config, updatehash=False):
if not isinstance(graph, nx.DiGraph):
    raise ValueError(u'Input must be a networkx digraph object')

logger.info(u'Running serially.')
old_wd = os.getcwd()
notrun = []
donotrun = []
(nodes, _) = topological_sort(graph)
for node in nodes:
    try:
        if node in donotrun:
            continue
        if self._status_callback:
            self._status_callback(node, u'start')
        node.run(updatehash=updatehash)
        if self._status_callback:
            self._status_callback(node, u'end')
    except:
        os.chdir(old_wd)
        if str2bool(config[u'execution'][u'stop_on_first_crash']):
            raise
        crashfile = report_crash(node)
        subnodes = [s for s in dfs_preorder(graph, node)]
        notrun.append(dict(node=node, dependents=subnodes, crashfile=crashfile))
        donotrun.extend(subnodes)
        if self._status_callback:
            self._status_callback(node, u'exception')
report_nodes_not_run(notrun)
'Initialize runtime attributes to none procs: list (N) of underlying interface elements to be processed proc_done: a boolean vector (N) signifying whether a process has been executed proc_pending: a boolean vector (N) signifying whether a process is currently running. Note: A process is finished only when both proc_done==True and proc_pending==False depidx: a boolean matrix (NxN) storing the dependency structure across processes. Process dependencies are derived from each column.'
def __init__(self, plugin_args=None):
super(DistributedPluginBase, self).__init__(plugin_args=plugin_args)
self.procs = None
self.depidx = None
self.refidx = None
self.mapnodes = None
self.mapnodesubids = None
self.proc_done = None
self.proc_pending = None
self.max_jobs = np.inf
if plugin_args and (u'max_jobs' in plugin_args):
    self.max_jobs = plugin_args[u'max_jobs']
'Executes a pre-defined pipeline using distributed approaches'
def run(self, graph, config, updatehash=False):
logger.info(u'Running in parallel.') self._config = config self._generate_dependency_list(graph) self.pending_tasks = [] self.readytorun = [] self.mapnodes = [] self.mapnodesubids = {} notrun = [] while (np.any((self.proc_done == False)) | np.any((self.proc_pending == True))): toappend = [] while self.pending_tasks: (taskid, jobid) = self.pending_tasks.pop() try: result = self._get_result(taskid) if result: if result[u'traceback']: notrun.append(self._clean_queue(jobid, graph, result=result)) else: self._task_finished_cb(jobid) self._remove_node_dirs() self._clear_task(taskid) else: toappend.insert(0, (taskid, jobid)) except Exception: result = {u'result': None, u'traceback': format_exc()} notrun.append(self._clean_queue(jobid, graph, result=result)) if toappend: self.pending_tasks.extend(toappend) num_jobs = len(self.pending_tasks) logger.debug((u'Number of pending tasks: %d' % num_jobs)) if (num_jobs < self.max_jobs): self._send_procs_to_workers(updatehash=updatehash, graph=graph) else: logger.debug(u'Not submitting') self._wait() self._remove_node_dirs() report_nodes_not_run(notrun) self._close()
'Sends jobs to workers'
def _send_procs_to_workers(self, updatehash=False, graph=None):
while np.any((self.proc_done == False)): num_jobs = len(self.pending_tasks) if np.isinf(self.max_jobs): slots = None else: slots = max(0, (self.max_jobs - num_jobs)) logger.debug((u'Slots available: %s' % slots)) if ((num_jobs >= self.max_jobs) or (slots == 0)): break jobids = np.flatnonzero(((self.proc_done == False) & (self.depidx.sum(axis=0) == 0).__array__())) if (len(jobids) > 0): if slots: logger.info((u'Pending[%d] Submitting[%d] jobs Slots[%d]' % (num_jobs, len(jobids[:slots]), slots))) else: logger.info((u'Pending[%d] Submitting[%d] jobs Slots[inf]' % (num_jobs, len(jobids)))) for jobid in jobids[:slots]: if isinstance(self.procs[jobid], MapNode): try: num_subnodes = self.procs[jobid].num_subnodes() except Exception: self._clean_queue(jobid, graph) self.proc_pending[jobid] = False continue if (num_subnodes > 1): submit = self._submit_mapnode(jobid) if (not submit): continue self.proc_done[jobid] = True self.proc_pending[jobid] = True logger.info((u'Submitting: %s ID: %d' % (self.procs[jobid]._id, jobid))) if self._status_callback: self._status_callback(self.procs[jobid], u'start') continue_with_submission = True if str2bool(self.procs[jobid].config[u'execution'][u'local_hash_check']): logger.debug(u'checking hash locally') try: (hash_exists, _, _, _) = self.procs[jobid].hash_exists() logger.debug((u'Hash exists %s' % str(hash_exists))) if (hash_exists and ((self.procs[jobid].overwrite is False) or ((self.procs[jobid].overwrite is None) and (not self.procs[jobid]._interface.always_run)))): continue_with_submission = False self._task_finished_cb(jobid) self._remove_node_dirs() except Exception: self._clean_queue(jobid, graph) self.proc_pending[jobid] = False continue_with_submission = False logger.debug((u'Finished checking hash %s' % str(continue_with_submission))) if continue_with_submission: if self.procs[jobid].run_without_submitting: logger.debug((u'Running node %s on master thread' % self.procs[jobid])) try: self.procs[jobid].run() except Exception: self._clean_queue(jobid, graph) self._task_finished_cb(jobid) self._remove_node_dirs() else: tid = self._submit_job(deepcopy(self.procs[jobid]), updatehash=updatehash) if (tid is None): self.proc_done[jobid] = False self.proc_pending[jobid] = False else: self.pending_tasks.insert(0, (tid, jobid)) logger.info((u'Finished submitting: %s ID: %d' % (self.procs[jobid]._id, jobid))) else: break
'Extract outputs and assign to inputs of dependent tasks This is called when a job is completed.'
def _task_finished_cb(self, jobid):
logger.info(u'[Job finished] jobname: %s jobid: %d' % (self.procs[jobid]._id, jobid))
if self._status_callback:
    self._status_callback(self.procs[jobid], u'end')

self.proc_pending[jobid] = False
rowview = self.depidx.getrowview(jobid)
rowview[rowview.nonzero()] = 0
if jobid not in self.mapnodesubids:
    self.refidx[self.refidx[:, jobid].nonzero()[0], jobid] = 0
'Generates a dependency list for a list of graphs.'
def _generate_dependency_list(self, graph):
(self.procs, _) = topological_sort(graph)
try:
    self.depidx = nx.to_scipy_sparse_matrix(graph, nodelist=self.procs, format=u'lil')
except:
    self.depidx = nx.to_scipy_sparse_matrix(graph, nodelist=self.procs)
self.refidx = deepcopy(self.depidx)
self.refidx.astype = np.int
self.proc_done = np.zeros(len(self.procs), dtype=bool)
self.proc_pending = np.zeros(len(self.procs), dtype=bool)
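A small standalone sketch of the bookkeeping built above: for a three-node chain a -> b -> c, a column of the sparse adjacency matrix sums to zero exactly when that node has no unfinished dependencies, which mirrors the depidx.sum(axis=0) == 0 test used by the schedulers in this module. Recent networkx releases renamed to_scipy_sparse_matrix to to_scipy_sparse_array, so this sketch uses the newer name:

import networkx as nx
import numpy as np

g = nx.DiGraph()
g.add_edges_from([('a', 'b'), ('b', 'c')])

procs = list(nx.topological_sort(g))                     # ['a', 'b', 'c']
depidx = nx.to_scipy_sparse_array(g, nodelist=procs, format='lil')

ready = np.flatnonzero(depidx.sum(axis=0) == 0)          # nodes whose dependency column is empty
print([procs[i] for i in ready])                         # ['a']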
'Removes directories whose outputs have already been used up'
def _remove_node_dirs(self):
if str2bool(self._config[u'execution'][u'remove_node_directories']):
    for idx in np.nonzero((self.refidx.sum(axis=1) == 0).__array__())[0]:
        if idx in self.mapnodesubids:
            continue
        if self.proc_done[idx] and (not self.proc_pending[idx]):
            self.refidx[idx, idx] = -1
            outdir = self.procs[idx]._output_directory()
            logger.info(u'[node dependencies finished] removing node: %s from directory %s' % (self.procs[idx]._id, outdir))
            shutil.rmtree(outdir)
'Check if a task is pending in the batch system'
def _is_pending(self, taskid):
raise NotImplementedError
'Submit a task to the batch system'
def _submit_batchtask(self, scriptfile, node):
raise NotImplementedError
'submit job and return taskid'
def _submit_job(self, node, updatehash=False):
pyscript = create_pyscript(node, updatehash=updatehash)
(batch_dir, name) = os.path.split(pyscript)
name = u'.'.join(name.split(u'.')[:-1])
batchscript = u'\n'.join((self._template, u'%s %s' % (sys.executable, pyscript)))
batchscriptfile = os.path.join(batch_dir, u'batchscript_%s.sh' % name)
with open(batchscriptfile, u'wt') as fp:
    fp.writelines(batchscript)
return self._submit_batchtask(batchscriptfile, node)
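The generated batch script is simply the plugin template followed by one line that runs the node's generated Python script with the current interpreter. A sketch of the string being written, with an illustrative SLURM-style template and path (neither comes from this excerpt):

import sys

template = '#!/bin/bash\n#SBATCH --time=1:00:00'       # illustrative plugin template
pyscript = '/scratch/batch/pyscript_node1.py'          # illustrative path; really produced by create_pyscript
batchscript = '\n'.join((template, '%s %s' % (sys.executable, pyscript)))
print(batchscript)
# #!/bin/bash
# #SBATCH --time=1:00:00
# <sys.executable> /scratch/batch/pyscript_node1.py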
'pyfiles: list of files corresponding to a topological sort dependencies: dictionary of dependencies based on the toplogical sort'
def _submit_graph(self, pyfiles, dependencies, nodes):
raise NotImplementedError
'This is more or less the _submit_batchtask from sge.py with flipped variable names, different command line switches, and different output formatting/processing'
def _submit_batchtask(self, scriptfile, node):
cmd = CommandLine(u'sbatch', environ=dict(os.environ), terminal_output=u'allatonce') path = os.path.dirname(scriptfile) sbatch_args = u'' if self._sbatch_args: sbatch_args = self._sbatch_args if (u'sbatch_args' in node.plugin_args): if ((u'overwrite' in node.plugin_args) and node.plugin_args[u'overwrite']): sbatch_args = node.plugin_args[u'sbatch_args'] else: sbatch_args += (u' ' + node.plugin_args[u'sbatch_args']) if (u'-o' not in sbatch_args): sbatch_args = (u'%s -o %s' % (sbatch_args, os.path.join(path, u'slurm-%j.out'))) if (u'-e' not in sbatch_args): sbatch_args = (u'%s -e %s' % (sbatch_args, os.path.join(path, u'slurm-%j.out'))) if node._hierarchy: jobname = u'.'.join((dict(os.environ)[u'LOGNAME'], node._hierarchy, node._id)) else: jobname = u'.'.join((dict(os.environ)[u'LOGNAME'], node._id)) jobnameitems = jobname.split(u'.') jobnameitems.reverse() jobname = u'.'.join(jobnameitems) cmd.inputs.args = (u'%s -J %s %s' % (sbatch_args, jobname, scriptfile)) oldlevel = iflogger.level iflogger.setLevel(logging.getLevelName(u'CRITICAL')) tries = 0 while True: try: result = cmd.run() except Exception as e: if (tries < self._max_tries): tries += 1 sleep(self._retry_timeout) else: iflogger.setLevel(oldlevel) raise RuntimeError(u'\n'.join(((u'Could not submit sbatch task for node %s' % node._id), str(e)))) else: break logger.debug(u'Ran command ({0})'.format(cmd.cmdline)) iflogger.setLevel(oldlevel) lines = [line for line in result.runtime.stdout.split(u'\n') if line] taskid = int(re.match(self._jobid_re, lines[(-1)]).groups()[0]) self._pending[taskid] = node.output_dir() logger.debug((u'submitted sbatch task: %d for node %s' % (taskid, node._id))) return taskid
'Executes a pre-defined pipeline in a distributed manner using IPython\'s ipyparallel processing interface'
def run(self, graph, config, updatehash=False):
try:
    name = u'IPython.kernel.client'
    __import__(name)
    self.ipyclient = sys.modules[name]
except ImportError as e:
    raise_from(ImportError(u'Ipython kernel not found. Parallel execution will be unavailable'), e)

try:
    self.taskclient = self.ipyclient.TaskClient()
except Exception as e:
    if isinstance(e, ConnectionRefusedError):
        raise_from(Exception(u'No IPython clients found.'), e)
    if isinstance(e, ValueError):
        raise_from(Exception(u'Ipython kernel not installed'), e)

return super(IPythonXPlugin, self).run(graph, config, updatehash=updatehash)
'Return True, unless job is in the "zombie" status'
def is_job_state_pending(self):
time_diff = time.time() - self._job_info_creation_time
if self.is_zombie():
    sge_debug_print(u"DONE! QJobInfo.IsPending found in 'zombie' list, returning False so claiming done!\n{0}".format(self))
    is_pending_status = False
elif self.is_initializing() and (time_diff > 600):
    sge_debug_print(u"FAILURE! QJobInfo.IsPending found long running at {1} seconds'initializing' returning False for to break loop!\n{0}".format(self, time_diff))
    is_pending_status = True
else:
    is_pending_status = True
return is_pending_status
':param qstat_instant_executable: :param qstat_cached_executable:'
def __init__(self, qstat_instant_executable=u'qstat', qstat_cached_executable=u'qstat'):
self._qstat_instant_executable = qstat_instant_executable
self._qstat_cached_executable = qstat_cached_executable
self._out_of_scope_jobs = list()
self._task_dictionary = dict()
self._remove_old_jobs()
'This is only called during initialization of the function for the purpose of identifying jobs that are not part of this run of nipype. They are jobs that existed prior to starting new jobs, so they are irrelevant.'
def _remove_old_jobs(self):
self._run_qstat(u'QstatInitialization', True)
':param taskid: The job id :param qsub_command_line: When initializing, re-use the job_queue_name :return: NONE'
def add_startup_job(self, taskid, qsub_command_line):
taskid = int(taskid)
self._task_dictionary[taskid] = QJobInfo(taskid, u'initializing', time.time(), u'noQueue', 1, qsub_command_line)
'request definitive job completion information for the current job from the qacct report'
@staticmethod
def _qacct_verified_complete(taskid):
sge_debug_print(u'WARNING: CONTACTING qacct for finished jobs, {0}: {1}'.format(time.time(), u'Verifying Completion'))

this_command = u'qacct'
qacct_retries = 10
is_complete = False
while qacct_retries > 0:
    qacct_retries -= 1
    try:
        proc = subprocess.Popen([this_command, u'-o', pwd.getpwuid(os.getuid())[0], u'-j', str(taskid)],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (qacct_result, _) = proc.communicate()
        if qacct_result.find(str(taskid)):
            is_complete = True
        sge_debug_print(u'NOTE: qacct for jobs\n{0}'.format(qacct_result))
        break
    except:
        sge_debug_print(u'NOTE: qacct call failed')
        time.sleep(5)
        pass
return is_complete
'request all job information for the current user in xml format. See the java documentation: http://arc.liv.ac.uk/SGE/javadocs/jgdi/com/sun/grid/jgdi/monitoring/filter/JobStateFilter.html -s r gives running jobs -s z gives recently completed jobs (**recently** is very ambiguous) -s s suspended jobs'
def _run_qstat(self, reason_for_qstat, force_instant=True):
sge_debug_print(u'WARNING: CONTACTING qmaster for jobs, {0}: {1}'.format(time.time(), reason_for_qstat))

if force_instant:
    this_command = self._qstat_instant_executable
else:
    this_command = self._qstat_cached_executable

qstat_retries = 10
while qstat_retries > 0:
    qstat_retries -= 1
    try:
        proc = subprocess.Popen([this_command, u'-u', pwd.getpwuid(os.getuid())[0], u'-xml', u'-s', u'psrz'],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (qstat_xml_result, _) = proc.communicate()
        dom = xml.dom.minidom.parseString(qstat_xml_result)
        jobs = dom.getElementsByTagName(u'job_info')
        run = jobs[0]
        runjobs = run.getElementsByTagName(u'job_list')
        self._parse_qstat_job_list(runjobs)
        break
    except Exception as inst:
        exception_message = u'QstatParsingError:\n\t{0}\n\t{1}\n'.format(type(inst), inst)
        sge_debug_print(exception_message)
        time.sleep(5)
        pass
'For debugging'
def print_dictionary(self):
for vv in list(self._task_dictionary.values()):
    sge_debug_print(str(vv))
'Sends jobs to workers when system resources are available. Check memory (gb) and cores usage before running jobs.'
def _send_procs_to_workers(self, updatehash=False, graph=None):
executing_now = [] currently_running_jobids = np.flatnonzero(((self.proc_pending == True) & (self.depidx.sum(axis=0) == 0).__array__())) busy_memory_gb = 0 busy_processors = 0 for jobid in currently_running_jobids: if ((self.procs[jobid]._interface.estimated_memory_gb <= self.memory_gb) and (self.procs[jobid]._interface.num_threads <= self.processors)): busy_memory_gb += self.procs[jobid]._interface.estimated_memory_gb busy_processors += self.procs[jobid]._interface.num_threads else: raise ValueError(u'Resources required by jobid {0} ({3}GB, {4} threads) exceed what is available on the system ({1}GB, {2} threads)'.format(jobid, self.memory_gb, self.processors, self.procs[jobid]._interface.estimated_memory_gb, self.procs[jobid]._interface.num_threads)) free_memory_gb = (self.memory_gb - busy_memory_gb) free_processors = (self.processors - busy_processors) jobids = np.flatnonzero(((self.proc_done == False) & (self.depidx.sum(axis=0) == 0).__array__())) jobids = sorted(jobids, key=(lambda item: (self.procs[item]._interface.estimated_memory_gb, self.procs[item]._interface.num_threads))) if str2bool(config.get(u'execution', u'profile_runtime')): logger.debug(u'Free memory (GB): %d, Free processors: %d', free_memory_gb, free_processors) for jobid in jobids: if str2bool(config.get(u'execution', u'profile_runtime')): logger.debug((u'Next Job: %d, memory (GB): %d, threads: %d' % (jobid, self.procs[jobid]._interface.estimated_memory_gb, self.procs[jobid]._interface.num_threads))) if ((self.procs[jobid]._interface.estimated_memory_gb <= free_memory_gb) and (self.procs[jobid]._interface.num_threads <= free_processors)): logger.info((u'Executing: %s ID: %d' % (self.procs[jobid]._id, jobid))) executing_now.append(self.procs[jobid]) if isinstance(self.procs[jobid], MapNode): try: num_subnodes = self.procs[jobid].num_subnodes() except Exception: (etype, eval, etr) = sys.exc_info() traceback = format_exception(etype, eval, etr) report_crash(self.procs[jobid], traceback=traceback) self._clean_queue(jobid, graph) self.proc_pending[jobid] = False continue if (num_subnodes > 1): submit = self._submit_mapnode(jobid) if (not submit): continue self.proc_done[jobid] = True self.proc_pending[jobid] = True free_memory_gb -= self.procs[jobid]._interface.estimated_memory_gb free_processors -= self.procs[jobid]._interface.num_threads if self._status_callback: self._status_callback(self.procs[jobid], u'start') if str2bool(self.procs[jobid].config[u'execution'][u'local_hash_check']): logger.debug(u'checking hash locally') try: (hash_exists, _, _, _) = self.procs[jobid].hash_exists() logger.debug((u'Hash exists %s' % str(hash_exists))) if (hash_exists and ((self.procs[jobid].overwrite == False) or ((self.procs[jobid].overwrite == None) and (not self.procs[jobid]._interface.always_run)))): self._task_finished_cb(jobid) self._remove_node_dirs() continue except Exception: (etype, eval, etr) = sys.exc_info() traceback = format_exception(etype, eval, etr) report_crash(self.procs[jobid], traceback=traceback) self._clean_queue(jobid, graph) self.proc_pending[jobid] = False continue logger.debug(u'Finished checking hash') if self.procs[jobid].run_without_submitting: logger.debug((u'Running node %s on master thread' % self.procs[jobid])) try: self.procs[jobid].run() except Exception: (etype, eval, etr) = sys.exc_info() traceback = format_exception(etype, eval, etr) report_crash(self.procs[jobid], traceback=traceback) self._task_finished_cb(jobid) self._remove_node_dirs() else: logger.debug((u'MultiProcPlugin submitting %s' % 
str(jobid))) tid = self._submit_job(deepcopy(self.procs[jobid]), updatehash=updatehash) if (tid is None): self.proc_done[jobid] = False self.proc_pending[jobid] = False else: self.pending_tasks.insert(0, (tid, jobid)) else: break
'Executes a pre-defined pipeline in a serial order. Parameters graph : networkx digraph defines order of execution'
def run(self, graph, config, updatehash=False):
if not isinstance(graph, nx.DiGraph):
    raise ValueError(u'Input must be a networkx digraph object')
logger.info(u'Executing debug plugin')
for node in nx.topological_sort(graph):
    self._callable(node, graph)
'LSF lists a status of \'PEND\' when a job has been submitted but is waiting to be picked up, and \'RUN\' when it is actively being processed. But _is_pending should return True until a job has finished and is ready to be checked for completeness. So return True if status is either \'PEND\' or \'RUN\''
def _is_pending(self, taskid):
cmd = CommandLine(u'bjobs', terminal_output=u'allatonce')
cmd.inputs.args = u'%d' % taskid

oldlevel = iflogger.level
iflogger.setLevel(logging.getLevelName(u'CRITICAL'))
result = cmd.run(ignore_exception=True)
iflogger.setLevel(oldlevel)

if (u'DONE' in result.runtime.stdout) or (u'EXIT' in result.runtime.stdout):
    return False
else:
    return True
'Initialize base parameters of a workflow or node Parameters name : string (mandatory) Name of this node. Name must be alphanumeric and not contain any special characters (e.g., \'.\', \'@\'). base_dir : string base output directory (will be hashed before creations) default=None, which results in the use of mkdtemp'
def __init__(self, name=None, base_dir=None):
self.base_dir = base_dir
self.config = None
self._verify_name(name)
self.name = name
self._id = self.name
self._hierarchy = None
'Clone an EngineBase object Parameters name : string (mandatory) A clone of node or workflow must have a new name'
def clone(self, name):
if (name is None) or (name == self.name):
    raise Exception(u'Cloning requires a new name')
self._verify_name(name)
clone = deepcopy(self)
clone.name = name
clone._id = name
clone._hierarchy = None
return clone
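A hedged usage sketch of cloning, using standard nipype conventions that this excerpt does not show directly (the fsl.BET interface is only a familiar example):

from nipype import Node
from nipype.interfaces import fsl

skullstrip = Node(fsl.BET(frac=0.5), name='skullstrip')
skullstrip_liberal = skullstrip.clone('skullstrip_liberal')  # a new name is mandatory
skullstrip_liberal.inputs.frac = 0.3                         # adjust the copy without touching the original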
'Parameters interface : interface object node specific interface (fsl.Bet(), spm.Coregister()) name : alphanumeric string node specific name iterables : generator Input field and list to iterate using the pipeline engine for example to iterate over different frac values in fsl.Bet() for a single field the input can be a tuple, otherwise a list of tuples :: node.iterables = (\'frac\',[0.5,0.6,0.7]) node.iterables = [(\'fwhm\',[2,4]),(\'fieldx\',[0.5,0.6,0.7])] If this node has an itersource, then the iterables values is a dictionary which maps an iterable source field value to the target iterables field values, e.g.: :: inputspec.iterables = (\'images\',[\'img1.nii\', \'img2.nii\']]) node.itersource = (\'inputspec\', [\'frac\']) node.iterables = (\'frac\', {\'img1.nii\': [0.5, 0.6], \'img2.nii\': [0.6, 0.7]}) If this node\'s synchronize flag is set, then an alternate form of the iterables is a [fields, values] list, where fields is the list of iterated fields and values is the list of value tuples for the given fields, e.g.: :: node.synchronize = True node.iterables = [(\'frac\', \'threshold\'), [(0.5, True), (0.6, False)]] itersource: tuple The (name, fields) iterables source which specifies the name of the predecessor iterable node and the input fields to use from that source node. The output field values comprise the key to the iterables parameter value mapping dictionary. synchronize: boolean Flag indicating whether iterables are synchronized. If the iterables are synchronized, then this iterable node is expanded once per iteration over all of the iterables values. Otherwise, this iterable node is expanded once per each permutation of the iterables values. overwrite : Boolean Whether to overwrite contents of output directory if it already exists. If directory exists and hash matches it assumes that process has been executed needed_outputs : list of output_names Force the node to keep only specific outputs. By default all outputs are kept. Setting this attribute will delete any output files and directories from the node\'s working directory that are not part of the `needed_outputs`. run_without_submitting : boolean Run the node without submitting to a job engine or to a multiprocessing pool'
def __init__(self, interface, name, iterables=None, itersource=None, synchronize=False, overwrite=None, needed_outputs=None, run_without_submitting=False, n_procs=1, mem_gb=None, **kwargs):
base_dir = None if (u'base_dir' in kwargs): base_dir = kwargs[u'base_dir'] super(Node, self).__init__(name, base_dir) if (interface is None): raise IOError(u'Interface must be provided') if (not isinstance(interface, Interface)): raise IOError(u'interface must be an instance of an Interface') self._interface = interface self.name = name self._result = None self.iterables = iterables self.synchronize = synchronize self.itersource = itersource self.overwrite = overwrite self.parameterization = None self.run_without_submitting = run_without_submitting self.input_source = {} self.needed_outputs = [] self.plugin_args = {} self._interface.num_threads = n_procs if (mem_gb is not None): self._interface.estimated_memory_gb = mem_gb if needed_outputs: self.needed_outputs = sorted(needed_outputs) self._got_inputs = False
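A minimal construction sketch for illustration; the field name 'fwhm' is arbitrary and IdentityInterface is used only because it needs no external tools:
>>> from nipype import Node
>>> from nipype.interfaces.utility import IdentityInterface
>>> smooth = Node(IdentityInterface(fields=['fwhm']), name='smooth')
>>> smooth.iterables = ('fwhm', [2, 4, 6])
>>> smooth.run_without_submitting = True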
'Return the underlying interface object'
@property def interface(self):
return self._interface
'Return the inputs of the underlying interface'
@property def inputs(self):
return self._interface.inputs
'Return the output fields of the underlying interface'
@property def outputs(self):
return self._interface._outputs()
'Return the location of the output directory for the node'
def output_dir(self):
if (self.base_dir is None): self.base_dir = mkdtemp() outputdir = self.base_dir if self._hierarchy: outputdir = op.join(outputdir, *self._hierarchy.split(u'.')) if self.parameterization: params_str = [u'{}'.format(p) for p in self.parameterization] if (not str2bool(self.config[u'execution'][u'parameterize_dirs'])): params_str = [self._parameterization_dir(p) for p in params_str] outputdir = op.join(outputdir, *params_str) return op.abspath(op.join(outputdir, self.name))
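To make the directory composition concrete, a sketch with hypothetical values (base_dir '/scratch/base', hierarchy 'preproc.realign', one parameterization entry '_fwhm_4', node name 'smooth'); the result shown assumes a POSIX path separator:
>>> import os.path as op
>>> op.join('/scratch/base', *'preproc.realign'.split('.'), '_fwhm_4', 'smooth')
'/scratch/base/preproc/realign/_fwhm_4/smooth'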
'Set interface input value'
def set_input(self, parameter, val):
logger.debug(u'setting nodelevel(%s) input %s = %s', self.name, parameter, to_str(val)) setattr(self.inputs, parameter, deepcopy(val))
'Retrieve a particular output of the node'
def get_output(self, parameter):
val = None if self._result: val = getattr(self._result.outputs, parameter) else: cwd = self.output_dir() (result, _, _) = self._load_resultfile(cwd) if (result and result.outputs): val = getattr(result.outputs, parameter) return val
'Print interface help'
def help(self):
self._interface.help()
'Execute the node in its directory. Parameters updatehash: boolean Update the hash stored in the output directory'
def run(self, updatehash=False):
if (self.config is None): self.config = deepcopy(config._sections) else: self.config = merge_dict(deepcopy(config._sections), self.config) if (not self._got_inputs): self._get_inputs() self._got_inputs = True outdir = self.output_dir() logger.info(u'Executing node %s in dir: %s', self._id, outdir) if op.exists(outdir): logger.debug(u'Output dir: %s', to_str(os.listdir(outdir))) hash_info = self.hash_exists(updatehash=updatehash) (hash_exists, hashvalue, hashfile, hashed_inputs) = hash_info logger.debug(u'updatehash=%s, overwrite=%s, always_run=%s, hash_exists=%s', updatehash, self.overwrite, self._interface.always_run, hash_exists) if ((not updatehash) and ((((self.overwrite is None) and self._interface.always_run) or self.overwrite) or (not hash_exists))): logger.debug(u'Node hash: %s', hashvalue) json_pat = op.join(outdir, u'_0x*.json') json_unfinished_pat = op.join(outdir, u'_0x*_unfinished.json') need_rerun = (op.exists(outdir) and (not isinstance(self, MapNode)) and (len(glob(json_pat)) != 0) and (len(glob(json_unfinished_pat)) == 0)) if need_rerun: logger.debug(u'Rerunning node:\nupdatehash = %s, self.overwrite = %s, self._interface.always_run = %s, os.path.exists(%s) = %s, hash_method = %s', updatehash, self.overwrite, self._interface.always_run, hashfile, op.exists(hashfile), self.config[u'execution'][u'hash_method'].lower()) log_debug = (config.get(u'logging', u'workflow_level') == u'DEBUG') if (log_debug and (not op.exists(hashfile))): exp_hash_paths = glob(json_pat) if (len(exp_hash_paths) == 1): split_out = split_filename(exp_hash_paths[0]) exp_hash_file_base = split_out[1] exp_hash = exp_hash_file_base[len(u'_0x'):] logger.debug(u'Previous node hash = %s', exp_hash) try: prev_inputs = load_json(exp_hash_paths[0]) except: pass else: logging.logdebug_dict_differences(prev_inputs, hashed_inputs) cannot_rerun = (str2bool(self.config[u'execution'][u'stop_on_first_rerun']) and (not ((self.overwrite is None) and self._interface.always_run))) if cannot_rerun: raise Exception(u"Cannot rerun when 'stop_on_first_rerun' is set to True") hashfile_unfinished = op.join(outdir, (u'_0x%s_unfinished.json' % hashvalue)) if op.exists(hashfile): os.remove(hashfile) rm_outdir = (op.exists(outdir) and (not (op.exists(hashfile_unfinished) and self._interface.can_resume)) and (not isinstance(self, MapNode))) if rm_outdir: logger.debug(u'Removing old %s and its contents', outdir) try: rmtree(outdir) except OSError as ex: outdircont = os.listdir(outdir) if ((ex.errno == errno.ENOTEMPTY) and (len(outdircont) == 0)): logger.warn(u'An exception was raised trying to remove old %s, but the path seems empty. Is it an NFS mount?. \nPassing the exception.', outdir) elif ((ex.errno == errno.ENOTEMPTY) and (len(outdircont) != 0)): logger.debug(u'Folder contents (%d items): %s', len(outdircont), outdircont) raise ex else: raise ex else: logger.debug(u'%s found and can_resume is True or Node is a MapNode - resuming execution', hashfile_unfinished) if isinstance(self, MapNode): for filename in glob(op.join(outdir, u'_0x*.json')): os.unlink(filename) outdir = make_output_dir(outdir) self._save_hashfile(hashfile_unfinished, hashed_inputs) self.write_report(report_type=u'preexec', cwd=outdir) savepkl(op.join(outdir, u'_node.pklz'), self) savepkl(op.join(outdir, u'_inputs.pklz'), self.inputs.get_traitsfree()) try: self._run_interface() except: os.remove(hashfile_unfinished) raise shutil.move(hashfile_unfinished, hashfile) self.write_report(report_type=u'postexec', cwd=outdir) else: if (not op.exists(op.join(outdir, u'_inputs.pklz'))): logger.debug(u'%s: creating inputs file', self.name) savepkl(op.join(outdir, u'_inputs.pklz'), self.inputs.get_traitsfree()) if (not op.exists(op.join(outdir, u'_node.pklz'))): logger.debug(u'%s: creating node file', self.name) savepkl(op.join(outdir, u'_node.pklz'), self) logger.debug(u'Hashfile exists. Skipping execution') self._run_interface(execute=False, updatehash=updatehash) logger.debug(u'Finished running %s in dir: %s\n', self._id, outdir) return self._result
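A hypothetical invocation, assuming a configured node such as the smooth node sketched earlier:
>>> res = smooth.run()                 # execute, or reuse cached results if the input hash matches
>>> res = smooth.run(updatehash=True)  # refresh the stored hash without forcing re-execution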
'Returns the directory name for the given parameterization string as follows: - If the parameterization is longer than 32 characters, then return the SHA-1 hex digest. - Otherwise, return the parameterization unchanged.'
def _parameterization_dir(self, param):
if (len(param) > 32): return sha1(param.encode()).hexdigest() else: return param
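A worked example of the 32-character rule, using only hashlib; the parameterization string is made up:
>>> from hashlib import sha1
>>> param = '_subject_id_S001_task_rest_run_01_model_full'
>>> len(param) > 32
True
>>> len(sha1(param.encode()).hexdigest())   # the 40-character SHA-1 digest becomes the dir name
40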
'Return a hash of the input state'
def _get_hashval(self):
if (not self._got_inputs): self._get_inputs() self._got_inputs = True (hashed_inputs, hashvalue) = self.inputs.get_hashval(hash_method=self.config[u'execution'][u'hash_method']) rm_extra = self.config[u'execution'][u'remove_unnecessary_outputs'] if (str2bool(rm_extra) and self.needed_outputs): hashobject = md5() hashobject.update(hashvalue.encode()) sorted_outputs = sorted(self.needed_outputs) hashobject.update(str(sorted_outputs).encode()) hashvalue = hashobject.hexdigest() hashed_inputs.append((u'needed_outputs', sorted_outputs)) return (hashed_inputs, hashvalue)
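A sketch of the needed_outputs re-hashing step, with made-up values for the base hash and output names:
>>> from hashlib import md5
>>> hashobject = md5()
>>> hashobject.update('abc123'.encode())                          # hypothetical inputs hash
>>> hashobject.update(str(sorted(['mask_file', 'out_file'])).encode())
>>> len(hashobject.hexdigest())                                   # combined 32-character hash
32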
'Retrieve inputs from pointers to results files This mechanism can be easily extended/replaced to retrieve data from other data sources (e.g., XNAT, HTTP, etc.)'
def _get_inputs(self):
logger.debug(u'Setting node inputs') for (key, info) in list(self.input_source.items()): logger.debug(u'input: %s', key) results_file = info[0] logger.debug(u'results file: %s', results_file) results = loadpkl(results_file) output_value = Undefined if isinstance(info[1], tuple): output_name = info[1][0] value = getattr(results.outputs, output_name) if isdefined(value): output_value = evaluate_connect_function(info[1][1], info[1][2], value) else: output_name = info[1] try: output_value = results.outputs.get()[output_name] except TypeError: output_value = results.outputs.dictcopy()[output_name] logger.debug(u'output: %s', output_name) try: self.set_input(key, deepcopy(output_value)) except traits.TraitError as e: msg = [u'Error setting node input:', (u'Node: %s' % self.name), (u'input: %s' % key), (u'results_file: %s' % results_file), (u'value: %s' % str(output_value))] e.args = (((e.args[0] + u'\n') + u'\n'.join(msg)),) raise
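For orientation, a hypothetical entry of the input_source mapping consumed above: the key is the destination input name, and the value pairs a results file with either an output name or, when a connect function is attached, a (name, function source, arguments) tuple:
>>> node.input_source['in_file'] = ('/scratch/wf/realign/result_realign.pklz', 'out_file')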
'Load results if they exist in cwd Parameters cwd : working directory of node Returns result : InterfaceResult structure aggregate : boolean indicating whether node should aggregate_outputs attribute_error : boolean indicating whether there was some mismatch in versions of traits used to store the result and hence the node needs to rerun'
def _load_resultfile(self, cwd):
aggregate = True resultsoutputfile = op.join(cwd, (u'result_%s.pklz' % self.name)) result = None attribute_error = False if op.exists(resultsoutputfile): pkl_file = gzip.open(resultsoutputfile, u'rb') try: result = pickle.load(pkl_file) except UnicodeDecodeError: pkl_file.seek(0) result = pickle.load(pkl_file, fix_imports=True, encoding=u'utf-8') logger.warn(u'Successfully loaded pickle in compatibility mode') except (traits.TraitError, AttributeError, ImportError, EOFError) as err: if isinstance(err, (AttributeError, ImportError)): attribute_error = True logger.debug(u'attribute error: %s probably using different trait pickled file', str(err)) else: logger.debug(u'some file does not exist. hence trait cannot be set') else: if result.outputs: try: outputs = result.outputs.get() except TypeError: outputs = result.outputs.dictcopy() try: result.outputs.set(**modify_paths(outputs, relative=False, basedir=cwd)) except FileNotFoundError: logger.debug(u'conversion to full path results in non existent file') aggregate = False pkl_file.close() logger.debug(u'Aggregate: %s', aggregate) return (result, aggregate, attribute_error)
'copy files over and change the inputs'
def _copyfiles_to_wd(self, outdir, execute, linksonly=False):
if hasattr(self._interface, u'_get_filecopy_info'): logger.debug(u'copying files to wd [execute=%s, linksonly=%s]', str(execute), str(linksonly)) if (execute and linksonly): olddir = outdir outdir = op.join(outdir, u'_tempinput') os.makedirs(outdir) for info in self._interface._get_filecopy_info(): files = self.inputs.get().get(info[u'key']) if (not isdefined(files)): continue if files: infiles = filename_to_list(files) if execute: if linksonly: if (not info[u'copy']): newfiles = copyfiles(infiles, [outdir], copy=info[u'copy'], create_new=True) else: newfiles = fnames_presuffix(infiles, newpath=outdir) newfiles = self._strip_temp(newfiles, op.abspath(olddir).split(op.sep)[(-1)]) else: newfiles = copyfiles(infiles, [outdir], copy=info[u'copy'], create_new=True) else: newfiles = fnames_presuffix(infiles, newpath=outdir) if (not isinstance(files, list)): newfiles = list_to_filename(newfiles) setattr(self.inputs, info[u'key'], newfiles) if (execute and linksonly): rmtree(outdir)
'Parameters interface : interface object node specific interface (fsl.Bet(), spm.Coregister()) name : alphanumeric string node specific name joinsource : node name name of the join predecessor iterable node joinfield : string or list of strings name(s) of list input fields that will be aggregated. The default is all of the join node input fields. unique : flag indicating whether to ignore duplicate input values See Node docstring for additional keyword arguments.'
def __init__(self, interface, name, joinsource, joinfield=None, unique=False, **kwargs):
super(JoinNode, self).__init__(interface, name, **kwargs) self.joinsource = joinsource u'the join predecessor iterable node' if (not joinfield): joinfield = self._interface.inputs.copyable_trait_names() elif isinstance(joinfield, (str, bytes)): joinfield = [joinfield] self.joinfield = joinfield u'the fields to join' self._inputs = self._override_join_traits(self._interface.inputs, self.joinfield) u'the override inputs' self._unique = unique u'flag indicating whether to ignore duplicate input values' self._next_slot_index = 0 u'the joinfield index assigned to an iterated input'
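A hypothetical construction sketch mirroring the doctest further below; 'inputspec' names an upstream iterable node that is assumed to exist in the enclosing workflow:
>>> from nipype import JoinNode
>>> from nipype.interfaces.utility import IdentityInterface
>>> join = JoinNode(IdentityInterface(fields=['images']),
...                 joinsource='inputspec', joinfield='images',
...                 unique=True, name='join')
>>> join.joinfield                    # a single field name is normalized to a list
['images']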
'Set the joinsource property. If the given value is a Node, then the joinsource is set to the node name.'
@joinsource.setter def joinsource(self, value):
if isinstance(value, Node): value = value.name self._joinsource = value
'The JoinNode inputs include the join field overrides.'
@property def inputs(self):
return self._inputs
'Add new join item fields assigned to the next iterated input This method is intended solely for workflow graph expansion. Examples >>> from nipype.interfaces.utility import IdentityInterface >>> import nipype.pipeline.engine as pe >>> from nipype import Node, JoinNode, Workflow >>> inputspec = Node(IdentityInterface(fields=[\'image\']), ... name=\'inputspec\') >>> join = JoinNode(IdentityInterface(fields=[\'images\', \'mask\']), ... joinsource=\'inputspec\', joinfield=\'images\', name=\'join\') >>> join._add_join_item_fields() # doctest: +ALLOW_UNICODE {\'images\': \'imagesJ1\'} Return the {base field: slot field} dictionary'
def _add_join_item_fields(self):
idx = self._next_slot_index newfields = dict([(field, self._add_join_item_field(field, idx)) for field in self.joinfield]) logger.debug(u'Added the %s join item fields %s.', self, newfields) self._next_slot_index += 1 return newfields
'Add new join item fields qualified by the given index Return the new field name'
def _add_join_item_field(self, field, index):
name = self._join_item_field_name(field, index) trait = self._inputs.trait(field, False, True) self._inputs.add_trait(name, trait) return name
'Return the field suffixed by the index + 1'
def _join_item_field_name(self, field, index):
return (u'%sJ%d' % (field, (index + 1)))
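A quick illustration of the suffix rule, assuming join is an existing JoinNode such as the one sketched above:
>>> join._join_item_field_name('images', 0)
'imagesJ1'
>>> join._join_item_field_name('images', 2)
'imagesJ3'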
'Convert the given join fields to accept an input that is a list item rather than a list. Non-join fields delegate to the interface traits. Return the override DynamicTraitedSpec'
def _override_join_traits(self, basetraits, fields):
dyntraits = DynamicTraitedSpec() if (fields is None): fields = basetraits.copyable_trait_names() else: for field in fields: if (not basetraits.trait(field)): raise ValueError((u'The JoinNode %s does not have a field named %s' % (self.name, field))) for (name, trait) in list(basetraits.items()): if ((name in fields) and (len(trait.inner_traits) == 1)): item_trait = trait.inner_traits[0] dyntraits.add_trait(name, item_trait) setattr(dyntraits, name, Undefined) logger.debug(u'Converted the join node %s field %s trait type from %s to %s', self, name, trait.trait_type.info(), item_trait.info()) else: dyntraits.add_trait(name, traits.Any) setattr(dyntraits, name, Undefined) return dyntraits