| desc | decl | bodies |
|---|---|---|
| 
	'Initializes outputs'
 | 
	@classmethod
def _outputs(cls):
 | 
	    raise NotImplementedError
 | 
| 
	'Execute the command.'
 | 
	def run(self):
 | 
	    raise NotImplementedError
 | 
| 
	'Called to populate outputs'
 | 
	def aggregate_outputs(self, runtime=None, needed_outputs=None):
 | 
	    raise NotImplementedError
 | 
| 
	'List expected outputs'
 | 
	def _list_outputs(self):
 | 
	    raise NotImplementedError
 | 
| 
	'Provides information about file inputs to copy or link to cwd.
Necessary for pipeline operation'
 | 
	def _get_filecopy_info(self):
 | 
	    raise NotImplementedError
 | 
| 
	'Prints class help'
 | 
	@classmethod
def help(cls, returnhelp=False):
 | 
	    if cls.__doc__:
        docstring = (trim(cls.__doc__).split(u'\n') + [u''])
    else:
        docstring = [u'']
    allhelp = u'\n'.join(docstring + cls._inputs_help() + [u''] + cls._outputs_help() + [u''] + cls._refs_help() + [u''])
    if returnhelp:
        return allhelp
    else:
        print(allhelp)
 | 
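A minimal usage sketch of `help()` (assuming nipype is installed; `fsl.BET` stands in for any concrete Interface subclass):

```python
from nipype.interfaces import fsl  # any concrete interface works here

fsl.BET.help()                        # prints inputs, outputs, and references
text = fsl.BET.help(returnhelp=True)  # returns the help text as a string instead
```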
| 
	'Prints interface references.'
 | 
	@classmethod
def _refs_help(cls):
 | 
	    if (not cls.references_):
        return []
    helpstr = [u'References::']
    for r in cls.references_:
        helpstr += [u'{}'.format(r[u'entry'])]
    return helpstr
 | 
| 
	'Prints description for input parameters'
 | 
	@classmethod
def _inputs_help(cls):
 | 
	    helpstr = [u'Inputs::']
    inputs = cls.input_spec()
    if (len(list(inputs.traits(transient=None).items())) == 0):
        helpstr += [u'', u'\tNone']
        return helpstr
    manhelpstr = [u'', u'\t[Mandatory]']
    mandatory_items = inputs.traits(mandatory=True)
    for (name, spec) in sorted(mandatory_items.items()):
        manhelpstr += cls._get_trait_desc(inputs, name, spec)
    opthelpstr = [u'', u'\t[Optional]']
    for (name, spec) in sorted(inputs.traits(transient=None).items()):
        if (name in mandatory_items):
            continue
        opthelpstr += cls._get_trait_desc(inputs, name, spec)
    if manhelpstr:
        helpstr += manhelpstr
    if opthelpstr:
        helpstr += opthelpstr
    return helpstr
 | 
| 
	'Prints description for output parameters'
 | 
	@classmethod
def _outputs_help(cls):
 | 
	    helpstr = [u'Outputs::', u'']
    if cls.output_spec:
        outputs = cls.output_spec()
        for (name, spec) in sorted(outputs.traits(transient=None).items()):
            helpstr += cls._get_trait_desc(outputs, name, spec)
    if (len(helpstr) == 2):
        helpstr += [u'\tNone']
    return helpstr
 | 
| 
	'Returns a bunch containing output fields for the class'
 | 
	def _outputs(self):
 | 
	    outputs = None
    if self.output_spec:
        outputs = self.output_spec()
    return outputs
 | 
| 
	'Provides information about file inputs to copy or link to cwd.
Necessary for pipeline operation'
 | 
	@classmethod
def _get_filecopy_info(cls):
 | 
	    info = []
    if (cls.input_spec is None):
        return info
    metadata = dict(copyfile=(lambda t: (t is not None)))
    for (name, spec) in sorted(cls.input_spec().traits(**metadata).items()):
        info.append(dict(key=name, copy=spec.copyfile))
    return info
 | 
| 
	'check if required inputs are satisfied'
 | 
	def _check_requires(self, spec, name, value):
 | 
	    if spec.requires:
        values = [(not isdefined(getattr(self.inputs, field))) for field in spec.requires]
        if (any(values) and isdefined(value)):
            msg = (u"%s    requires    a    value    for    input    '%s'    because    one    of    %s    is    set.    For    a    list    of    required    inputs,    see    %s.help()" % (self.__class__.__name__, name, u',    '.join(spec.requires), self.__class__.__name__))
            raise ValueError(msg)
 | 
| 
	'check if mutually exclusive inputs are satisfied'
 | 
	def _check_xor(self, spec, name, value):
 | 
	    if spec.xor:
        values = [isdefined(getattr(self.inputs, field)) for field in spec.xor]
        if ((not any(values)) and (not isdefined(value))):
            msg = (u"%s    requires    a    value    for    one    of    the    inputs    '%s'.    For    a    list    of    required    inputs,    see    %s.help()" % (self.__class__.__name__, u',    '.join(spec.xor), self.__class__.__name__))
            raise ValueError(msg)
 | 
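For context, a hypothetical input spec showing the `requires` and `xor` trait metadata these checks consume (all field names here are made up):

```python
from nipype.interfaces.base import BaseInterfaceInputSpec, File, traits

class ExampleInputSpec(BaseInterfaceInputSpec):
    in_file = File(exists=True)
    # threshold is only valid when in_file is also set
    threshold = traits.Float(requires=['in_file'])
    # exactly one of mask / threshold must be provided
    mask = File(mandatory=True, xor=['threshold'])
```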
| 
	'Raises an exception if a mandatory input is Undefined'
 | 
	def _check_mandatory_inputs(self):
 | 
	    for (name, spec) in list(self.inputs.traits(mandatory=True).items()):
        value = getattr(self.inputs, name)
        self._check_xor(spec, name, value)
        if ((not isdefined(value)) and (spec.xor is None)):
            msg = (u"%s    requires    a    value    for    input    '%s'.    For    a    list    of    required    inputs,    see    %s.help()" % (self.__class__.__name__, name, self.__class__.__name__))
            raise ValueError(msg)
        if isdefined(value):
            self._check_requires(spec, name, value)
    for (name, spec) in list(self.inputs.traits(mandatory=None, transient=None).items()):
        self._check_requires(spec, name, getattr(self.inputs, name))
 | 
| 
	'Raises an exception on version mismatch'
 | 
	def _check_version_requirements(self, trait_object, raise_exception=True):
 | 
	    unavailable_traits = []
    check = dict(min_ver=(lambda t: (t is not None)))
    names = trait_object.trait_names(**check)
    if (names and self.version):
        version = LooseVersion(str(self.version))
        for name in names:
            min_ver = LooseVersion(str(trait_object.traits()[name].min_ver))
            if (min_ver > version):
                unavailable_traits.append(name)
                if (not isdefined(getattr(trait_object, name))):
                    continue
                if raise_exception:
                    raise Exception((u'Trait %s (%s) (version %s < required %s)' % (name, self.__class__.__name__, version, min_ver)))
        check = dict(max_ver=(lambda t: (t is not None)))
        names = trait_object.trait_names(**check)
        for name in names:
            max_ver = LooseVersion(str(trait_object.traits()[name].max_ver))
            if (max_ver < version):
                unavailable_traits.append(name)
                if (not isdefined(getattr(trait_object, name))):
                    continue
                if raise_exception:
                    raise Exception((u'Trait %s (%s) (version %s > required %s)' % (name, self.__class__.__name__, version, max_ver)))
    return unavailable_traits
 | 
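A sketch of the `min_ver`/`max_ver` metadata this method inspects; the flag and version strings are hypothetical:

```python
from nipype.interfaces.base import traits

# Compared against the interface's reported tool version via LooseVersion.
new_option = traits.Bool(argstr='--new-option', min_ver='5.0.9',
                         desc='only honored by tool versions >= 5.0.9')
```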
| 
	'Core function that executes interface'
 | 
	def _run_interface(self, runtime):
 | 
	    raise NotImplementedError
 | 
| 
	'Add the interface references to the duecredit citations'
 | 
	def _duecredit_cite(self):
 | 
	    for r in self.references_:
        r[u'path'] = self.__module__
        due.cite(**r)
 | 
| 
	'Execute this interface.
This interface will not raise an exception if runtime.returncode is
non-zero.
Parameters
----------
inputs : allows the interface settings to be updated
Returns
-------
results : an InterfaceResult object containing a copy of the instance
that was executed, provenance information and, if successful, results'
 | 
	def run(self, **inputs):
 | 
	    self.inputs.trait_set(**inputs)
    self._check_mandatory_inputs()
    self._check_version_requirements(self.inputs)
    interface = self.__class__
    self._duecredit_cite()
    env = deepcopy(dict(os.environ))
    runtime = Bunch(cwd=os.getcwd(), returncode=None, duration=None, environ=env, startTime=dt.isoformat(dt.utcnow()), endTime=None, platform=platform.platform(), hostname=platform.node(), version=self.version)
    try:
        runtime = self._run_wrapper(runtime)
        outputs = self.aggregate_outputs(runtime)
        runtime.endTime = dt.isoformat(dt.utcnow())
        timediff = (parseutc(runtime.endTime) - parseutc(runtime.startTime))
        runtime.duration = (((timediff.days * 86400) + timediff.seconds) + (timediff.microseconds / 1e6))
        results = InterfaceResult(interface, runtime, inputs=self.inputs.get_traitsfree(), outputs=outputs)
        prov_record = None
        if str2bool(config.get(u'execution', u'write_provenance')):
            prov_record = write_provenance(results)
        results.provenance = prov_record
    except Exception as e:
        runtime.endTime = dt.isoformat(dt.utcnow())
        timediff = (parseutc(runtime.endTime) - parseutc(runtime.startTime))
        runtime.duration = (((timediff.days * 86400) + timediff.seconds) + (timediff.microseconds / 1e6))
        if (len(e.args) == 0):
            e.args = (u'',)
        message = (u'\nInterface %s failed to run.' % self.__class__.__name__)
        if (config.has_option(u'logging', u'interface_level') and (config.get(u'logging', u'interface_level').lower() == u'debug')):
            inputs_str = ((u'\nInputs:' + str(self.inputs)) + u'\n')
        else:
            inputs_str = u''
        if ((len(e.args) == 1) and isinstance(e.args[0], (str, bytes))):
            e.args = ((e.args[0] + u' '.join([message, inputs_str])),)
        else:
            e.args += (message,)
            if (inputs_str != u''):
                e.args += (inputs_str,)
        import traceback
        runtime.traceback = traceback.format_exc()
        runtime.traceback_args = e.args
        inputs = None
        try:
            inputs = self.inputs.get_traitsfree()
        except Exception as e:
            pass
        results = InterfaceResult(interface, runtime, inputs=inputs)
        prov_record = None
        if str2bool(config.get(u'execution', u'write_provenance')):
            try:
                prov_record = write_provenance(results)
            except Exception:
                prov_record = None
        results.provenance = prov_record
        if (hasattr(self.inputs, u'ignore_exception') and isdefined(self.inputs.ignore_exception) and self.inputs.ignore_exception):
            pass
        else:
            raise
    return results
 | 
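A hypothetical invocation (interface and file name are placeholders):

```python
from nipype.interfaces import fsl

# Keyword arguments passed to run() update the inputs before execution.
res = fsl.BET(in_file='T1.nii.gz').run(frac=0.3)
print(res.runtime.duration)   # seconds, computed as above
print(res.outputs)
```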
| 
	'List the expected outputs'
 | 
	def _list_outputs(self):
 | 
	    if self.output_spec:
        raise NotImplementedError
    else:
        return None
 | 
| 
	'Collate expected outputs and check for existence'
 | 
	def aggregate_outputs(self, runtime=None, needed_outputs=None):
 | 
	    predicted_outputs = self._list_outputs()
    outputs = self._outputs()
    if predicted_outputs:
        _unavailable_outputs = []
        if outputs:
            _unavailable_outputs = self._check_version_requirements(self._outputs())
        for (key, val) in list(predicted_outputs.items()):
            if (needed_outputs and (key not in needed_outputs)):
                continue
            if (key in _unavailable_outputs):
                raise KeyError((u'Output trait %s not available in version %s of interface %s. Please inform developers.' % (key, self.version, self.__class__.__name__)))
            try:
                setattr(outputs, key, val)
                _ = getattr(outputs, key)
            except TraitError as error:
                if (hasattr(error, u'info') and error.info.startswith(u'an existing')):
                    msg = (u"File/Directory '%s' not found for %s output '%s'." % (val, self.__class__.__name__, key))
                    raise FileNotFoundError(msg)
                else:
                    raise error
    return outputs
 | 
| 
	'A convenient way to load pre-set inputs from a JSON file.'
 | 
	def load_inputs_from_json(self, json_file, overwrite=True):
 | 
	    with open(json_file) as fhandle:
        inputs_dict = json.load(fhandle)
    def_inputs = []
    if (not overwrite):
        def_inputs = list(self.inputs.get_traitsfree().keys())
    new_inputs = list((set(list(inputs_dict.keys())) - set(def_inputs)))
    for key in new_inputs:
        if hasattr(self.inputs, key):
            setattr(self.inputs, key, inputs_dict[key])
 | 
| 
	'A convenient way to save current inputs to a JSON file.'
 | 
	def save_inputs_to_json(self, json_file):
 | 
	    inputs = self.inputs.get_traitsfree()
    iflogger.debug(u'saving inputs %s', inputs)
    with open(json_file, (u'w' if PY3 else u'wb')) as fhandle:
        json.dump(inputs, fhandle, indent=4, ensure_ascii=False)
 | 
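A sketch of round-tripping inputs through JSON (interface and file names are placeholders):

```python
from nipype.interfaces import fsl

iface = fsl.BET(in_file='T1.nii.gz', frac=0.3)
iface.save_inputs_to_json('bet_inputs.json')

other = fsl.BET()
other.load_inputs_from_json('bet_inputs.json')  # overwrite=True by default
```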
| 
	'Pass-through for file descriptor.'
 | 
	def fileno(self):
 | 
	    return self._impl.fileno()
 | 
| 
	'Read from the file descriptor. If \'drain\' set, read until EOF.'
 | 
	def read(self, drain=0):
 | 
	    while (self._read(drain) is not None):
        if (not drain):
            break
 | 
| 
	'Read from the file descriptor'
 | 
	def _read(self, drain):
 | 
	    fd = self.fileno()
    buf = os.read(fd, 4096).decode(self.default_encoding)
    if ((not buf) and (not self._buf)):
        return None
    if (u'\n' not in buf):
        if (not drain):
            self._buf += buf
            return []
    buf = (self._buf + buf)
    if (u'\n' in buf):
        (tmp, rest) = buf.rsplit(u'\n', 1)
    else:
        tmp = buf
        rest = None
    self._buf = rest
    now = datetime.datetime.now().isoformat()
    rows = tmp.split(u'\n')
    self._rows += [(now, (u'%s %s:%s' % (self._name, now, r)), r) for r in rows]
    for idx in range(self._lastidx, len(self._rows)):
        iflogger.info(self._rows[idx][1])
    self._lastidx = len(self._rows)
 | 
| 
	'Set the default terminal output for CommandLine Interfaces.
This method is used to set default terminal output for
CommandLine Interfaces.  However, setting this will not
update the terminal output for any existing instances.  For these,
assign the <instance>.inputs.terminal_output.'
 | 
	@classmethod
def set_default_terminal_output(cls, output_type):
 | 
	    if (output_type in [u'stream', u'allatonce', u'file', u'none']):
        cls._terminal_output = output_type
    else:
        raise AttributeError((u'Invalid terminal output_type: %s' % output_type))
 | 
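Usage sketch; this changes the class-level default, so only instances created afterwards are affected:

```python
from nipype.interfaces.base import CommandLine

CommandLine.set_default_terminal_output('file')  # one of: stream, allatonce, file, none
```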
| 
	'The base command, immutable'
 | 
	@property
def cmd(self):
 | 
	    return self._cmd
 | 
| 
	'`command` plus any arguments (args)
validates arguments and generates command line'
 | 
	@property
def cmdline(self):
 | 
	    self._check_mandatory_inputs()
    allargs = self._parse_inputs()
    allargs.insert(0, self.cmd)
    return u' '.join(allargs)
 | 
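A sketch of inspecting the assembled command line without executing anything (the exact output depends on the installed tool and its defaults):

```python
from nipype.interfaces import fsl

bet = fsl.BET(in_file='T1.nii.gz', frac=0.3)
print(bet.cmdline)   # roughly: 'bet T1.nii.gz <out_file> -f 0.30'
```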
| 
	'Execute command via subprocess
Parameters
----------
runtime : passed by the run function
Returns
-------
runtime : updated runtime information
adds stdout, stderr, merged, cmdline, dependencies, command_path'
 | 
	def _run_interface(self, runtime, correct_return_codes=(0,)):
 | 
	    setattr(runtime, u'stdout', None)
    setattr(runtime, u'stderr', None)
    setattr(runtime, u'cmdline', self.cmdline)
    out_environ = self._get_environ()
    runtime.environ.update(out_environ)
    executable_name = self.cmd.split()[0]
    (exist_val, cmd_path) = _exists_in_path(executable_name, runtime.environ)
    if (not exist_val):
        raise IOError((u"command    '%s'    could    not    be    found    on    host    %s" % (self.cmd.split()[0], runtime.hostname)))
    setattr(runtime, u'command_path', cmd_path)
    setattr(runtime, u'dependencies', get_dependencies(executable_name, runtime.environ))
    runtime = run_command(runtime, output=self.inputs.terminal_output, redirect_x=self._redirect_x)
    if ((runtime.returncode is None) or (runtime.returncode not in correct_return_codes)):
        self.raise_exception(runtime)
    return runtime
 | 
| 
	'A helper function for _parse_inputs
Formats a trait containing argstr metadata'
 | 
	def _format_arg(self, name, trait_spec, value):
 | 
	    argstr = trait_spec.argstr
    iflogger.debug((u'%s_%s' % (name, str(value))))
    if (trait_spec.is_trait_type(traits.Bool) and (u'%' not in argstr)):
        if value:
            return argstr
        else:
            return None
    elif (trait_spec.is_trait_type(traits.List) or (trait_spec.is_trait_type(traits.TraitCompound) and isinstance(value, list))):
        sep = trait_spec.sep
        if (sep is None):
            sep = u' '
        if argstr.endswith(u'...'):
            argstr = argstr.replace(u'...', u'')
            return sep.join([(argstr % elt) for elt in value])
        else:
            return (argstr % sep.join((str(elt) for elt in value)))
    else:
        return (argstr % value)
 | 
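Illustrative `argstr` metadata driving `_format_arg` (field names and flags are made up):

```python
from nipype.interfaces.base import CommandLineInputSpec, File, traits

class ExampleInputSpec(CommandLineInputSpec):
    verbose = traits.Bool(argstr='-v')                       # emitted bare when True
    sigmas = traits.List(traits.Float(), argstr='-s %f...')  # trailing '...' repeats the flag per element
    out_file = File(argstr='-o %s', position=-1)             # negative position -> end of command line
```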
| 
	'Parse all inputs using the ``argstr`` format string in the Trait.
Any inputs that are assigned (not the default_value) are formatted
to be added to the command line.
Returns
all_args : list
A list of all inputs formatted for the command line.'
 | 
	def _parse_inputs(self, skip=None):
 | 
	    all_args = []
    initial_args = {}
    final_args = {}
    metadata = dict(argstr=(lambda t: (t is not None)))
    for (name, spec) in sorted(self.inputs.traits(**metadata).items()):
        if (skip and (name in skip)):
            continue
        value = getattr(self.inputs, name)
        if spec.name_source:
            value = self._filename_from_source(name)
        elif spec.genfile:
            if ((not isdefined(value)) or (value is None)):
                value = self._gen_filename(name)
        if (not isdefined(value)):
            continue
        arg = self._format_arg(name, spec, value)
        if (arg is None):
            continue
        pos = spec.position
        if (pos is not None):
            if (int(pos) >= 0):
                initial_args[pos] = arg
            else:
                final_args[pos] = arg
        else:
            all_args.append(arg)
    first_args = [arg for (pos, arg) in sorted(initial_args.items())]
    last_args = [arg for (pos, arg) in sorted(final_args.items())]
    return ((first_args + all_args) + last_args)
 | 
| 
	'Adds \'mpiexec\' to beginning of command'
 | 
	@property
def cmdline(self):
 | 
	    result = []
    if self.inputs.use_mpi:
        result.append(u'mpiexec')
        if self.inputs.n_procs:
            result.append((u'-n %d' % self.inputs.n_procs))
    result.append(super(MpiCommandLine, self).cmdline)
    return u' '.join(result)
 | 
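A hypothetical use; `SomeMpiTool` is a stand-in for a concrete `MpiCommandLine` subclass:

```python
tool = SomeMpiTool()          # hypothetical MpiCommandLine subclass
tool.inputs.use_mpi = True
tool.inputs.n_procs = 8
print(tool.cmdline)           # -> 'mpiexec -n 8 <wrapped command ...>'
```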
| 
	'Validate and process inputs into useful form.
Returns a list of nilearn maskers and the list of corresponding label names.'
 | 
	def _process_inputs(self):
 | 
	    import nilearn.input_data as nl
    import nilearn.image as nli
    label_data = nli.concat_imgs(self.inputs.label_files)
    maskers = []
    if (np.amax(label_data.get_data()) > 1):
        n_labels = np.amax(label_data.get_data())
        maskers.append(nl.NiftiLabelsMasker(label_data))
    else:
        n_labels = label_data.get_data().shape[3]
        if self.inputs.incl_shared_variance:
            for img in nli.iter_img(label_data):
                maskers.append(nl.NiftiMapsMasker(self._4d(img.get_data(), img.affine)))
        else:
            maskers.append(nl.NiftiMapsMasker(label_data))
    if (not np.isclose(int(n_labels), n_labels)):
        raise ValueError(u'The label files {} contain invalid value {}. Check input.'.format(self.inputs.label_files, n_labels))
    if (len(self.inputs.class_labels) != n_labels):
        raise ValueError(u'The length of class_labels {} does not match the number of regions {} found in label_files {}'.format(self.inputs.class_labels, n_labels, self.inputs.label_files))
    if self.inputs.include_global:
        global_label_data = label_data.get_data().sum(axis=3)
        global_label_data = np.rint(global_label_data).astype(int).clip(0, 1)
        global_label_data = self._4d(global_label_data, label_data.affine)
        global_masker = nl.NiftiLabelsMasker(global_label_data, detrend=self.inputs.detrend)
        maskers.insert(0, global_masker)
        self.inputs.class_labels.insert(0, u'GlobalSignal')
    for masker in maskers:
        masker.set_params(detrend=self.inputs.detrend)
    return maskers
 | 
| 
	'Takes a 3-dimensional numpy array and an affine,
returns the equivalent 4-dimensional nifti image'
 | 
	def _4d(self, array, affine):
 | 
	    return nb.Nifti1Image(array[:, :, :, np.newaxis], affine)
 | 
| 
	'Parameters
----------
input_names : single str or list or None
names corresponding to function inputs
if ``None``, derive input names from function argument names
output_names : single str or list
names corresponding to function outputs (default: \'out\').
if list of length > 1, has to match the number of outputs
function : callable
callable python object. must be able to execute in an
isolated namespace (possibly in concert with the ``imports``
parameter)
imports : list of strings
list of import statements that allow the function to execute
in an otherwise empty namespace'
 | 
	def __init__(self, input_names=None, output_names=u'out', function=None, imports=None, **inputs):
 | 
	    super(Function, self).__init__(**inputs)
    if function:
        if hasattr(function, u'__call__'):
            try:
                self.inputs.function_str = getsource(function)
            except IOError:
                raise Exception(u'Interface Function does not accept function objects defined interactively in a python session')
            else:
                if (input_names is None):
                    fninfo = function.__code__
        elif isinstance(function, (str, bytes)):
            self.inputs.function_str = function
            if (input_names is None):
                fninfo = create_function_from_source(function, imports).__code__
        else:
            raise Exception(u'Unknown type of function')
        if (input_names is None):
            input_names = fninfo.co_varnames[:fninfo.co_argcount]
    self.inputs.on_trait_change(self._set_function_string, u'function_str')
    self._input_names = filename_to_list(input_names)
    self._output_names = filename_to_list(output_names)
    add_traits(self.inputs, [name for name in self._input_names])
    self.imports = imports
    self._out = {}
    for name in self._output_names:
        self._out[name] = None
 | 
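A minimal sketch of wrapping a plain Python function (the function must come from a file, not an interactive session, so that its source can be retrieved):

```python
from nipype.interfaces.utility import Function

def add(a, b):
    return a + b

adder = Function(input_names=['a', 'b'], output_names=['total'], function=add)
adder.inputs.a = 1
adder.inputs.b = 2
res = adder.run()  # res.outputs.total == 3
```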
| 
	'Initializes the upload progress tracker for a file'
 | 
	def __init__(self, filename):
 | 
	    import threading
    self._filename = filename
    self._size = float(os.path.getsize(filename))
    self._seen_so_far = 0
    self._lock = threading.Lock()
 | 
| 
	'Reports incremental upload progress to stdout'
 | 
	def __call__(self, bytes_amount):
 | 
	    import sys
    with self._lock:
        self._seen_so_far += bytes_amount
        if (self._size != 0):
            percentage = ((self._seen_so_far / self._size) * 100)
        else:
            percentage = 0
        progress_str = (u'%d / %d (%.2f%%)\r' % (self._seen_so_far, self._size, percentage))
        sys.stdout.write(progress_str)
        sys.stdout.flush()
 | 
| 
	'Parameters
----------
infields : list of str
Indicates the input fields to be dynamically created'
 | 
	def __init__(self, infields=None, force_run=True, **kwargs):
 | 
	    super(DataSink, self).__init__(**kwargs)
    undefined_traits = {}
    self._infields = infields
    if infields:
        for key in infields:
            self.inputs.add_trait(key, traits.Any)
            self.inputs._outputs[key] = Undefined
            undefined_traits[key] = Undefined
    self.inputs.trait_set(trait_change_notify=False, **undefined_traits)
    if force_run:
        self._always_run = True
 | 
| 
	'Method to see if the datasink\'s base directory specifies an
S3 bucket path; if it does, it parses the path for the bucket
name in the form \'s3://bucket_name/...\' and returns it
Returns
-------
s3_flag : boolean
flag indicating whether the base_directory contained an
S3 bucket path
bucket_name : string
name of the S3 bucket to connect to; if the base directory
is not a valid S3 path, defaults to \'<N/A>\''
 | 
	def _check_s3_base_dir(self):
 | 
	    s3_str = u's3://'
    bucket_name = u'<N/A>'
    base_directory = self.inputs.base_directory
    if (not isdefined(base_directory)):
        s3_flag = False
        return (s3_flag, bucket_name)
    if base_directory.lower().startswith(s3_str):
        base_dir_sp = base_directory.split(u'/')
        base_dir_sp[0] = base_dir_sp[0].lower()
        base_directory = u'/'.join(base_dir_sp)
    if base_directory.startswith(s3_str):
        bucket_name = base_directory.split(s3_str)[1].split(u'/')[0]
        s3_flag = True
    else:
        s3_flag = False
    return (s3_flag, bucket_name)
 | 
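Expected behavior, sketched with hypothetical paths:

```python
# base_directory = 's3://my-bucket/derivatives'  -> (True, 'my-bucket')
# base_directory = 'S3://my-bucket/derivatives'  -> (True, 'my-bucket')  # prefix is lowercased first
# base_directory = '/data/derivatives'           -> (False, '<N/A>')
```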
| 
	'Method to return AWS access key id and secret access key using
credentials found in a local file.
Parameters
----------
self : nipype.interfaces.io.DataSink
self for instance method
Returns
-------
aws_access_key_id : string
string of the AWS access key ID
aws_secret_access_key : string
string of the AWS secret access key'
 | 
	def _return_aws_keys(self):
 | 
	    import os
    creds_path = self.inputs.creds_path
    if (creds_path and os.path.exists(creds_path)):
        with open(creds_path, u'r') as creds_in:
            row1 = creds_in.readline()
            row2 = creds_in.readline()
        if (u'User Name' in row1):
            aws_access_key_id = row2.split(u',')[1]
            aws_secret_access_key = row2.split(u',')[2]
        elif (u'AWSAccessKeyId' in row1):
            aws_access_key_id = row1.split(u'=')[1]
            aws_secret_access_key = row2.split(u'=')[1]
        else:
            err_msg = u'Credentials file not recognized, check file is correct'
            raise Exception(err_msg)
        aws_access_key_id = aws_access_key_id.replace(u'\r', u'').replace(u'\n', u'')
        aws_secret_access_key = aws_secret_access_key.replace(u'\r', u'').replace(u'\n', u'')
    else:
        aws_access_key_id = os.getenv(u'AWS_ACCESS_KEY_ID')
        aws_secret_access_key = os.getenv(u'AWS_SECRET_ACCESS_KEY')
    return (aws_access_key_id, aws_secret_access_key)
 | 
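The two credential-file layouts the parser above recognizes, using AWS's documentation placeholder keys:

```python
# CSV-style (IAM console download): keys read from columns 1 and 2 of row 2.
csv_style = ('User Name,Access Key Id,Secret Access Key\n'
             '"me",AKIAIOSFODNN7EXAMPLE,wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\n')

# Key=value style: values read after '=' on each row.
env_style = ('AWSAccessKeyId=AKIAIOSFODNN7EXAMPLE\n'
             'AWSSecretKey=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\n')
```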
| 
	'Method to return a bucket object which can be used to interact
with an AWS S3 bucket using credentials found in a local file.
Parameters
----------
self : nipype.interfaces.io.DataSink
self for instance method
bucket_name : string
string corresponding to the name of the bucket on S3
Returns
-------
bucket : boto3.resources.factory.s3.Bucket
boto3 s3 Bucket object which is used to interact with files
in an S3 bucket on AWS'
 | 
	def _fetch_bucket(self, bucket_name):
 | 
	    import logging
    try:
        import boto3
        import botocore
    except ImportError as exc:
        err_msg = u'Boto3 package is not installed - install boto3 and try again.'
        raise Exception(err_msg)
    creds_path = self.inputs.creds_path
    iflogger = logging.getLogger(u'interface')
    try:
        (aws_access_key_id, aws_secret_access_key) = self._return_aws_keys()
    except Exception as exc:
        err_msg = (u'There was a problem extracting the AWS credentials from the credentials file provided: %s. Error:\n%s' % (creds_path, exc))
        raise Exception(err_msg)
    if (aws_access_key_id and aws_secret_access_key):
        iflogger.info((u'Connecting to S3 bucket: %s with credentials...' % bucket_name))
        session = boto3.session.Session(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
        s3_resource = session.resource(u's3', use_ssl=True)
    else:
        iflogger.info((u'Connecting to AWS: %s anonymously...' % bucket_name))
        session = boto3.session.Session()
        s3_resource = session.resource(u's3', use_ssl=True)
        s3_resource.meta.client.meta.events.register(u'choose-signer.s3.*', botocore.handlers.disable_signing)
    bucket = s3_resource.Bucket(bucket_name)
    try:
        s3_resource.meta.client.head_bucket(Bucket=bucket_name)
    except botocore.exceptions.ClientError as exc:
        error_code = int(exc.response[u'Error'][u'Code'])
        if (error_code == 403):
            err_msg = (u'Access to bucket: %s is denied; check credentials' % bucket_name)
            raise Exception(err_msg)
        elif (error_code == 404):
            err_msg = (u'Bucket: %s does not exist; check spelling and try again' % bucket_name)
            raise Exception(err_msg)
        else:
            err_msg = (u'Unable to connect to bucket: %s. Error message:\n%s' % (bucket_name, exc))
            raise Exception(err_msg)
    except Exception as exc:
        err_msg = (u'Unable to connect to bucket: %s. Error message:\n%s' % (bucket_name, exc))
        raise Exception(err_msg)
    return bucket
 | 
| 
	'Method to upload outputs to S3 bucket instead of on local disk'
 | 
	def _upload_to_s3(self, bucket, src, dst):
 | 
	    import hashlib
    import logging
    import os
    from botocore.exceptions import ClientError
    iflogger = logging.getLogger(u'interface')
    s3_str = u's3://'
    s3_prefix = (s3_str + bucket.name)
    if dst.lower().startswith(s3_str):
        dst_sp = dst.split(u'/')
        dst_sp[0] = dst_sp[0].lower()
        dst = u'/'.join(dst_sp)
    if os.path.isdir(src):
        src_files = []
        for (root, dirs, files) in os.walk(src):
            src_files.extend([os.path.join(root, fil) for fil in files])
        dst_files = [os.path.join(dst, src_f.split(src)[1]) for src_f in src_files]
    else:
        src_files = [src]
        dst_files = [dst]
    for (src_idx, src_f) in enumerate(src_files):
        dst_f = dst_files[src_idx]
        dst_k = dst_f.replace(s3_prefix, u'').lstrip(u'/')
        try:
            dst_obj = bucket.Object(key=dst_k)
            dst_md5 = dst_obj.e_tag.strip(u'"')
            with open(src_f, u'rb') as src_fd:
                src_md5 = hashlib.md5(src_fd.read()).hexdigest()
            if (dst_md5 == src_md5):
                iflogger.info((u'File %s already exists on S3, skipping...' % dst_f))
                continue
            else:
                iflogger.info(u'Overwriting previous S3 file...')
        except ClientError:
            iflogger.info(u'New file to S3')
        iflogger.info((u'Uploading %s to S3 bucket, %s, as %s...' % (src_f, bucket.name, dst_f)))
        if self.inputs.encrypt_bucket_keys:
            extra_args = {u'ServerSideEncryption': u'AES256'}
        else:
            extra_args = {}
        bucket.upload_file(src_f, dst_k, ExtraArgs=extra_args, Callback=ProgressPercentage(src_f))
 | 
| 
	'Execute this module.'
 | 
	def _list_outputs(self):
 | 
	    iflogger = logging.getLogger(u'interface')
    outputs = self.output_spec().get()
    out_files = []
    use_hardlink = str2bool(config.get(u'execution', u'try_hard_link_datasink'))
    if isdefined(self.inputs.local_copy):
        outdir = self.inputs.local_copy
    else:
        outdir = self.inputs.base_directory
        if (not isdefined(outdir)):
            outdir = u'.'
    (s3_flag, bucket_name) = self._check_s3_base_dir()
    if s3_flag:
        s3dir = self.inputs.base_directory
        if self.inputs.bucket:
            bucket = self.inputs.bucket
        else:
            try:
                bucket = self._fetch_bucket(bucket_name)
            except Exception as exc:
                s3dir = u'<N/A>'
                if (not isdefined(self.inputs.local_copy)):
                    local_out_exception = os.path.join(os.path.expanduser(u'~'), (u's3_datasink_' + bucket_name))
                    outdir = local_out_exception
                iflogger.info((u'Access to S3 failed! Storing outputs locally at: %s\nError: %s' % (outdir, exc)))
    else:
        s3dir = u'<N/A>'
    if isdefined(self.inputs.container):
        outdir = os.path.join(outdir, self.inputs.container)
        s3dir = os.path.join(s3dir, self.inputs.container)
    if (outdir != s3dir):
        outdir = os.path.abspath(outdir)
        if (not os.path.exists(outdir)):
            try:
                os.makedirs(outdir)
            except OSError as inst:
                if (u'File exists' in inst.strerror):
                    pass
                else:
                    raise inst
    for (key, files) in list(self.inputs._outputs.items()):
        if (not isdefined(files)):
            continue
        iflogger.debug((u'key: %s files: %s' % (key, str(files))))
        files = filename_to_list(files)
        tempoutdir = outdir
        if s3_flag:
            s3tempoutdir = s3dir
        for d in key.split(u'.'):
            if (d[0] == u'@'):
                continue
            tempoutdir = os.path.join(tempoutdir, d)
            if s3_flag:
                s3tempoutdir = os.path.join(s3tempoutdir, d)
        if isinstance(files, list):
            if isinstance(files[0], list):
                files = [item for sublist in files for item in sublist]
        for src in filename_to_list(files):
            src = os.path.abspath(src)
            if (not os.path.isfile(src)):
                src = os.path.join(src, u'')
            dst = self._get_dst(src)
            if s3_flag:
                s3dst = os.path.join(s3tempoutdir, dst)
                s3dst = self._substitute(s3dst)
            dst = os.path.join(tempoutdir, dst)
            dst = self._substitute(dst)
            (path, _) = os.path.split(dst)
            if s3_flag:
                self._upload_to_s3(bucket, src, s3dst)
                out_files.append(s3dst)
            if ((not s3_flag) or isdefined(self.inputs.local_copy)):
                if (not os.path.exists(path)):
                    try:
                        os.makedirs(path)
                    except OSError as inst:
                        if (u'File exists' in inst.strerror):
                            pass
                        else:
                            raise inst
                if os.path.isfile(src):
                    iflogger.debug((u'copyfile: %s %s' % (src, dst)))
                    copyfile(src, dst, copy=True, hashmethod=u'content', use_hardlink=use_hardlink)
                    out_files.append(dst)
                elif os.path.isdir(src):
                    if (os.path.exists(dst) and self.inputs.remove_dest_dir):
                        iflogger.debug((u'removing: %s' % dst))
                        shutil.rmtree(dst)
                    iflogger.debug((u'copydir: %s %s' % (src, dst)))
                    copytree(src, dst)
                    out_files.append(dst)
    outputs[u'out_file'] = out_files
    return outputs
 | 
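A usage sketch (paths are placeholders; `infields` adds the dynamic input used below):

```python
from nipype.interfaces.io import DataSink

ds = DataSink(infields=['anat'])
ds.inputs.base_directory = '/data/outputs'   # or 's3://my-bucket/outputs'
ds.inputs.container = 'sub-01'
ds.inputs.anat = 'T1_brain.nii.gz'           # copied under /data/outputs/sub-01/anat/
res = ds.run()
```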
| 
	'Parameters
----------
infields : list of str
Indicates the input fields to be dynamically created
outfields : list of str
Indicates output fields to be dynamically created
See class examples for usage'
 | 
	def __init__(self, infields=None, outfields=None, **kwargs):
 | 
	    if (not outfields):
        outfields = [u'outfiles']
    super(S3DataGrabber, self).__init__(**kwargs)
    undefined_traits = {}
    self._infields = infields
    self._outfields = outfields
    if infields:
        for key in infields:
            self.inputs.add_trait(key, traits.Any)
            undefined_traits[key] = Undefined
    self.inputs.add_trait(u'field_template', traits.Dict(traits.Enum(outfields), desc=u'arguments that fit into template'))
    undefined_traits[u'field_template'] = Undefined
    if (not isdefined(self.inputs.template_args)):
        self.inputs.template_args = {}
    for key in outfields:
        if (key not in self.inputs.template_args):
            if infields:
                self.inputs.template_args[key] = [infields]
            else:
                self.inputs.template_args[key] = []
    self.inputs.trait_set(trait_change_notify=False, **undefined_traits)
 | 
| 
	'S3 specific: Downloads relevant files to a local folder specified
Using traits.Any instead of OutputMultiPath till add_trait bug
is fixed.'
 | 
	def _add_output_traits(self, base):
 | 
	    return add_traits(base, list(self.inputs.template_args.keys()))
 | 
| 
	'Parameters
----------
infields : list of str
Indicates the input fields to be dynamically created
outfields : list of str
Indicates output fields to be dynamically created
See class examples for usage'
 | 
	def __init__(self, infields=None, outfields=None, **kwargs):
 | 
	    if (not outfields):
        outfields = [u'outfiles']
    super(DataGrabber, self).__init__(**kwargs)
    undefined_traits = {}
    self._infields = infields
    self._outfields = outfields
    if infields:
        for key in infields:
            self.inputs.add_trait(key, traits.Any)
            undefined_traits[key] = Undefined
    self.inputs.add_trait(u'field_template', traits.Dict(traits.Enum(outfields), desc=u'arguments that fit into template'))
    undefined_traits[u'field_template'] = Undefined
    if (not isdefined(self.inputs.template_args)):
        self.inputs.template_args = {}
    for key in outfields:
        if (key not in self.inputs.template_args):
            if infields:
                self.inputs.template_args[key] = [infields]
            else:
                self.inputs.template_args[key] = []
    self.inputs.trait_set(trait_change_notify=False, **undefined_traits)
 | 
| 
	'Using traits.Any instead of OutputMultiPath till add_trait bug
is fixed.'
 | 
	def _add_output_traits(self, base):
 | 
	    return add_traits(base, list(self.inputs.template_args.keys()))
 | 
| 
	'Create an instance with specific input fields.
Parameters
----------
templates : dictionary
Mapping from string keys to string template values.
The keys become output fields on the interface.
The templates should use {}-formatting syntax, where
the names in curly braces become input fields on the interface.
Format strings can also use glob wildcards to match multiple
files. At runtime, the values of the interface inputs will be
plugged into these templates, and the resulting strings will be
used to select files.'
 | 
	def __init__(self, templates, **kwargs):
 | 
	    super(SelectFiles, self).__init__(**kwargs)
    infields = []
    for (name, template) in list(templates.items()):
        for (_, field_name, _, _) in string.Formatter().parse(template):
            if (field_name is not None):
                field_name = re.match(u'\\w+', field_name).group()
                if (field_name not in infields):
                    infields.append(field_name)
    self._infields = infields
    self._outfields = list(templates)
    self._templates = templates
    undefined_traits = {}
    for field in infields:
        self.inputs.add_trait(field, traits.Any)
        undefined_traits[field] = Undefined
    self.inputs.trait_set(trait_change_notify=False, **undefined_traits)
 | 
| 
	'Add the dynamic output fields'
 | 
	def _add_output_traits(self, base):
 | 
	    return add_traits(base, list(self._templates.keys()))
 | 
| 
	'Find the files and expose them as interface outputs.'
 | 
	def _list_outputs(self):
 | 
	    outputs = {}
    info = dict([(k, v) for (k, v) in list(self.inputs.__dict__.items()) if (k in self._infields)])
    force_lists = self.inputs.force_lists
    if isinstance(force_lists, bool):
        force_lists = (self._outfields if force_lists else [])
    bad_fields = (set(force_lists) - set(self._outfields))
    if bad_fields:
        n_bad = len(bad_fields)
        bad_fields = u', '.join(list(bad_fields))
        plural = (u's' if (n_bad > 1) else u'')
        verb = (u'were' if (n_bad > 1) else u'was')
        msg = (u"The field%s '%s' %s set in 'force_lists' and not in 'templates'." % (plural, bad_fields, verb))
        raise ValueError(msg)
    for (field, template) in list(self._templates.items()):
        if isdefined(self.inputs.base_directory):
            template = op.abspath(op.join(self.inputs.base_directory, template))
        else:
            template = op.abspath(template)
        filled_template = template.format(**info)
        filelist = glob.glob(filled_template)
        if (not filelist):
            msg = (u'No files were found matching %s template: %s' % (field, filled_template))
            if self.inputs.raise_on_empty:
                raise IOError(msg)
            else:
                warn(msg)
        if self.inputs.sort_filelist:
            filelist = human_order_sorted(filelist)
        if (field not in force_lists):
            filelist = list_to_filename(filelist)
        outputs[field] = filelist
    return outputs
 | 
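A usage sketch with a hypothetical BIDS-style layout:

```python
from nipype.interfaces.io import SelectFiles

templates = {'anat': 'sub-{subject_id}/anat/sub-{subject_id}_T1w.nii.gz'}
sf = SelectFiles(templates, base_directory='/data/bids', sort_filelist=True)
sf.inputs.subject_id = '01'
res = sf.run()   # res.outputs.anat holds the matched path(s)
```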
| 
	'Parameters
----------
infields : list of str
Indicates the input fields to be dynamically created
outfields : list of str
Indicates output fields to be dynamically created
See class examples for usage'
 | 
	def __init__(self, infields=None, outfields=None, **kwargs):
 | 
	    super(XNATSource, self).__init__(**kwargs)
    undefined_traits = {}
    self._infields = infields
    if infields:
        for key in infields:
            self.inputs.add_trait(key, traits.Any)
            undefined_traits[key] = Undefined
        self.inputs.query_template_args[u'outfiles'] = [infields]
    if outfields:
        self.inputs.add_trait(u'field_template', traits.Dict(traits.Enum(outfields), desc=u'arguments that fit into query_template'))
        undefined_traits[u'field_template'] = Undefined
        outdict = {}
        for key in outfields:
            outdict[key] = []
        self.inputs.query_template_args = outdict
    self.inputs.trait_set(trait_change_notify=False, **undefined_traits)
 | 
| 
	'Using traits.Any instead of OutputMultiPath till add_trait bug
is fixed.'
 | 
	def _add_output_traits(self, base):
 | 
	    return add_traits(base, list(self.inputs.query_template_args.keys()))
 | 
| 
	'Execute this module.'
 | 
	def _list_outputs(self):
 | 
	    cache_dir = (self.inputs.cache_dir or tempfile.gettempdir())
    if self.inputs.config:
        xnat = pyxnat.Interface(config=self.inputs.config)
    else:
        xnat = pyxnat.Interface(self.inputs.server, self.inputs.user, self.inputs.pwd, cache_dir)
    if self.inputs.share:
        subject_id = self.inputs.subject_id
        result = xnat.select(u'xnat:subjectData', [u'xnat:subjectData/PROJECT', u'xnat:subjectData/SUBJECT_ID']).where((u'xnat:subjectData/SUBJECT_ID = %s AND' % subject_id))
        if (result.data and isinstance(result.data[0], dict)):
            result = result.data[0]
            shared = xnat.select((u'/project/%s/subject/%s' % (self.inputs.project_id, self.inputs.subject_id)))
            if (not shared.exists()):
                share_project = xnat.select((u'/project/%s' % self.inputs.project_id))
                if (not share_project.exists()):
                    share_project.insert()
                subject = xnat.select((u'/project/%(project)s/subject/%(subject_id)s' % result))
                subject.share(str(self.inputs.project_id))
    uri_template_args = dict(project_id=quote_id(self.inputs.project_id), subject_id=self.inputs.subject_id, experiment_id=quote_id(self.inputs.experiment_id))
    if self.inputs.share:
        uri_template_args[u'original_project'] = result[u'project']
    if self.inputs.assessor_id:
        uri_template_args[u'assessor_id'] = quote_id(self.inputs.assessor_id)
    elif self.inputs.reconstruction_id:
        uri_template_args[u'reconstruction_id'] = quote_id(self.inputs.reconstruction_id)
    for (key, files) in list(self.inputs._outputs.items()):
        for name in filename_to_list(files):
            if isinstance(name, list):
                for (i, file_name) in enumerate(name):
                    push_file(self, xnat, file_name, ((u'%s_' % i) + key), uri_template_args)
            else:
                push_file(self, xnat, name, key, uri_template_args)
 | 
| 
	'Execute this module.'
 | 
	def _list_outputs(self):
 | 
	    conn = sqlite3.connect(self.inputs.database_file, check_same_thread=False)
    c = conn.cursor()
    c.execute((((((u'INSERT OR REPLACE INTO %s (' % self.inputs.table_name) + u','.join(self._input_names)) + u') VALUES (') + u','.join(([u'?'] * len(self._input_names)))) + u')'), [getattr(self.inputs, name) for name in self._input_names])
    conn.commit()
    c.close()
    return None
 | 
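A usage sketch (database, table, and columns are hypothetical; the table must already exist with matching columns):

```python
from nipype.interfaces.io import SQLiteSink

sink = SQLiteSink(input_names=['subject_id', 'score'])
sink.inputs.database_file = 'results.db'
sink.inputs.table_name = 'scores'
sink.inputs.subject_id = 'sub-01'
sink.inputs.score = 0.93
sink.run()
```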
| 
	'Execute this module.'
 | 
	def _list_outputs(self):
 | 
	    import MySQLdb
    if isdefined(self.inputs.config):
        conn = MySQLdb.connect(db=self.inputs.database_name, read_default_file=self.inputs.config)
    else:
        conn = MySQLdb.connect(host=self.inputs.host, user=self.inputs.username, passwd=self.inputs.password, db=self.inputs.database_name)
    c = conn.cursor()
    c.execute((((((u'REPLACE INTO %s (' % self.inputs.table_name) + u','.join(self._input_names)) + u') VALUES (') + u','.join(([u'%s'] * len(self._input_names)))) + u')'), [getattr(self.inputs, name) for name in self._input_names])
    conn.commit()
    c.close()
    return None
 | 
| 
	'Parameters
----------
infields : list of str
Indicates the input fields to be dynamically created
outfields : list of str
Indicates output fields to be dynamically created
See class examples for usage'
 | 
	def __init__(self, infields=None, outfields=None, **kwargs):
 | 
	    try:
        paramiko
    except NameError:
        warn(u'The library paramiko needs to be installed for this module to run.')
    if (not outfields):
        outfields = [u'outfiles']
    kwargs = kwargs.copy()
    kwargs[u'infields'] = infields
    kwargs[u'outfields'] = outfields
    super(SSHDataGrabber, self).__init__(**kwargs)
    if (None in (self.inputs.username, self.inputs.password)):
        raise ValueError(u'either both username and password must be provided, or neither of them')
    if ((self.inputs.template_expression == u'regexp') and (self.inputs.template[(-1)] != u'$')):
        self.inputs.template += u'$'
 | 
| 
	'Method to instantiate TestRuntimeProfiler
Parameters
----------
self : TestRuntimeProfile'
 | 
	def setup_class(self):
 | 
	    self.num_gb = 1.0
    self.num_threads = 2
    self.mem_err_gb = 0.3
 | 
| 
	'Function to collect a range of runtime stats'
 | 
	def _collect_range_runtime_stats(self, num_threads):
 | 
	    import json
    import numpy as np
    import pandas as pd
    ram_gb_range = 10.0
    ram_gb_step = 0.25
    dict_list = []
    for num_gb in np.arange(0.25, (ram_gb_range + ram_gb_step), ram_gb_step):
        (cmd_start_str, cmd_fin_str) = self._run_cmdline_workflow(num_gb, num_threads)
        cmd_start_ts = json.loads(cmd_start_str)[u'start']
        cmd_node_stats = json.loads(cmd_fin_str)
        cmd_runtime_threads = int(cmd_node_stats[u'runtime_threads'])
        cmd_runtime_gb = float(cmd_node_stats[u'runtime_memory_gb'])
        cmd_finish_ts = cmd_node_stats[u'finish']
        (func_start_str, func_fin_str) = self._run_function_workflow(num_gb, num_threads)
        func_start_ts = json.loads(func_start_str)[u'start']
        func_node_stats = json.loads(func_fin_str)
        func_runtime_threads = int(func_node_stats[u'runtime_threads'])
        func_runtime_gb = float(func_node_stats[u'runtime_memory_gb'])
        func_finish_ts = func_node_stats[u'finish']
        cmd_threads_err = (cmd_runtime_threads - num_threads)
        cmd_gb_err = (cmd_runtime_gb - num_gb)
        func_threads_err = (func_runtime_threads - num_threads)
        func_gb_err = (func_runtime_gb - num_gb)
        results_dict = {u'input_threads': num_threads, u'input_gb': num_gb, u'cmd_runtime_threads': cmd_runtime_threads, u'cmd_runtime_gb': cmd_runtime_gb, u'func_runtime_threads': func_runtime_threads, u'func_runtime_gb': func_runtime_gb, u'cmd_threads_err': cmd_threads_err, u'cmd_gb_err': cmd_gb_err, u'func_threads_err': func_threads_err, u'func_gb_err': func_gb_err, u'cmd_start_ts': cmd_start_ts, u'cmd_finish_ts': cmd_finish_ts, u'func_start_ts': func_start_ts, u'func_finish_ts': func_finish_ts}
        dict_list.append(results_dict)
    runtime_results_df = pd.DataFrame(dict_list)
    return runtime_results_df
 | 
| 
	'Function to run the use_resources cmdline script in a nipype workflow
and return the runtime stats recorded by the profiler
Parameters
----------
self : TestRuntimeProfile
Returns
-------
finish_str : string
a json-compatible dictionary string containing the runtime
statistics of the nipype node that used system resources'
 | 
	def _run_cmdline_workflow(self, num_gb, num_threads):
 | 
	    import logging
    import os
    import shutil
    import tempfile
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util
    from nipype.pipeline.plugins.callback_log import log_nodes_cb
    base_dir = tempfile.mkdtemp()
    log_file = os.path.join(base_dir, u'callback.log')
    logger = logging.getLogger(u'callback')
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(log_file)
    logger.addHandler(handler)
    wf = pe.Workflow(name=u'test_runtime_prof_cmd')
    wf.base_dir = base_dir
    input_node = pe.Node(util.IdentityInterface(fields=[u'num_gb', u'num_threads']), name=u'input_node')
    input_node.inputs.num_gb = num_gb
    input_node.inputs.num_threads = num_threads
    resource_node = pe.Node(UseResources(), name=u'resource_node')
    resource_node.interface.estimated_memory_gb = num_gb
    resource_node.interface.num_threads = num_threads
    wf.connect(input_node, u'num_gb', resource_node, u'num_gb')
    wf.connect(input_node, u'num_threads', resource_node, u'num_threads')
    plugin_args = {u'n_procs': num_threads, u'memory_gb': num_gb, u'status_callback': log_nodes_cb}
    wf.run(plugin=u'MultiProc', plugin_args=plugin_args)
    with open(log_file, u'r') as log_handle:
        lines = log_handle.readlines()
        start_str = lines[0].rstrip(u'\n')
        finish_str = lines[1].rstrip(u'\n')
    shutil.rmtree(base_dir)
    return (start_str, finish_str)
 | 
| 
	'Function to run the use_resources() function in a nipype workflow
and return the runtime stats recorded by the profiler
Parameters
----------
self : TestRuntimeProfile
Returns
-------
finish_str : string
a json-compatible dictionary string containing the runtime
statistics of the nipype node that used system resources'
 | 
	def _run_function_workflow(self, num_gb, num_threads):
 | 
	    import logging
    import os
    import shutil
    import tempfile
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util
    from nipype.pipeline.plugins.callback_log import log_nodes_cb
    base_dir = tempfile.mkdtemp()
    log_file = os.path.join(base_dir, u'callback.log')
    logger = logging.getLogger(u'callback')
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(log_file)
    logger.addHandler(handler)
    wf = pe.Workflow(name=u'test_runtime_prof_func')
    wf.base_dir = base_dir
    input_node = pe.Node(util.IdentityInterface(fields=[u'num_gb', u'num_threads']), name=u'input_node')
    input_node.inputs.num_gb = num_gb
    input_node.inputs.num_threads = num_threads
    resource_node = pe.Node(util.Function(input_names=[u'num_threads', u'num_gb'], output_names=[], function=use_resources), name=u'resource_node')
    resource_node.interface.estimated_memory_gb = num_gb
    resource_node.interface.num_threads = num_threads
    wf.connect(input_node, u'num_gb', resource_node, u'num_gb')
    wf.connect(input_node, u'num_threads', resource_node, u'num_threads')
    plugin_args = {u'n_procs': num_threads, u'memory_gb': num_gb, u'status_callback': log_nodes_cb}
    wf.run(plugin=u'MultiProc', plugin_args=plugin_args)
    with open(log_file, u'r') as log_handle:
        lines = log_handle.readlines()
        start_str = lines[0].rstrip(u'\n')
        finish_str = lines[1].rstrip(u'\n')
    shutil.rmtree(base_dir)
    return (start_str, finish_str)
 | 
| 
	'Test runtime profiler correctly records workflow RAM/CPUs consumption
from a cmdline function'
 | 
	@pytest.mark.skipif((run_profile == False), reason=skip_profile_msg)
def test_cmdline_profiling(self):
 | 
	    import json
    import numpy as np
    num_gb = self.num_gb
    num_threads = self.num_threads
    (start_str, finish_str) = self._run_cmdline_workflow(num_gb, num_threads)
    node_stats = json.loads(finish_str)
    runtime_gb = float(node_stats[u'runtime_memory_gb'])
    runtime_threads = int(node_stats[u'runtime_threads'])
    allowed_gb_err = self.mem_err_gb
    runtime_gb_err = np.abs((runtime_gb - num_gb))
    expected_runtime_threads = num_threads
    mem_err = (u'Input memory: %f is not within %.3f GB of runtime memory: %f' % (num_gb, self.mem_err_gb, runtime_gb))
    threads_err = (u'Input threads: %d is not equal to runtime threads: %d' % (expected_runtime_threads, runtime_threads))
    assert (runtime_gb_err <= allowed_gb_err), mem_err
    assert (abs((expected_runtime_threads - runtime_threads)) <= 1), threads_err
 | 
| 
	'Test runtime profiler correctly records workflow RAM/CPUs consumption
from a python function'
 | 
	@pytest.mark.skipif(True, reason=u'https://github.com/nipy/nipype/issues/1663')
@pytest.mark.skipif((run_profile == False), reason=skip_profile_msg)
def test_function_profiling(self):
 | 
	    import json
    import numpy as np
    num_gb = self.num_gb
    num_threads = self.num_threads
    (start_str, finish_str) = self._run_function_workflow(num_gb, num_threads)
    node_stats = json.loads(finish_str)
    runtime_gb = float(node_stats[u'runtime_memory_gb'])
    runtime_threads = int(node_stats[u'runtime_threads'])
    allowed_gb_err = self.mem_err_gb
    runtime_gb_err = np.abs((runtime_gb - num_gb))
    expected_runtime_threads = num_threads
    mem_err = (u'Input memory: %f is not within %.3f GB of runtime memory: %f' % (num_gb, self.mem_err_gb, runtime_gb))
    threads_err = (u'Input threads: %d is not equal to runtime threads: %d' % (expected_runtime_threads, runtime_threads))
    assert (runtime_gb_err <= allowed_gb_err), mem_err
    assert (abs((expected_runtime_threads - runtime_threads)) <= 1), threads_err
 | 
| 
	'Test a node using the SignalExtraction interface.
Unlike interface.run(), node.run() checks the traits'
 | 
	def test_signal_extr_traits_valid(self):
 | 
	    node = pe.Node(iface.SignalExtraction(in_file=os.path.abspath(self.filenames['in_file']), label_files=os.path.abspath(self.filenames['label_files']), class_labels=self.labels, incl_shared_variance=False), name='SignalExtraction')
    node.run()
 | 
| 
	'Convenience method for converting input arrays [1,2,3] to commandline format \'1x2x3\''
 | 
	@staticmethod
def _format_xarray(val):
 | 
	    return u'x'.join([str(x) for x in val])
 | 
| 
	'Set the default number of threads for ITK calls
This method is used to set the default number of ITK threads for all
the ANTS interfaces. However, setting this will not update the number
of threads for any existing instances.  For these, assign the
<instance>.inputs.num_threads'
 | 
	@classmethod
def set_default_num_threads(cls, num_threads):
 | 
	    cls._num_threads = num_threads
 | 
| 
	'Format the antsRegistration -m metric argument(s).
Parameters
index: the stage index'
 | 
	def _format_metric(self, index):
 | 
	    name_input = self.inputs.metric[index]
    stage_inputs = dict(fixed_image=self.inputs.fixed_image[0], moving_image=self.inputs.moving_image[0], metric=name_input, weight=self.inputs.metric_weight[index], radius_or_bins=self.inputs.radius_or_number_of_bins[index], optional=self.inputs.radius_or_number_of_bins[index])
    if (isdefined(self.inputs.sampling_strategy) and self.inputs.sampling_strategy):
        sampling_strategy = self.inputs.sampling_strategy[index]
        if sampling_strategy:
            stage_inputs[u'sampling_strategy'] = sampling_strategy
    if (isdefined(self.inputs.sampling_percentage) and self.inputs.sampling_percentage):
        sampling_percentage = self.inputs.sampling_percentage[index]
        if sampling_percentage:
            stage_inputs[u'sampling_percentage'] = sampling_percentage
    if isinstance(name_input, list):
        items = list(stage_inputs.items())
        indexes = list(range(0, len(name_input)))
        specs = list()
        for i in indexes:
            temp = dict([(k, v[i]) for (k, v) in items])
            if (len(self.inputs.fixed_image) == 1):
                temp[u'fixed_image'] = self.inputs.fixed_image[0]
            else:
                temp[u'fixed_image'] = self.inputs.fixed_image[i]
            if (len(self.inputs.moving_image) == 1):
                temp[u'moving_image'] = self.inputs.moving_image[0]
            else:
                temp[u'moving_image'] = self.inputs.moving_image[i]
            specs.append(temp)
    else:
        specs = [stage_inputs]
    return [self._format_metric_argument(**spec) for spec in specs]
 | 
| 
	'Copy header from input image to an output image'
 | 
	def _copy_header(self, fname):
 | 
	    import nibabel as nb
    in_img = nb.load(self.inputs.input_image)
    out_img = nb.load(fname, mmap=False)
    new_img = out_img.__class__(out_img.get_data(), in_img.affine, in_img.header)
    new_img.set_data_dtype(out_img.get_data_dtype())
    new_img.to_filename(fname)
 | 
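A standalone version of the same header-copy pattern, assuming nibabel and numpy are installed; `np.asanyarray(img.dataobj)` is used here in place of the deprecated `get_data()`:

```python
import nibabel as nb
import numpy as np

def copy_header(source_fname, target_fname):
    src = nb.load(source_fname)
    tgt = nb.load(target_fname, mmap=False)
    # Rebuild the target image with the source affine and header,
    # but keep the target's on-disk data type.
    fixed = tgt.__class__(np.asanyarray(tgt.dataobj), src.affine, src.header)
    fixed.set_data_dtype(tgt.get_data_dtype())
    fixed.to_filename(target_fname)
```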
| 
	'Initializes the interface to MATLAB
(default \'matlab -nodesktop -nosplash\').'
 | 
	def __init__(self, matlab_cmd=None, **inputs):
 | 
	    super(MatlabCommand, self).__init__(**inputs)
    if (matlab_cmd and isdefined(matlab_cmd)):
        self._cmd = matlab_cmd
    elif self._default_matlab_cmd:
        self._cmd = self._default_matlab_cmd
    if (self._default_mfile and (not isdefined(self.inputs.mfile))):
        self.inputs.mfile = self._default_mfile
    if (self._default_paths and (not isdefined(self.inputs.paths))):
        self.inputs.paths = self._default_paths
    if ((not isdefined(self.inputs.single_comp_thread)) and (not isdefined(self.inputs.uses_mcr))):
        if config.getboolean(u'execution', u'single_thread_matlab'):
            self.inputs.single_comp_thread = True
    self.inputs.terminal_output = u'allatonce'
 | 
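A minimal usage sketch of this constructor; actually running it requires a working MATLAB installation:

```python
from nipype.interfaces.matlab import MatlabCommand

mlab = MatlabCommand(matlab_cmd='matlab -nodesktop -nosplash')
mlab.inputs.script = "disp('hello from MATLAB')"
# result = mlab.run()  # uncomment once MATLAB is available
```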
| 
	'Set the default MATLAB command line for MATLAB classes.
This method sets the default command for all MATLAB
subclasses. However, setting this will not update the command
for any existing instances. For these, assign
<instance>.inputs.matlab_cmd directly.'
 | 
	@classmethod
def set_default_matlab_cmd(cls, matlab_cmd):
 | 
	    cls._default_matlab_cmd = matlab_cmd
 | 
| 
	'Set the default MATLAB script file format for MATLAB classes.
This method sets the default script file format for all MATLAB
subclasses. However, setting this will not update the format
for any existing instances. For these, assign
<instance>.inputs.mfile directly.'
 | 
	@classmethod
def set_default_mfile(cls, mfile):
 | 
	    cls._default_mfile = mfile
 | 
| 
	'Set the default MATLAB paths for MATLAB classes.
This method sets the default paths for all MATLAB
subclasses. However, setting this will not update the paths
for any existing instances. For these, assign
<instance>.inputs.paths directly.'
 | 
	@classmethod
def set_default_paths(cls, paths):
 | 
	    cls._default_paths = paths
 | 
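The three setters above are typically called once, before any instances are built. A sketch (the SPM path is a placeholder):

```python
from nipype.interfaces.matlab import MatlabCommand

MatlabCommand.set_default_matlab_cmd('matlab -nodesktop -nosplash')
MatlabCommand.set_default_mfile(True)
MatlabCommand.set_default_paths(['/opt/spm12'])  # placeholder path

mlab = MatlabCommand()  # inherits all three defaults
```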
| 
	'Generates the MATLAB command and, if an mfile is specified, writes the script to disk.'
 | 
	def _gen_matlab_command(self, argstr, script_lines):
 | 
	    cwd = os.getcwd()
    mfile = (self.inputs.mfile or self.inputs.uses_mcr)
    paths = []
    if isdefined(self.inputs.paths):
        paths = self.inputs.paths
    prescript = self.inputs.prescript
    postscript = self.inputs.postscript
    if mfile:
        prescript.insert(0, u"fprintf(1,'Executing %s at %s:\\n',mfilename(),datestr(now));")
    else:
        prescript.insert(0, u"fprintf(1,'Executing code at %s:\\n',datestr(now));")
    for path in paths:
        prescript.append((u"addpath('%s');\n" % path))
    if (not mfile):
        script_lines = u','.join([line for line in script_lines.split(u'\n') if (not line.strip().startswith(u'%'))])
    script_lines = ((u'\n'.join(prescript) + script_lines) + u'\n'.join(postscript))
    if mfile:
        with open(os.path.join(cwd, self.inputs.script_file), u'wt') as mfile:
            mfile.write(script_lines)
        if self.inputs.uses_mcr:
            script = (u'%s' % os.path.join(cwd, self.inputs.script_file))
        else:
            script = (u"addpath('%s');%s" % (cwd, self.inputs.script_file.split(u'.')[0]))
    else:
        script = u''.join(script_lines.split(u'\n'))
    return (argstr % script)
 | 
| 
	'Check for minc version on the system
Parameters
None
Returns
version : dict
Version number as dict or None if MINC not found'
 | 
	@staticmethod
def version():
 | 
	    try:
        clout = CommandLine(command=u'mincinfo', args=u'-version', terminal_output=u'allatonce').run()
    except IOError:
        return None
    out = clout.runtime.stdout
    def read_program_version(s):
        if (u'program' in s):
            return s.split(u':')[1].strip()
        return None
    def read_libminc_version(s):
        if (u'libminc' in s):
            return s.split(u':')[1].strip()
        return None
    def read_netcdf_version(s):
        if (u'netcdf' in s):
            return u' '.join(s.split(u':')[1:]).strip()
        return None
    def read_hdf5_version(s):
        if (u'HDF5' in s):
            return s.split(u':')[1].strip()
        return None
    versions = {u'minc': None, u'libminc': None, u'netcdf': None, u'hdf5': None}
    for l in out.split(u'\n'):
        for (name, f) in [(u'minc', read_program_version), (u'libminc', read_libminc_version), (u'netcdf', read_netcdf_version), (u'hdf5', read_hdf5_version)]:
            if (f(l) is not None):
                versions[name] = f(l)
    return versions
 | 
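Exercising the parsing scheme above on a made-up sample of `mincinfo -version` output (the real field layout may differ):

```python
sample = "program: 2.4.03\nlibminc: 2.4.03\nnetcdf : 4.6.1\nHDF5: 1.10.4"

def read_field(line, key):
    # Everything after the first ':' when the key appears in the line.
    return line.split(':', 1)[1].strip() if key in line else None

versions = {'minc': None, 'libminc': None, 'netcdf': None, 'hdf5': None}
for line in sample.split('\n'):
    for name, key in [('minc', 'program'), ('libminc', 'libminc'),
                      ('netcdf', 'netcdf'), ('hdf5', 'HDF5')]:
        value = read_field(line, key)
        if value is not None:
            versions[name] = value
print(versions)
# {'minc': '2.4.03', 'libminc': '2.4.03', 'netcdf': '4.6.1', 'hdf5': '1.10.4'}
```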
| 
	'A number of the command line options expect precisely one or two files.'
 | 
	def _parse_inputs(self):
 | 
	    nr_input_files = len(self.inputs.input_files)
    for n in self.input_spec.bool_or_const_traits:
        t = self.inputs.__getattribute__(n)
        if isdefined(t):
            if isinstance(t, bool):
                if (nr_input_files != 2):
                    raise ValueError((u'Due to the %s option we expected 2 files but input_files is of length %d' % (n, nr_input_files)))
            elif isinstance(t, float):
                if (nr_input_files != 1):
                    raise ValueError((u'Due to the %s option we expected 1 file but input_files is of length %d' % (n, nr_input_files)))
            else:
                raise ValueError((u'Argument should be a bool or const, but got: %s' % t))
    for n in self.input_spec.single_volume_traits:
        t = self.inputs.__getattribute__(n)
        if isdefined(t):
            if (nr_input_files != 1):
                raise ValueError((u'Due to the %s option we expected 1 file but input_files is of length %d' % (n, nr_input_files)))
    for n in self.input_spec.two_volume_traits:
        t = self.inputs.__getattribute__(n)
        if isdefined(t):
            if (nr_input_files != 2):
                raise ValueError((u'Due to the %s option we expected 2 files but input_files is of length %d' % (n, nr_input_files)))
    for n in self.input_spec.n_volume_traits:
        t = self.inputs.__getattribute__(n)
        if isdefined(t):
            if (not (nr_input_files >= 1)):
                raise ValueError((u'Due to the %s option we expected at least one file but input_files is of length %d' % (n, nr_input_files)))
    return super(Math, self)._parse_inputs()
 | 
| 
	'Generate a filename based on the given parameters.
The filename will take the form: cwd/basename<suffix><ext>.
If change_ext is True, it will use the extension specified in
<instance>.inputs.output_type.
Parameters
basename : str
Filename to base the new filename on.
cwd : str
Path to prefix to the new filename. (default is os.getcwd())
suffix : str
Suffix to add to the `basename`. (default is \'\')
change_ext : bool
Flag to change the filename extension to the given `ext`.
(default is True)
Returns
fname : str
New filename based on the given parameters.'
 | 
	def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True, ext=u'.nii.gz'):
 | 
	    if (basename == u''):
        msg = (u'Unable to generate filename for command %s. ' % self.cmd)
        msg += u'basename is not set!'
        raise ValueError(msg)
    if (cwd is None):
        cwd = os.getcwd()
    if change_ext:
        if suffix:
            suffix = u''.join((suffix, ext))
        else:
            suffix = ext
    if (suffix is None):
        suffix = u''
    fname = fname_presuffix(basename, suffix=suffix, use_ext=False, newpath=cwd)
    return fname
 | 
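A condensed standalone rendering of the same naming rule, assuming nipype's `fname_presuffix` helper:

```python
import os
from nipype.utils.filemanip import fname_presuffix

def gen_fname(basename, cwd=None, suffix=None, change_ext=True, ext='.nii.gz'):
    if not basename:
        raise ValueError('basename is not set!')
    cwd = cwd or os.getcwd()
    if change_ext:
        # Append ext to the suffix so the result is basename<suffix><ext>.
        suffix = (suffix or '') + ext
    return fname_presuffix(basename, suffix=suffix or '',
                           use_ext=False, newpath=cwd)

# gen_fname('brain', cwd='/tmp', suffix='_mask') -> '/tmp/brain_mask.nii.gz'
```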
| 
	'Init method calling super. No version to be checked.'
 | 
	def __init__(self, **inputs):
 | 
	    super(NiftyFitCommand, self).__init__(**inputs)
 | 
| 
	'Rewrite the cmdline to write the options into a text file.'
 | 
	@property
def cmdline(self):
 | 
	    argv = super(RegAverage, self).cmdline
    reg_average_cmd = os.path.join(os.getcwd(), u'reg_average_cmd')
    with open(reg_average_cmd, u'w') as f:
        f.write(argv)
    return (u'%s --cmd_file %s' % (self.cmd, reg_average_cmd))
 | 
| 
	'Returns the path to the SPM directory in the Matlab path
If path not found, returns None.
Parameters
matlab_cmd: str
Sets the default matlab command. If None, the value of the
environment variable SPMMCRCMD will be used if set and use_mcr
is True or the environment variable FORCE_SPMMCR is set.
If one of FORCE_SPMMCR or SPMMCRCMD is not set, the existence
of the environment variable MATLABCMD is checked and its value
is used as the matlab command if possible.
If none of the above was successful, the fallback value of
\'matlab -nodesktop -nosplash\' will be used.
paths : str
use_mcr : bool
Returns
spm_path : string representing path to SPM directory
returns None if path not found'
 | 
	@staticmethod
def version(matlab_cmd=None, paths=None, use_mcr=None):
 | 
	    if (use_mcr or (u'FORCE_SPMMCR' in os.environ)):
        use_mcr = True
        if (matlab_cmd is None):
            try:
                matlab_cmd = os.environ[u'SPMMCRCMD']
            except KeyError:
                pass
    if (matlab_cmd is None):
        try:
            matlab_cmd = os.environ[u'MATLABCMD']
        except KeyError:
            matlab_cmd = u'matlab -nodesktop -nosplash'
    mlab = MatlabCommand(matlab_cmd=matlab_cmd)
    mlab.inputs.mfile = False
    if paths:
        mlab.inputs.paths = paths
    if use_mcr:
        mlab.inputs.nodesktop = Undefined
        mlab.inputs.nosplash = Undefined
        mlab.inputs.single_comp_thread = Undefined
        mlab.inputs.mfile = True
        mlab.inputs.uses_mcr = True
    mlab.inputs.script = u"\nif isempty(which('spm')),\nthrow(MException('SPMCheck:NotFound','SPM not in matlab path'));\nend;\nspm_path = spm('dir');\n[name, version] = spm('ver');\nfprintf(1, 'NIPYPE path:%s|name:%s|release:%s', spm_path, name, version);\nexit;\n"
    try:
        out = mlab.run()
    except (IOError, RuntimeError) as e:
        logger.debug(str(e))
        return None
    else:
        out = sd._strip_header(out.runtime.stdout)
        out_dict = {}
        for part in out.split(u'|'):
            (key, val) = part.split(u':')
            out_dict[key] = val
        return out_dict
 | 
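The probe script prints a single `key:value|key:value` line; parsing it is straightforward (sample values are made up):

```python
sample = 'path:/opt/spm12|name:SPM12|release:7771'
# split(':', 1) keeps paths containing ':' intact, unlike a bare split(':')
out_dict = dict(part.split(':', 1) for part in sample.split('|'))
print(out_dict)  # {'path': '/opt/spm12', 'name': 'SPM12', 'release': '7771'}
```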
| 
	'Executes the SPM function using MATLAB.'
 | 
	def _run_interface(self, runtime):
 | 
	    self.mlab.inputs.script = self._make_matlab_command(deepcopy(self._parse_inputs()))
    results = self.mlab.run()
    runtime.returncode = results.runtime.returncode
    if self.mlab.inputs.uses_mcr:
        if (u'Skipped' in results.runtime.stdout):
            self.raise_exception(runtime)
    runtime.stdout = results.runtime.stdout
    runtime.stderr = results.runtime.stderr
    runtime.merged = results.runtime.merged
    return runtime
 | 
| 
	'Determine the expected outputs based on inputs.'
 | 
	def _list_outputs(self):
 | 
	    raise NotImplementedError
 | 
| 
	'Convert input to appropriate format for SPM.'
 | 
	def _format_arg(self, opt, spec, val):
 | 
	    if spec.is_trait_type(traits.Bool):
        return int(val)
    else:
        return val
 | 
| 
	'Encloses a dict representation within hierarchical lists.
In order to create an appropriate SPM job structure, a Python
dict storing the job needs to be modified so that each dict
embedded in a dict is enclosed as a list element.
Examples
>>> a = SPMCommand()._reformat_dict_for_savemat(dict(a=1,
...                                                  b=dict(c=2, d=3)))
>>> a == [{\'a\': 1, \'b\': [{\'c\': 2, \'d\': 3}]}]
True'
 | 
	def _reformat_dict_for_savemat(self, contents):
 | 
	    newdict = {}
    try:
        for (key, value) in list(contents.items()):
            if isinstance(value, dict):
                if value:
                    newdict[key] = self._reformat_dict_for_savemat(value)
            else:
                newdict[key] = value
        return [newdict]
    except TypeError:
        print(u'Requires dict input')
 | 
| 
	'Recursive function to generate an spm job specification as a string.
Parameters
prefix : string
The prefix to prepend to the generated MATLAB variable names.
contents : dict
A non-tuple Python structure containing spm job
information that gets converted to an appropriate sequence of
matlab commands.'
 | 
	def _generate_job(self, prefix=u'', contents=None):
 | 
	    jobstring = u''
    if (contents is None):
        return jobstring
    if isinstance(contents, list):
        for (i, value) in enumerate(contents):
            if prefix.endswith(u')'):
                newprefix = (u'%s,%d)' % (prefix[:(-1)], (i + 1)))
            else:
                newprefix = (u'%s(%d)' % (prefix, (i + 1)))
            jobstring += self._generate_job(newprefix, value)
        return jobstring
    if isinstance(contents, dict):
        for (key, value) in list(contents.items()):
            newprefix = (u'%s.%s' % (prefix, key))
            jobstring += self._generate_job(newprefix, value)
        return jobstring
    if isinstance(contents, np.ndarray):
        if (contents.dtype == np.dtype(object)):
            if prefix:
                jobstring += (u'%s = {...\n' % prefix)
            else:
                jobstring += u'{...\n'
            for (i, val) in enumerate(contents):
                if isinstance(val, np.ndarray):
                    jobstring += self._generate_job(prefix=None, contents=val)
                elif isinstance(val, list):
                    items_format = []
                    for el in val:
                        items_format += [(u'{}' if (not isinstance(el, (str, bytes))) else u"'{}'")]
                    val_format = u', '.join(items_format).format
                    jobstring += u'[{}];...\n'.format(val_format(*val))
                elif isinstance(val, (str, bytes)):
                    jobstring += u"'{}';...\n".format(val)
                else:
                    jobstring += (u'%s;...\n' % str(val))
            jobstring += u'};\n'
        else:
            for (i, val) in enumerate(contents):
                for field in val.dtype.fields:
                    if prefix:
                        newprefix = (u'%s(%d).%s' % (prefix, (i + 1), field))
                    else:
                        newprefix = (u'(%d).%s' % ((i + 1), field))
                    jobstring += self._generate_job(newprefix, val[field])
        return jobstring
    if isinstance(contents, (str, bytes)):
        jobstring += (u"%s    =    '%s';\n" % (prefix, contents))
        return jobstring
    jobstring += (u'%s = %s;\n' % (prefix, str(contents)))
    return jobstring
 | 
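A reduced re-implementation covering only dicts, lists, and scalars, to show the shape of the generated MATLAB assignments (the ndarray handling above is omitted):

```python
def generate_job(prefix, contents):
    # Dicts become dotted fields, lists become 1-based indices,
    # strings are quoted; other scalars are printed as-is.
    if isinstance(contents, list):
        return ''.join(generate_job('%s(%d)' % (prefix, i + 1), v)
                       for i, v in enumerate(contents))
    if isinstance(contents, dict):
        return ''.join(generate_job('%s.%s' % (prefix, k), v)
                       for k, v in contents.items())
    if isinstance(contents, str):
        return "%s = '%s';\n" % (prefix, contents)
    return '%s = %s;\n' % (prefix, contents)

print(generate_job('jobs{1}.spm.temporal.st', {'tr': 2.0, 'so': [1, 2, 3]}))
# jobs{1}.spm.temporal.st.tr = 2.0;
# jobs{1}.spm.temporal.st.so(1) = 1;
# jobs{1}.spm.temporal.st.so(2) = 2;
# jobs{1}.spm.temporal.st.so(3) = 3;
```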
| 
	'Generates an mfile to build the job structure.
Parameters
contents : list
a list of dicts generated by _parse_inputs
in each subclass
cwd : string
default os.getcwd()
Returns
mscript : string
contents of a script called by matlab'
 | 
	def _make_matlab_command(self, contents, postscript=None):
 | 
	    cwd = os.getcwd()
    mscript = u"\n                                %%    Generated    by    nipype.interfaces.spm\n                                if    isempty(which('spm')),\n                                                    throw(MException('SPMCheck:NotFound',    'SPM    not    in    matlab    path'));\n                                end\n                                [name,    version]    =    spm('ver');\n                                fprintf('SPM    version:    %s    Release:    %s\\n',name,    version);\n                                fprintf('SPM    path:    %s\\n',    which('spm'));\n                                spm('Defaults','fMRI');\n\n                                if    strcmp(name,    'SPM8')    ||    strcmp(name(1:5),    'SPM12'),\n                                            spm_jobman('initcfg');\n                                            spm_get_defaults('cmdline',    1);\n                                end\n\n                                "
    if self.mlab.inputs.mfile:
        if (isdefined(self.inputs.use_v8struct) and self.inputs.use_v8struct):
            mscript += self._generate_job((u'jobs{1}.spm.%s.%s' % (self.jobtype, self.jobname)), contents[0])
        elif (self.jobname in [u'st', u'smooth', u'preproc', u'preproc8', u'fmri_spec', u'fmri_est', u'factorial_design', u'defs']):
            mscript += self._generate_job((u'jobs{1}.%s{1}.%s(1)' % (self.jobtype, self.jobname)), contents[0])
        else:
            mscript += self._generate_job((u'jobs{1}.%s{1}.%s{1}' % (self.jobtype, self.jobname)), contents[0])
    else:
        jobdef = {u'jobs': [{self.jobtype: [{self.jobname: self._reformat_dict_for_savemat(contents[0])}]}]}
        savemat(os.path.join(cwd, (u'pyjobs_%s.mat' % self.jobname)), jobdef)
        mscript += (u'load pyjobs_%s;\n\n' % self.jobname)
    mscript += u"\nspm_jobman('run', jobs);\n\n"
    if self.inputs.use_mcr:
        mscript += u"\nif strcmp(name, 'SPM8') || strcmp(name(1:5), 'SPM12'),\n    close('all', 'force');\nend;\n"
    if (postscript is not None):
        mscript += postscript
    return mscript
 | 
| 
	'Trait handles neuroimaging files.
Parameters
types : list
Strings of file format types accepted
allow_compressed : boolean
Indicates whether the file format can be compressed'
 | 
	def __init__(self, value=u'', filter=None, auto_set=False, entries=0, exists=False, types=[u'nifti1', u'nifti2'], allow_compressed=False, **metadata):
 | 
	    self.types = types
    self.allow_compressed = allow_compressed
    super(ImageFileSPM, self).__init__(value, filter, auto_set, entries, exists, types, allow_compressed, **metadata)
 | 
| 
	'Makes a filename to hold the inverse transform if not specified'
 | 
	def _make_inv_file(self):
 | 
	    invmat = fname_presuffix(self.inputs.mat, prefix=u'inverse_')
    return invmat
 | 
| 
	'Makes a name for the matfile if one doesn\'t exist'
 | 
	def _make_mat_file(self):
 | 
	    (pth, mv, _) = split_filename(self.inputs.moving)
    (_, tgt, _) = split_filename(self.inputs.target)
    mat = os.path.join(pth, (u'%s_to_%s.mat' % (mv, tgt)))
    return mat
 | 
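The naming convention above, spelled out with nipype's `split_filename` (the paths are illustrative):

```python
import os
from nipype.utils.filemanip import split_filename

pth, mv, _ = split_filename('/data/sub01/moving.nii')
_, tgt, _ = split_filename('/data/sub01/target.nii')
mat = os.path.join(pth, '%s_to_%s.mat' % (mv, tgt))
print(mat)  # /data/sub01/moving_to_target.mat
```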
| 
	'checks for SPM, generates script'
 | 
	def _make_matlab_command(self, _):
 | 
	    if (not isdefined(self.inputs.mat)):
        self.inputs.mat = self._make_mat_file()
    if (not isdefined(self.inputs.invmat)):
        self.inputs.invmat = self._make_inv_file()
    script = (u"\n                                target    =    '%s';\n                                moving    =    '%s';\n                                targetv    =    spm_vol(target);\n                                movingv    =    spm_vol(moving);\n                                x    =    spm_coreg(targetv,    movingv);\n                                M    =    spm_matrix(x);\n                                save('%s'    ,    'M'    );\n                                M    =    inv(M);\n                                save('%s','M')\n                                " % (self.inputs.target, self.inputs.moving, self.inputs.mat, self.inputs.invmat))
    return script
 | 
| 
	'checks for SPM, generates script'
 | 
	def _make_matlab_command(self, _):
 | 
	    outputs = self._list_outputs()
    self.inputs.out_file = outputs[u'out_file']
    script = (u"\n                                infile    =    '%s';\n                                outfile    =    '%s'\n                                transform    =    load('%s');\n\n                                V    =    spm_vol(infile);\n                                X    =    spm_read_vols(V);\n                                [p    n    e    v]    =    spm_fileparts(V.fname);\n                                V.mat    =    transform.M    *    V.mat;\n                                V.fname    =    fullfile(outfile);\n                                spm_write_vol(V,X);\n\n                                " % (self.inputs.in_file, self.inputs.out_file, self.inputs.mat))
    return script
 | 
| 
	'generates script'
 | 
	def _make_matlab_command(self, _):
 | 
	    if (not isdefined(self.inputs.out_file)):
        self.inputs.out_file = fname_presuffix(self.inputs.in_file, prefix=u'r')
    script = (u"\n                                flags.mean    =    0;\n                                flags.which    =    1;\n                                flags.mask    =    0;\n                                flags.interp    =    %d;\n                                infiles    =    strvcat('%s',    '%s');\n                                invols    =    spm_vol(infiles);\n                                spm_reslice(invols,    flags);\n                                " % (self.inputs.interp, self.inputs.space_defining, self.inputs.in_file))
    return script
 | 
| 
	'Convert input to appropriate format for spm'
 | 
	def _format_arg(self, opt, spec, val):
 | 
	    if (opt == u'in_files'):
        return scans_for_fnames(filename_to_list(val))
    if (opt == u'target'):
        return scans_for_fname(filename_to_list(val))
    if (opt == u'deformation'):
        return np.array([list_to_filename(val)], dtype=object)
    if (opt == u'deformation_field'):
        return np.array([list_to_filename(val)], dtype=object)
    return val
 | 
| 
	'Convert input to appropriate format for spm'
 | 
	def _format_arg(self, opt, spec, val):
 | 
	    if (opt == u'in_files'):
        return scans_for_fnames(filename_to_list(val))
    if (opt == u'target'):
        return scans_for_fname(filename_to_list(val))
    if (opt == u'deformation'):
        return np.array([list_to_filename(val)], dtype=object)
    if (opt == u'deformation_field'):
        return np.array([list_to_filename(val)], dtype=object)
    return val
 | 
| 
	'Convert input to appropriate format for spm'
 | 
	def _format_arg(self, opt, spec, val):
 | 
	    if (opt == u'in_files'):
        return np.array(val, dtype=object)
    if (opt == u'output_dir'):
        return np.array([val], dtype=object)
    if (opt == u'icedims'):
        if val:
            return 1
        return 0
    return super(DicomImport, self)._format_arg(opt, spec, val)
 | 
| 
	'Convert input to appropriate format for spm'
 | 
	def _format_arg(self, opt, spec, val):
 | 
	    if (opt == u'in_files'):
        return scans_for_fnames(filename_to_list(val), keep4d=False, separate_sessions=True)
    return super(SliceTiming, self)._format_arg(opt, spec, val)
 | 
| 
	'Convert input to appropriate format for spm'
 | 
	def _format_arg(self, opt, spec, val):
 | 
	    if (opt == u'in_files'):
        if (self.inputs.jobtype == u'write'):
            separate_sessions = False
        else:
            separate_sessions = True
        return scans_for_fnames(val, keep4d=False, separate_sessions=separate_sessions)
    return super(Realign, self)._format_arg(opt, spec, val)
 | 
| 
	'Validate spm realign options; if set to None, ignore.'
 | 
	def _parse_inputs(self):
 | 
	    einputs = super(Realign, self)._parse_inputs()
    return [{(u'%s' % self.inputs.jobtype): einputs[0]}]
 | 
| 
	'Convert input to appropriate format for spm'
 | 
	def _format_arg(self, opt, spec, val):
 | 
	    if ((opt == u'target') or ((opt == u'source') and (self.inputs.jobtype != u'write'))):
        return scans_for_fnames(filename_to_list(val), keep4d=True)
    if (opt == u'apply_to_files'):
        return np.array(filename_to_list(val), dtype=object)
    if ((opt == u'source') and (self.inputs.jobtype == u'write')):
        if isdefined(self.inputs.apply_to_files):
            return scans_for_fnames((val + self.inputs.apply_to_files))
        else:
            return scans_for_fnames(val)
    return super(Coregister, self)._format_arg(opt, spec, val)
 | 
| 
	'Validate spm coregister options; if set to None, ignore.'
 | 
	def _parse_inputs(self):
 | 
	    if (self.inputs.jobtype == u'write'):
        einputs = super(Coregister, self)._parse_inputs(skip=(u'jobtype', u'apply_to_files'))
    else:
        einputs = super(Coregister, self)._parse_inputs(skip=u'jobtype')
    jobtype = self.inputs.jobtype
    return [{(u'%s' % jobtype): einputs[0]}]
 | 
| 
	'Convert input to appropriate format for spm'
 | 
	def _format_arg(self, opt, spec, val):
 | 
	    if (opt == u'template'):
        return scans_for_fname(filename_to_list(val))
    if (opt == u'source'):
        return scans_for_fname(filename_to_list(val))
    if (opt == u'apply_to_files'):
        return scans_for_fnames(filename_to_list(val))
    if (opt == u'parameter_file'):
        return np.array([list_to_filename(val)], dtype=object)
    if (opt in [u'write_wrap']):
        if (len(val) != 3):
            raise ValueError((u'%s must have 3 elements' % opt))
    return super(Normalize, self)._format_arg(opt, spec, val)
 | 
| 
	'Validate spm normalize options; if set to None, ignore.'
 | 
	def _parse_inputs(self):
 | 
	    einputs = super(Normalize, self)._parse_inputs(skip=(u'jobtype', u'apply_to_files'))
    if isdefined(self.inputs.apply_to_files):
        inputfiles = deepcopy(self.inputs.apply_to_files)
        if isdefined(self.inputs.source):
            inputfiles.extend(self.inputs.source)
        einputs[0][u'subj'][u'resample'] = scans_for_fnames(inputfiles)
    jobtype = self.inputs.jobtype
    if (jobtype in [u'estwrite', u'write']):
        if (not isdefined(self.inputs.apply_to_files)):
            if isdefined(self.inputs.source):
                einputs[0][u'subj'][u'resample'] = scans_for_fname(self.inputs.source)
    return [{(u'%s' % jobtype): einputs[0]}]
 | 