desc | decl | bodies
---|---|---|
'Convert input to appropriate format for spm'
| def _format_arg(self, opt, spec, val):
| if (opt == u'tpm'):
return scans_for_fname(filename_to_list(val))
if (opt == u'image_to_align'):
return scans_for_fname(filename_to_list(val))
if (opt == u'apply_to_files'):
return scans_for_fnames(filename_to_list(val))
if (opt == u'deformation_file'):
return np.array([list_to_filename(val)], dtype=object)
if (opt in [u'nonlinear_regularization']):
if (len(val) != 5):
raise ValueError((u'%s must have 5 elements' % opt))
return super(Normalize12, self)._format_arg(opt, spec, val)
|
'validate spm normalize options if set to None ignore'
| def _parse_inputs(self, skip=()):
| einputs = super(Normalize12, self)._parse_inputs(skip=(u'jobtype', u'apply_to_files'))
if isdefined(self.inputs.apply_to_files):
inputfiles = deepcopy(self.inputs.apply_to_files)
if isdefined(self.inputs.image_to_align):
inputfiles.extend([self.inputs.image_to_align])
einputs[0][u'subj'][u'resample'] = scans_for_fnames(inputfiles)
jobtype = self.inputs.jobtype
if (jobtype in [u'estwrite', u'write']):
if (not isdefined(self.inputs.apply_to_files)):
if isdefined(self.inputs.image_to_align):
einputs[0][u'subj'][u'resample'] = scans_for_fname(self.inputs.image_to_align)
return [{(u'%s' % jobtype): einputs[0]}]
|
'Convert input to appropriate format for spm'
| def _format_arg(self, opt, spec, val):
| clean_masks_dict = {u'no': 0, u'light': 1, u'thorough': 2}
if (opt in [u'data', u'tissue_prob_maps']):
if isinstance(val, list):
return scans_for_fnames(val)
else:
return scans_for_fname(val)
if (u'output_type' in opt):
return [int(v) for v in val]
if (opt == u'mask_image'):
return scans_for_fname(val)
if (opt == u'clean_masks'):
return clean_masks_dict[val]
return super(Segment, self)._format_arg(opt, spec, val)
|
'Convert input to appropriate format for spm'
| def _format_arg(self, opt, spec, val):
| if (opt in [u'channel_files', u'channel_info']):
new_channel = {}
new_channel[u'vols'] = scans_for_fnames(self.inputs.channel_files)
if isdefined(self.inputs.channel_info):
info = self.inputs.channel_info
new_channel[u'biasreg'] = info[0]
new_channel[u'biasfwhm'] = info[1]
new_channel[u'write'] = [int(info[2][0]), int(info[2][1])]
return [new_channel]
elif (opt == u'tissues'):
new_tissues = []
for tissue in val:
new_tissue = {}
new_tissue[u'tpm'] = np.array([u','.join([tissue[0][0], str(tissue[0][1])])], dtype=object)
new_tissue[u'ngaus'] = tissue[1]
new_tissue[u'native'] = [int(tissue[2][0]), int(tissue[2][1])]
new_tissue[u'warped'] = [int(tissue[3][0]), int(tissue[3][1])]
new_tissues.append(new_tissue)
return new_tissues
elif (opt == u'write_deformation_fields'):
return super(NewSegment, self)._format_arg(opt, spec, [int(val[0]), int(val[1])])
else:
return super(NewSegment, self)._format_arg(opt, spec, val)
|
'Convert input to appropriate format for spm'
| def _format_arg(self, opt, spec, val):
| if (opt in [u'image_files']):
return scans_for_fnames(val, keep4d=True, separate_sessions=True)
elif (opt == u'regularization_form'):
mapper = {u'Linear': 0, u'Membrane': 1, u'Bending': 2}
return mapper[val]
elif (opt == u'iteration_parameters'):
params = []
for param in val:
new_param = {}
new_param[u'its'] = param[0]
new_param[u'rparam'] = list(param[1])
new_param[u'K'] = param[2]
new_param[u'slam'] = param[3]
params.append(new_param)
return params
elif (opt == u'optimization_parameters'):
new_param = {}
new_param[u'lmreg'] = val[0]
new_param[u'cyc'] = val[1]
new_param[u'its'] = val[2]
return [new_param]
else:
return super(DARTEL, self)._format_arg(opt, spec, val)
|
'Convert input to appropriate format for spm'
| def _format_arg(self, opt, spec, val):
| if (opt in [u'template_file']):
return np.array([val], dtype=object)
elif (opt in [u'flowfield_files']):
return scans_for_fnames(val, keep4d=True)
elif (opt in [u'apply_to_files']):
return scans_for_fnames(val, keep4d=True, separate_sessions=True)
elif (opt == u'voxel_size'):
return list(val)
elif (opt == u'bounding_box'):
return list(val)
elif (opt == u'fwhm'):
if isinstance(val, list):
return val
else:
return [val, val, val]
else:
return super(DARTELNorm2MNI, self)._format_arg(opt, spec, val)
|
'Convert input to appropriate format for spm'
| def _format_arg(self, opt, spec, val):
| if (opt in [u'image_files']):
return scans_for_fnames(val, keep4d=True, separate_sessions=True)
if (opt in [u'flowfield_files']):
return scans_for_fnames(val, keep4d=True)
else:
return super(CreateWarped, self)._format_arg(opt, spec, val)
|
'Convert input to appropriate format for spm'
| def _format_arg(self, opt, spec, val):
| if (opt in [u'deformation_field', u'reference_volume']):
val = [val]
if (opt in [u'deformation_field']):
return scans_for_fnames(val, keep4d=True, separate_sessions=False)
if (opt in [u'in_files', u'reference_volume']):
return scans_for_fnames(val, keep4d=False, separate_sessions=False)
else:
return super(ApplyDeformations, self)._format_arg(opt, spec, val)
|
'Convert input to appropriate format for spm'
| def _format_arg(self, opt, spec, val):
| if (opt in [u'in_files']):
return scans_for_fnames(val, keep4d=True)
elif (opt in [u'spatial_normalization']):
if (val == u'low'):
return {u'normlow': []}
elif (opt in [u'dartel_template']):
return np.array([val], dtype=object)
elif (opt in [u'deformation_field']):
return super(VBMSegment, self)._format_arg(opt, spec, [int(val[0]), int(val[1])])
else:
return super(VBMSegment, self)._format_arg(opt, spec, val)
|
'Convert input to appropriate format for spm'
| def _format_arg(self, opt, spec, val):
| if (opt in [u'spm_mat_dir', u'mask_image']):
return np.array([str(val)], dtype=object)
if (opt in [u'session_info']):
if isinstance(val, dict):
return [val]
else:
return val
return super(Level1Design, self)._format_arg(opt, spec, val)
|
'validate spm level1design options; if set to None, ignore'
| def _parse_inputs(self):
| einputs = super(Level1Design, self)._parse_inputs(skip=u'mask_threshold')
for sessinfo in einputs[0][u'sess']:
sessinfo[u'scans'] = scans_for_fnames(filename_to_list(sessinfo[u'scans']), keep4d=False)
if (not isdefined(self.inputs.spm_mat_dir)):
einputs[0][u'dir'] = np.array([str(os.getcwd())], dtype=object)
return einputs
|
'validates spm options and generates job structure
if mfile is True uses matlab .m file
else generates a job structure and saves in .mat'
| def _make_matlab_command(self, content):
| if isdefined(self.inputs.mask_image):
postscript = u'load SPM;\n'
postscript += (u"SPM.xM.VM = spm_vol('%s');\n" % list_to_filename(self.inputs.mask_image))
postscript += u'SPM.xM.I = 0;\n'
postscript += u'SPM.xM.T = [];\n'
postscript += (u'SPM.xM.TH = ones(size(SPM.xM.TH))*(%s);\n' % self.inputs.mask_threshold)
postscript += u"SPM.xM.xs = struct('Masking', 'explicit masking only');\n"
postscript += u'save SPM SPM;\n'
else:
postscript = None
return super(Level1Design, self)._make_matlab_command(content, postscript=postscript)
|
'Convert input to appropriate format for spm'
| def _format_arg(self, opt, spec, val):
| if (opt == u'spm_mat_file'):
return np.array([str(val)], dtype=object)
if (opt == u'estimation_method'):
if isinstance(val, (str, bytes)):
return {u'{}'.format(val): 1}
else:
return val
return super(EstimateModel, self)._format_arg(opt, spec, val)
|
'validate spm model estimation options; if set to None, ignore'
| def _parse_inputs(self):
| einputs = super(EstimateModel, self)._parse_inputs(skip=u'flags')
if isdefined(self.inputs.flags):
einputs[0].update({flag: val for (flag, val) in self.inputs.flags.items()})
return einputs
|
'validates spm options and generates job structure'
| def _make_matlab_command(self, _):
| contrasts = []
cname = []
for (i, cont) in enumerate(self.inputs.contrasts):
cname.insert(i, cont[0])
contrasts.insert(i, Bunch(name=cont[0], stat=cont[1], conditions=cont[2], weights=None, sessions=None))
if (len(cont) >= 4):
contrasts[i].weights = cont[3]
if (len(cont) >= 5):
contrasts[i].sessions = cont[4]
script = u'% generated by nipype.interfaces.spm\n'
script += u'spm_defaults;\n'
script += (u"jobs{1}.stats{1}.con.spmmat = {'%s'};\n" % self.inputs.spm_mat_file)
script += u'load(jobs{1}.stats{1}.con.spmmat{:});\n'
script += (u"SPM.swd = '%s';\n" % os.getcwd())
script += u"save(jobs{1}.stats{1}.con.spmmat{:},'SPM');\n"
script += u'names = SPM.xX.name;\n'
if (isdefined(self.inputs.group_contrast) and self.inputs.group_contrast):
script += u'condnames=names;\n'
else:
if self.inputs.use_derivs:
script += u"pat = 'Sn\\([0-9]*\\) (.*)';\n"
else:
script += u"pat = 'Sn\\([0-9]*\\) (.*)\\*bf\\(1\\)|Sn\\([0-9]*\\) .*\\*bf\\([2-9]\\)|Sn\\([0-9]*\\) (.*)';\n"
script += u"t = regexp(names,pat,'tokens');\n"
script += u"pat1 = 'Sn\\(([0-9].*)\\)\\s.*';\n"
script += u"t1 = regexp(names,pat1,'tokens');\n"
script += u"for i0=1:numel(t),condnames{i0}='';condsess(i0)=0;if ~isempty(t{i0}{1}),condnames{i0} = t{i0}{1}{1};condsess(i0)=str2num(t1{i0}{1}{1});end;end;\n"
for (i, contrast) in enumerate(contrasts):
if (contrast.stat == u'T'):
script += (u"consess{%d}.tcon.name = '%s';\n" % ((i + 1), contrast.name))
script += (u'consess{%d}.tcon.convec = zeros(1,numel(names));\n' % (i + 1))
for (c0, cond) in enumerate(contrast.conditions):
script += (u"idx = strmatch('%s',condnames,'exact');\n" % cond)
script += (u"if isempty(idx), throw(MException('CondName:Chk', sprintf('Condition %%s not found in design','%s'))); end;\n" % cond)
if contrast.sessions:
for (sno, sw) in enumerate(contrast.sessions):
script += (u'sidx = find(condsess(idx)==%d);\n' % (sno + 1))
script += (u'consess{%d}.tcon.convec(idx(sidx)) = %f;\n' % ((i + 1), (sw * contrast.weights[c0])))
else:
script += (u'consess{%d}.tcon.convec(idx) = %f;\n' % ((i + 1), contrast.weights[c0]))
for (i, contrast) in enumerate(contrasts):
if (contrast.stat == u'F'):
script += (u"consess{%d}.fcon.name = '%s';\n" % ((i + 1), contrast.name))
for (cl0, fcont) in enumerate(contrast.conditions):
try:
tidx = cname.index(fcont[0])
except ValueError:
raise Exception(u'Contrast Estimate: could not get index of T contrast. probably not defined prior to the F contrasts')
script += (u'consess{%d}.fcon.convec{%d} = consess{%d}.tcon.convec;\n' % ((i + 1), (cl0 + 1), (tidx + 1)))
script += u'jobs{1}.stats{1}.con.consess = consess;\n'
script += u"if strcmp(spm('ver'),'SPM8'), spm_jobman('initcfg');jobs=spm_jobman('spm5tospm8',{jobs});end\n"
script += u"spm_jobman('run',jobs);"
return script
|
'Convert input to appropriate format for spm'
| def _format_arg(self, opt, spec, val):
| if (opt in [u'spm_mat_dir', u'explicit_mask_file']):
return np.array([str(val)], dtype=object)
if (opt in [u'covariates']):
outlist = []
mapping = {u'name': u'cname', u'vector': u'c', u'interaction': u'iCFI', u'centering': u'iCC'}
for dictitem in val:
outdict = {}
for (key, keyval) in list(dictitem.items()):
outdict[mapping[key]] = keyval
outlist.append(outdict)
return outlist
return super(FactorialDesign, self)._format_arg(opt, spec, val)
|
'validate spm factorial design options; if set to None, ignore'
| def _parse_inputs(self):
| einputs = super(FactorialDesign, self)._parse_inputs()
if (not isdefined(self.inputs.spm_mat_dir)):
einputs[0][u'dir'] = np.array([str(os.getcwd())], dtype=object)
return einputs
|
'Convert input to appropriate format for spm'
| def _format_arg(self, opt, spec, val):
| if (opt in [u'in_files']):
return np.array(val, dtype=object)
return super(OneSampleTTestDesign, self)._format_arg(opt, spec, val)
|
'Convert input to appropriate format for spm'
| def _format_arg(self, opt, spec, val):
| if (opt in [u'group1_files', u'group2_files']):
return np.array(val, dtype=object)
return super(TwoSampleTTestDesign, self)._format_arg(opt, spec, val)
|
'Convert input to appropriate format for spm'
| def _format_arg(self, opt, spec, val):
| if (opt in [u'paired_files']):
return [dict(scans=np.array(files, dtype=object)) for files in val]
return super(PairedTTestDesign, self)._format_arg(opt, spec, val)
|
'Convert input to appropriate format for spm'
| def _format_arg(self, opt, spec, val):
| if (opt in [u'in_files']):
return np.array(val, dtype=object)
if (opt in [u'user_covariates']):
outlist = []
mapping = {u'name': u'cname', u'vector': u'c', u'centering': u'iCC'}
for dictitem in val:
outdict = {}
for (key, keyval) in list(dictitem.items()):
outdict[mapping[key]] = keyval
outlist.append(outdict)
return outlist
return super(MultipleRegressionDesign, self)._format_arg(opt, spec, val)
|
'Check for freesurfer version on system
Find which freesurfer is being used....and get version from
/path/to/freesurfer/build-stamp.txt
Returns
version : string
version number as string
or None if freesurfer version not found'
| @staticmethod
def version():
| fs_home = os.getenv(u'FREESURFER_HOME')
if (fs_home is None):
return None
versionfile = os.path.join(fs_home, u'build-stamp.txt')
if (not os.path.exists(versionfile)):
return None
fid = open(versionfile, u'rt')
version = fid.readline()
fid.close()
return version
|
'Return a comparable version object
If no version found, use LooseVersion(\'0.0.0\')'
| @classmethod
def looseversion(cls):
| ver = cls.version()
if (ver is None):
return LooseVersion(u'0.0.0')
vinfo = ver.rstrip().split(u'-')
try:
int(vinfo[(-1)], 16)
except ValueError:
githash = u''
else:
githash = (u'.' + vinfo[(-1)])
if githash:
if (vinfo[3] == u'dev'):
vstr = (u'6.0.0-dev' + githash)
elif (vinfo[5][0] == u'v'):
vstr = vinfo[5][1:]
else:
raise RuntimeError((u'Unknown version string: ' + ver))
elif (u'dev' in ver):
vstr = (vinfo[(-1)] + u'-dev')
else:
vstr = ver.rstrip().split(u'-v')[(-1)]
return LooseVersion(vstr)
|
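For illustration, a minimal standalone sketch (not FreeSurfer's API) of how the parsing above turns a hypothetical build-stamp line into a comparable version string; the stamp value below is an assumption:

# Hypothetical build-stamp line; the real one lives in $FREESURFER_HOME/build-stamp.txt
stamp = u'freesurfer-Linux-centos6_x86_64-stable-pub-v6.0.0-2beb96c'
vinfo = stamp.rstrip().split(u'-')
try:
    int(vinfo[-1], 16)                  # trailing component parses as a git hash
    githash = u'.' + vinfo[-1]
except ValueError:
    githash = u''
if githash and vinfo[5][0] == u'v':     # simplified: the 'dev' branch is omitted here
    vstr = vinfo[5][1:]                 # 'v6.0.0' -> '6.0.0'
else:
    vstr = stamp.rstrip().split(u'-v')[-1]
print(vstr)                             # '6.0.0', which would then feed LooseVersion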
'Check the global SUBJECTS_DIR
Parameters
subjects_dir : string
The system defined subjects directory
Returns
subject_dir : string
Represents the current environment setting of SUBJECTS_DIR'
| @classmethod
def subjectsdir(cls):
| if cls.version():
return os.environ[u'SUBJECTS_DIR']
return None
|
'Define a generic mapping for a single outfile
The filename is potentially autogenerated by suffixing inputs.infile
Parameters
basename : string (required)
filename to base the new filename on
fname : string
if not None, just use this fname
cwd : string
prefix paths with cwd, otherwise os.getcwd()
suffix : string
default suffix'
| def _gen_fname(self, basename, fname=None, cwd=None, suffix=u'_fs', use_ext=True):
| if (basename == u''):
msg = (u'Unable to generate filename for command %s. ' % self.cmd)
msg += u'basename is not set!'
raise ValueError(msg)
if (cwd is None):
cwd = os.getcwd()
fname = fname_presuffix(basename, suffix=suffix, use_ext=use_ext, newpath=cwd)
return fname
|
'Filename normalization routine to perform only when run in Node
context'
| def _normalize_filenames(self):
| pass
|
'Based on MRIsBuildFileName in freesurfer/utils/mrisurf.c
If no path information is provided for out_name, use path and
hemisphere (if also unspecified) from in_file to determine the path
of the associated file.
Use in_file prefix to indicate hemisphere for out_name, rather than
inspecting the surface data structure.'
| @staticmethod
def _associated_file(in_file, out_name):
| (path, base) = os.path.split(out_name)
if (path == u''):
(path, in_file) = os.path.split(in_file)
hemis = (u'lh.', u'rh.')
if ((in_file[:3] in hemis) and (base[:3] not in hemis)):
base = (in_file[:3] + base)
return os.path.join(path, base)
|
'In a Node context, interpret out_file as a literal path to
reduce surprise.'
| def _normalize_filenames(self):
| if isdefined(self.inputs.out_file):
self.inputs.out_file = os.path.abspath(self.inputs.out_file)
|
'Find full paths for pial, thickness and sphere files for copying'
| def _normalize_filenames(self):
| in_file = self.inputs.in_file
pial = self.inputs.pial
if (not isdefined(pial)):
pial = u'pial'
self.inputs.pial = self._associated_file(in_file, pial)
if (isdefined(self.inputs.thickness) and self.inputs.thickness):
thickness_name = self.inputs.thickness_name
if (not isdefined(thickness_name)):
thickness_name = u'thickness'
self.inputs.thickness_name = self._associated_file(in_file, thickness_name)
self.inputs.sphere = self._associated_file(in_file, self.inputs.sphere)
|
'Return the list of DICOM files found in the dicom directory'
| def _get_dicomfiles(self):
| return glob(os.path.abspath(os.path.join(self.inputs.dicom_dir, u'*-1.dcm')))
|
'returns output directory'
| def _get_outdir(self):
| subjid = self.inputs.subject_id
if (not isdefined(subjid)):
(path, fname) = os.path.split(self._get_dicomfiles()[0])
subjid = int(fname.split(u'-')[0])
if isdefined(self.inputs.subject_dir_template):
subjid = (self.inputs.subject_dir_template % subjid)
basedir = self.inputs.base_output_dir
if (not isdefined(basedir)):
basedir = os.path.abspath(u'.')
outdir = os.path.abspath(os.path.join(basedir, subjid))
return outdir
|
'Returns list of dicom series that should be converted.
Requires a dicom info summary file generated by ``DicomDirInfo``'
| def _get_runs(self):
| seq = np.genfromtxt(self.inputs.dicom_info, dtype=object)
runs = []
for s in seq:
if self.inputs.seq_list:
if self.inputs.ignore_single_slice:
if ((int(s[8]) > 1) and any([s[12].startswith(sn) for sn in self.inputs.seq_list])):
runs.append(int(s[2]))
elif any([s[12].startswith(sn) for sn in self.inputs.seq_list]):
runs.append(int(s[2]))
else:
runs.append(int(s[2]))
return runs
|
'Returns list of files to be converted'
| def _get_filelist(self, outdir):
| filemap = {}
for f in self._get_dicomfiles():
(head, fname) = os.path.split(f)
(fname, ext) = os.path.splitext(fname)
fileparts = fname.split(u'-')
runno = int(fileparts[1])
out_type = MRIConvert.filemap[self.inputs.out_type]
outfile = os.path.join(outdir, u'.'.join(((u'%s-%02d' % (fileparts[0], runno)), out_type)))
filemap[runno] = (f, outfile)
if self.inputs.dicom_info:
files = [filemap[r] for r in self._get_runs()]
else:
files = [filemap[r] for r in list(filemap.keys())]
return files
|
'`command` plus any arguments (args)
validates arguments and generates command line'
| @property
def cmdline(self):
| self._check_mandatory_inputs()
outdir = self._get_outdir()
cmd = []
if (not os.path.exists(outdir)):
cmdstr = (u'python -c "import os; os.makedirs(\'%s\')"' % outdir)
cmd.extend([cmdstr])
infofile = os.path.join(outdir, u'shortinfo.txt')
if (not os.path.exists(infofile)):
cmdstr = (u'dcmdir-info-mgh %s > %s' % (self.inputs.dicom_dir, infofile))
cmd.extend([cmdstr])
files = self._get_filelist(outdir)
for (infile, outfile) in files:
if (not os.path.exists(outfile)):
single_cmd = (u'%s %s %s' % (self.cmd, infile, os.path.join(outdir, outfile)))
cmd.extend([single_cmd])
return u'; '.join(cmd)
|
'See io.FreeSurferSource.outputs for the list of outputs returned'
| def _list_outputs(self):
| if isdefined(self.inputs.subjects_dir):
subjects_dir = self.inputs.subjects_dir
else:
subjects_dir = self._gen_subjects_dir()
if isdefined(self.inputs.hemi):
hemi = self.inputs.hemi
else:
hemi = u'both'
outputs = self._outputs().get()
outputs.update(FreeSurferSource(subject_id=self.inputs.subject_id, subjects_dir=subjects_dir, hemi=hemi)._list_outputs())
outputs[u'subject_id'] = self.inputs.subject_id
outputs[u'subjects_dir'] = subjects_dir
return outputs
|
'Check for dtk version on system
Parameters
None
Returns
version : str
Version number as string or None if dtk not found'
| @staticmethod
def version():
| clout = CommandLine(command=u'dti_recon', terminal_output=u'allatonce').run()
if (clout.runtime.returncode != 0):
return None
dtirecon = clout.runtime.stdout
result = re.search(u'dti_recon (.*)\n', dtirecon)
version = result.group(0).split()[1]
return version
|
'Read from csv in_file and return an array and ROI names
The input file should have a first row containing the names of the
ROIs (strings)
the rest of the data will be read in and transposed so that the rows
(TRs) become the second (and last) dimension of the array'
| def _read_csv(self):
| first_row = open(self.inputs.in_file).readline()
if (not first_row[1].isalpha()):
raise ValueError(u'First row of in_file should contain ROI names as strings of characters')
roi_names = open(self.inputs.in_file).readline().replace(u'"', u'').strip(u'\n').split(u',')
data = np.loadtxt(self.inputs.in_file, skiprows=1, delimiter=u',').T
return (data, roi_names)
|
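A minimal sketch of the CSV layout `_read_csv` expects and the transpose it applies, using an in-memory file; the ROI names and values are made up:

import io
import numpy as np

# First row holds quoted ROI names; each remaining row is one TR.
csv_text = u'"roi_a","roi_b"\n0.1,0.2\n0.3,0.4\n0.5,0.6\n'
roi_names = csv_text.splitlines()[0].replace(u'"', u'').split(u',')
data = np.loadtxt(io.StringIO(csv_text), skiprows=1, delimiter=u',').T
print(roi_names)   # ['roi_a', 'roi_b']
print(data.shape)  # (2, 3): ROIs x TRs after the transpose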
'Read data from the in_file and generate a nitime TimeSeries object'
| def _csv2ts(self):
| (data, roi_names) = self._read_csv()
TS = TimeSeries(data=data, sampling_interval=self.inputs.TR, time_unit=u's')
TS.metadata = dict(ROIs=roi_names)
return TS
|
'Generate the output csv files.'
| def _make_output_files(self):
| for this in zip([self.coherence, self.delay], [u'coherence', u'delay']):
tmp_f = tempfile.mkstemp()[1]
np.savetxt(tmp_f, this[0], delimiter=u',')
fid = open(fname_presuffix(self.inputs.output_csv_file, suffix=(u'_%s' % this[1])), u'w+')
fid.write(((u',' + u','.join(self.ROIs)) + u'\n'))
for (r, line) in zip(self.ROIs, open(tmp_f)):
fid.write((u'%s,%s' % (r, line)))
fid.close()
|
'Generate the desired figure and save the files according to
self.inputs.output_figure_file'
| def _make_output_figures(self):
| if (self.inputs.figure_type == u'matrix'):
fig_coh = viz.drawmatrix_channels(self.coherence, channel_names=self.ROIs, color_anchor=0)
fig_coh.savefig(fname_presuffix(self.inputs.output_figure_file, suffix=u'_coherence'))
fig_dt = viz.drawmatrix_channels(self.delay, channel_names=self.ROIs, color_anchor=0)
fig_dt.savefig(fname_presuffix(self.inputs.output_figure_file, suffix=u'_delay'))
else:
fig_coh = viz.drawgraph_channels(self.coherence, channel_names=self.ROIs)
fig_coh.savefig(fname_presuffix(self.inputs.output_figure_file, suffix=u'_coherence'))
fig_dt = viz.drawgraph_channels(self.delay, channel_names=self.ROIs)
fig_dt.savefig(fname_presuffix(self.inputs.output_figure_file, suffix=u'_delay'))
|
'Creates a File trait.
Parameters
value : string
The default value for the trait
filter : string
A wildcard string to filter filenames in the file dialog box used by
the attribute trait editor.
auto_set : boolean
Indicates whether the file editor updates the trait value after
every key stroke.
exists : boolean
Indicates whether the trait value must be an existing file or
not.
Default Value
*value* or \'\''
| def __init__(self, value=u'', filter=None, auto_set=False, entries=0, exists=False, **metadata):
| self.filter = filter
self.auto_set = auto_set
self.entries = entries
self.exists = exists
if exists:
self.info_text = u'an existing file name'
super(BaseFile, self).__init__(value, **metadata)
|
'Validates that a specified value is valid for this trait.
Note: The \'fast validator\' version performs this check in C.'
| def validate(self, object, name, value):
| validated_value = super(BaseFile, self).validate(object, name, value)
if (not self.exists):
return validated_value
elif os.path.isfile(value):
return validated_value
else:
raise TraitError(args=u"The trait '{}' of {} instance is {}, but the path '{}' does not exist.".format(name, class_of(object), self.info_text, value))
self.error(object, name, value)
|
'Creates a File trait.
Parameters
value : string
The default value for the trait
filter : string
A wildcard string to filter filenames in the file dialog box used by
the attribute trait editor.
auto_set : boolean
Indicates whether the file editor updates the trait value after
every key stroke.
exists : boolean
Indicates whether the trait value must be an existing file or
not.
Default Value
*value* or \'\''
| def __init__(self, value=u'', filter=None, auto_set=False, entries=0, exists=False, **metadata):
| super(File, self).__init__(value, filter, auto_set, entries, exists, **metadata)
|
'Creates a BaseDirectory trait.
Parameters
value : string
The default value for the trait
auto_set : boolean
Indicates whether the directory editor updates the trait value
after every key stroke.
exists : boolean
Indicates whether the trait value must be an existing directory or
not.
Default Value
*value* or \'\''
| def __init__(self, value=u'', auto_set=False, entries=0, exists=False, **metadata):
| self.entries = entries
self.auto_set = auto_set
self.exists = exists
if exists:
self.info_text = u'an existing directory name'
super(BaseDirectory, self).__init__(value, **metadata)
|
'Validates that a specified value is valid for this trait.
Note: The \'fast validator\' version performs this check in C.'
| def validate(self, object, name, value):
| if isinstance(value, (str, bytes)):
if (not self.exists):
return value
if os.path.isdir(value):
return value
else:
raise TraitError(args=u"The trait '{}' of {} instance is {}, but the path '{}' does not exist.".format(name, class_of(object), self.info_text, value))
self.error(object, name, value)
|
'Creates a Directory trait.
Parameters
value : string
The default value for the trait
auto_set : boolean
Indicates whether the directory editor updates the trait value
after every key stroke.
exists : boolean
Indicates whether the trait value must be an existing directory or
not.
Default Value
*value* or \'\''
| def __init__(self, value=u'', auto_set=False, entries=0, exists=False, **metadata):
| super(Directory, self).__init__(value, auto_set, entries, exists, **metadata)
|
'Trait handles neuroimaging files.
Parameters
types : list
Strings of file format types accepted
compressed : boolean
Indicates whether the file format can be compressed'
| def __init__(self, value=u'', filter=None, auto_set=False, entries=0, exists=False, types=[], allow_compressed=True, **metadata):
| self.types = types
self.allow_compressed = allow_compressed
super(ImageFile, self).__init__(value, filter, auto_set, entries, exists, **metadata)
|
'Validates that a specified value is valid for this trait.'
| def validate(self, object, name, value):
| validated_value = super(ImageFile, self).validate(object, name, value)
if (validated_value and self.types):
self._exts = self.grab_exts()
if (not any((validated_value.endswith(x) for x in self._exts))):
raise TraitError(args=u'{} is not included in allowed types: {}'.format(validated_value, u', '.join(self._exts)))
return validated_value
|
'Generate all possible permutations of <multi-tensor> <single-tensor> options'
| def _gen_model_options():
| single_tensor = [u'dt', u'restore', u'algdt', u'nldt_pos', u'nldt', u'ldt_wtd']
multi_tensor = [u'cylcyl', u'cylcyl_eq', u'pospos', u'pospos_eq', u'poscyl', u'poscyl_eq', u'cylcylcyl', u'cylcylcyl_eq', u'pospospos', u'pospospos_eq', u'posposcyl', u'posposcyl_eq', u'poscylcyl', u'poscylcyl_eq']
other = [u'adc', u'ball_stick']
model_list = single_tensor
model_list.extend(other)
model_list.extend([((multi + u' ') + single) for multi in multi_tensor for single in single_tensor])
return model_list
|
'extract the proper filename from the first line of the artifacts file'
| def _get_cleaned_functional_filename(self, artifacts_list_filename):
| artifacts_list_file = open(artifacts_list_filename, u'r')
(functional_filename, extension) = artifacts_list_file.readline().split(u'.')
(artifacts_list_file_path, artifacts_list_filename) = os.path.split(artifacts_list_filename)
return os.path.join(artifacts_list_file_path, (functional_filename + u'_clean.nii.gz'))
|
'Check for fsl version on system
Parameters
None
Returns
version : str
Version number as string or None if FSL not found'
| @staticmethod
def version():
| try:
basedir = os.environ[u'FSLDIR']
except KeyError:
return None
out = open((u'%s/etc/fslversion' % basedir)).read()
return out.strip(u'\n')
|
'Get the file extension for the given output type.
Parameters
output_type : {\'NIFTI\', \'NIFTI_GZ\', \'NIFTI_PAIR\', \'NIFTI_PAIR_GZ\'}
String specifying the output type.
Returns
extension : str
The file extension for the output type.'
| @classmethod
def output_type_to_ext(cls, output_type):
| try:
return cls.ftypes[output_type]
except KeyError:
msg = (u'Invalid FSLOUTPUTTYPE: ', output_type)
raise KeyError(msg)
|
'Get the global FSL output file type FSLOUTPUTTYPE.
This returns the value of the environment variable
FSLOUTPUTTYPE. An exception is raised if it is not defined.
Returns
fsl_ftype : string
Represents the current environment setting of FSLOUTPUTTYPE'
| @classmethod
def output_type(cls):
| try:
return os.environ[u'FSLOUTPUTTYPE']
except KeyError:
LOGGER.warn(u'FSLOUTPUTTYPE environment variable is not set. Setting FSLOUTPUTTYPE=NIFTI')
return u'NIFTI'
|
'Grab an image from the standard location.
Returns a list of standard images if called without arguments.
Could be made more fancy to allow for more relocatability'
| @staticmethod
def standard_image(img_name=None):
| try:
fsldir = os.environ[u'FSLDIR']
except KeyError:
raise Exception(u'FSL environment variables not set')
stdpath = os.path.join(fsldir, u'data', u'standard')
if (img_name is None):
return [filename.replace((stdpath + u'/'), u'') for filename in glob(os.path.join(stdpath, u'*nii*'))]
return os.path.join(stdpath, img_name)
|
'Set the default output type for FSL classes.
This method is used to set the default output type for all FSL
subclasses. However, setting this will not update the output
type for any existing instances. For these, assign the
<instance>.inputs.output_type.'
| @classmethod
def set_default_output_type(cls, output_type):
| if (output_type in Info.ftypes):
cls._output_type = output_type
else:
raise AttributeError((u'Invalid FSL output_type: %s' % output_type))
|
'Generate a filename based on the given parameters.
The filename will take the form: cwd/basename<suffix><ext>.
If change_ext is True, it will use the extension specified in
<instance>.inputs.output_type.
Parameters
basename : str
Filename to base the new filename on.
cwd : str
Path to prefix to the new filename. (default is os.getcwd())
suffix : str
Suffix to add to the `basename`. (default is \'\')
change_ext : bool
Flag to change the filename extension to the FSL output type.
(default True)
Returns
fname : str
New filename based on given parameters.'
| def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True, ext=None):
| if (basename == u''):
msg = (u'Unable to generate filename for command %s. ' % self.cmd)
msg += u'basename is not set!'
raise ValueError(msg)
if (cwd is None):
cwd = os.getcwd()
if (ext is None):
ext = Info.output_type_to_ext(self.inputs.output_type)
if change_ext:
if suffix:
suffix = u''.join((suffix, ext))
else:
suffix = ext
if (suffix is None):
suffix = u''
fname = fname_presuffix(basename, suffix=suffix, use_ext=False, newpath=cwd)
return fname
|
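A rough sketch of how the suffix and output-type extension compose into the final path; `fname_presuffix` is imitated with plain string handling, and the names and working directory are illustrative only:

import os

basename, suffix, ext, cwd = u'func.nii.gz', u'_brain', u'.nii.gz', u'/tmp/workdir'
stem = basename
for known in (u'.nii.gz', u'.nii', u'.img', u'.hdr'):   # strip a known image extension
    if stem.endswith(known):
        stem = stem[:-len(known)]
        break
# change_ext=True folds the extension into the suffix before composing the name.
fname = os.path.join(cwd, stem + suffix + ext)
print(fname)   # /tmp/workdir/func_brain.nii.gz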
'Create a Bunch which contains all possible files generated
by running the interface. Some files are always generated, others
depending on which ``inputs`` options are set.
Returns
outputs : Bunch object
Bunch object containing all possible files generated by
interface object.
If None, file was not generated
Else, contains path, filename of generated outputfile'
| def _list_outputs(self):
| outputs = self._outputs().get()
outputs[u'roi_file'] = self.inputs.roi_file
if (not isdefined(outputs[u'roi_file'])):
outputs[u'roi_file'] = self._gen_fname(self.inputs.in_file, suffix=u'_roi')
outputs[u'roi_file'] = os.path.abspath(outputs[u'roi_file'])
return outputs
|
'Create a Bunch which contains all possible files generated
by running the interface. Some files are always generated, others
depending on which ``inputs`` options are set.
Returns
outputs : Bunch object
Bunch object containing all possible files generated by
interface object.
If None, file was not generated
Else, contains path, filename of generated outputfile'
| def _list_outputs(self):
| outputs = self._outputs().get()
ext = Info.output_type_to_ext(self.inputs.output_type)
outbase = u'vol*'
if isdefined(self.inputs.out_base_name):
outbase = (u'%s*' % self.inputs.out_base_name)
outputs[u'out_files'] = sorted(glob(os.path.join(os.getcwd(), (outbase + ext))))
return outputs
|
'Generate a topup compatible encoding file based on given directions'
| def _generate_encfile(self):
| out_file = self._get_encfilename()
durations = self.inputs.readout_times
if (len(self.inputs.encoding_direction) != len(durations)):
if (len(self.inputs.readout_times) != 1):
raise ValueError(u'Readout time must be a float or match the length of encoding directions')
durations = (durations * len(self.inputs.encoding_direction))
lines = []
for (idx, encdir) in enumerate(self.inputs.encoding_direction):
direction = 1.0
if encdir.endswith(u'-'):
direction = (-1.0)
line = ([(float((val[0] == encdir[0])) * direction) for val in [u'x', u'y', u'z']] + [durations[idx]])
lines.append(line)
np.savetxt(out_file, np.array(lines), fmt='%d %d %d %.8f')
return out_file
|
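A standalone sketch of the encoding file this generates, for hypothetical phase-encoding directions and a single readout time (all values are assumptions):

import numpy as np

encoding_direction = [u'y-', u'y']   # hypothetical directions
readout_times = [0.05]               # one value, reused for every direction
durations = readout_times * len(encoding_direction)
lines = []
for idx, encdir in enumerate(encoding_direction):
    direction = -1.0 if encdir.endswith(u'-') else 1.0
    lines.append([float(val == encdir[0]) * direction
                  for val in (u'x', u'y', u'z')] + [durations[idx]])
np.savetxt(u'topup_encoding.txt', np.array(lines), fmt='%d %d %d %.8f')
# topup_encoding.txt now contains:
# 0 -1 0 0.05000000
# 0 1 0 0.05000000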
'Writes out currently set options to specified config file
XX TODO : need to figure out how the config file is written
Parameters
configfile : /path/to/configfile'
| def write_config(self, configfile):
| try:
fid = open(configfile, u'w+')
except IOError:
print((u'unable to create config_file %s' % configfile))
for item in list(self.inputs.get().items()):
fid.write((u'%s\n' % item))
fid.close()
|
'Removes valid intensitymap extensions from `f`, returning a basename
that can refer to both intensitymap files.'
| @classmethod
def intensitymap_file_basename(cls, f):
| for ext in (list(Info.ftypes.values()) + [u'.txt']):
if f.endswith(ext):
return f[:(- len(ext))]
return f
|
'Creates EV files from condition and regressor information.
Parameters:
runinfo : dict
Generated by `SpecifyModel` and contains information
about events and other regressors.
runidx : int
Index to run number
ev_parameters : dict
A dictionary containing the model parameters for the
given design type.
orthogonalization : dict
A dictionary of dictionaries specifying orthogonal EVs.
contrasts : list of lists
Information on contrasts to be evaluated'
| def _create_ev_files(self, cwd, runinfo, runidx, ev_parameters, orthogonalization, contrasts, do_tempfilter, basis_key):
| conds = {}
evname = []
if (basis_key == u'dgamma'):
basis_key = u'hrf'
elif (basis_key == u'gamma'):
try:
_ = ev_parameters[u'gammasigma']
except KeyError:
ev_parameters[u'gammasigma'] = 3
try:
_ = ev_parameters[u'gammadelay']
except KeyError:
ev_parameters[u'gammadelay'] = 6
ev_template = load_template(((u'feat_ev_' + basis_key) + u'.tcl'))
ev_none = load_template(u'feat_ev_none.tcl')
ev_ortho = load_template(u'feat_ev_ortho.tcl')
ev_txt = u''
num_evs = [0, 0]
for field in [u'cond', u'regress']:
for (i, cond) in enumerate(runinfo[field]):
name = cond[u'name']
evname.append(name)
evfname = os.path.join(cwd, (u'ev_%s_%d_%d.txt' % (name, runidx, len(evname))))
evinfo = []
num_evs[0] += 1
num_evs[1] += 1
if (field == u'cond'):
for (j, onset) in enumerate(cond[u'onset']):
try:
amplitudes = cond[u'amplitudes']
if (len(amplitudes) > 1):
amp = amplitudes[j]
else:
amp = amplitudes[0]
except KeyError:
amp = 1
if (len(cond[u'duration']) > 1):
evinfo.insert(j, [onset, cond[u'duration'][j], amp])
else:
evinfo.insert(j, [onset, cond[u'duration'][0], amp])
ev_parameters[u'cond_file'] = evfname
ev_parameters[u'ev_num'] = num_evs[0]
ev_parameters[u'ev_name'] = name
ev_parameters[u'tempfilt_yn'] = do_tempfilter
if (not (u'basisorth' in ev_parameters)):
ev_parameters[u'basisorth'] = 1
if (not (u'basisfnum' in ev_parameters)):
ev_parameters[u'basisfnum'] = 1
try:
ev_parameters[u'fsldir'] = os.environ[u'FSLDIR']
except KeyError:
if (basis_key == u'flobs'):
raise Exception(u'FSL environment variables not set')
else:
ev_parameters[u'fsldir'] = u'/usr/share/fsl'
ev_parameters[u'temporalderiv'] = int(bool(ev_parameters.get(u'derivs', False)))
if ev_parameters[u'temporalderiv']:
evname.append((name + u'TD'))
num_evs[1] += 1
ev_txt += ev_template.substitute(ev_parameters)
elif (field == u'regress'):
evinfo = [[j] for j in cond[u'val']]
ev_txt += ev_none.substitute(ev_num=num_evs[0], ev_name=name, tempfilt_yn=do_tempfilter, cond_file=evfname)
ev_txt += u'\n'
conds[name] = evfname
self._create_ev_file(evfname, evinfo)
for i in range(1, (num_evs[0] + 1)):
for j in range(0, (num_evs[0] + 1)):
try:
orthogonal = int(orthogonalization[i][j])
except (KeyError, TypeError, ValueError, IndexError):
orthogonal = 0
ev_txt += ev_ortho.substitute(c0=i, c1=j, orthogonal=orthogonal)
ev_txt += u'\n'
if isdefined(contrasts):
contrast_header = load_template(u'feat_contrast_header.tcl')
contrast_prolog = load_template(u'feat_contrast_prolog.tcl')
contrast_element = load_template(u'feat_contrast_element.tcl')
contrast_ftest_element = load_template(u'feat_contrast_ftest_element.tcl')
contrastmask_header = load_template(u'feat_contrastmask_header.tcl')
contrastmask_footer = load_template(u'feat_contrastmask_footer.tcl')
contrastmask_element = load_template(u'feat_contrastmask_element.tcl')
ev_txt += contrast_header.substitute()
con_names = []
for (j, con) in enumerate(contrasts):
con_names.append(con[0])
con_map = {}
ftest_idx = []
ttest_idx = []
for (j, con) in enumerate(contrasts):
if (con[1] == u'F'):
ftest_idx.append(j)
for c in con[2]:
if (c[0] not in list(con_map.keys())):
con_map[c[0]] = []
con_map[c[0]].append(j)
else:
ttest_idx.append(j)
for ctype in [u'real', u'orig']:
for (j, con) in enumerate(contrasts):
if (con[1] == u'F'):
continue
tidx = (ttest_idx.index(j) + 1)
ev_txt += contrast_prolog.substitute(cnum=tidx, ctype=ctype, cname=con[0])
count = 0
for c in range(1, (len(evname) + 1)):
if (evname[(c - 1)].endswith(u'TD') and (ctype == u'orig')):
continue
count = (count + 1)
if (evname[(c - 1)] in con[2]):
val = con[3][con[2].index(evname[(c - 1)])]
else:
val = 0.0
ev_txt += contrast_element.substitute(cnum=tidx, element=count, ctype=ctype, val=val)
ev_txt += u'\n'
for fconidx in ftest_idx:
fval = 0
if ((con[0] in con_map.keys()) and (fconidx in con_map[con[0]])):
fval = 1
ev_txt += contrast_ftest_element.substitute(cnum=(ftest_idx.index(fconidx) + 1), element=tidx, ctype=ctype, val=fval)
ev_txt += u'\n'
ev_txt += contrastmask_header.substitute()
for (j, _) in enumerate(contrasts):
for (k, _) in enumerate(contrasts):
if (j != k):
ev_txt += contrastmask_element.substitute(c1=(j + 1), c2=(k + 1))
ev_txt += contrastmask_footer.substitute()
return (num_evs, ev_txt)
|
'Returns functional files in the order of runs'
| def _get_func_files(self, session_info):
| func_files = []
for (i, info) in enumerate(session_info):
func_files.insert(i, info[u'scans'])
return func_files
|
'Check for afni version on system
Parameters
None
Returns
version : str
Version number as string or None if AFNI not found'
| @staticmethod
def version():
| try:
clout = CommandLine(command=u'afni_vcheck', terminal_output=u'allatonce').run()
currv = clout.runtime.stdout.split(u'\n')[1].split(u'=', 1)[1].strip()
except IOError:
IFLOGGER.warn(u'afni_vcheck executable not found.')
return None
except RuntimeError as e:
currv = str(e).split(u'\n')[4].split(u'=', 1)[1].strip()
nextv = str(e).split(u'\n')[6].split(u'=', 1)[1].strip()
IFLOGGER.warn((u'AFNI is outdated, detected version %s and %s is available.' % (currv, nextv)))
if currv.startswith(u'AFNI_'):
currv = currv[5:]
v = currv.split(u'.')
try:
v = [int(n) for n in v]
except ValueError:
return currv
return tuple(v)
|
'Get the file extension for the given output type.
Parameters
outputtype : {\'NIFTI\', \'NIFTI_GZ\', \'AFNI\'}
String specifying the output type.
Returns
extension : str
The file extension for the output type.'
| @classmethod
def output_type_to_ext(cls, outputtype):
| try:
return cls.ftypes[outputtype]
except KeyError as e:
msg = (u'Invalid AFNIOUTPUTTYPE: ', outputtype)
raise_from(KeyError(msg), e)
|
'AFNI has no environment variables,
Output filetypes get set in command line calls
Nipype uses AFNI as default
Returns
None'
| @classmethod
def outputtype(cls):
| return u'AFNI'
|
'Grab an image from the standard location.
Could be made more fancy to allow for more relocatability'
| @staticmethod
def standard_image(img_name):
| clout = CommandLine(u'which afni', terminal_output=u'allatonce').run()
if (clout.runtime.returncode != 0):
return None
out = clout.runtime.stdout
basedir = os.path.split(out)[0]
return os.path.join(basedir, img_name)
|
'Updates the class private attribute based on instance input.
In FSL this also updates an environment variable; that does not apply to AFNI,
which uses no environment variables'
| def _output_update(self):
| self._outputtype = self.inputs.outputtype
|
'Set the default output type for AFNI classes.
This method is used to set the default output type for all afni
subclasses. However, setting this will not update the output
type for any existing instances. For these, assign the
<instance>.inputs.outputtype.'
| @classmethod
def set_default_output_type(cls, outputtype):
| if (outputtype in Info.ftypes):
cls._outputtype = outputtype
else:
raise AttributeError((u'Invalid AFNI outputtype: %s' % outputtype))
|
'Generate a filename based on the given parameters.
The filename will take the form: cwd/basename<suffix><ext>.
If change_ext is True, it will use the extension specified in
<instance>.inputs.outputtype.
Parameters
basename : str
Filename to base the new filename on.
cwd : str
Path to prefix to the new filename. (default is os.getcwd())
suffix : str
Suffix to add to the `basename`. (default is \'\')
change_ext : bool
Flag to change the filename extension to the AFNI output type.
(default True)
Returns
fname : str
New filename based on given parameters.'
| def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True, ext=None):
| if (basename == u''):
msg = (u'Unable to generate filename for command %s. ' % self.cmd)
msg += u'basename is not set!'
raise ValueError(msg)
if (cwd is None):
cwd = os.getcwd()
if (ext is None):
ext = Info.output_type_to_ext(self.inputs.outputtype)
if change_ext:
if suffix:
suffix = u''.join((suffix, ext))
else:
suffix = ext
if (suffix is None):
suffix = u''
fname = fname_presuffix(basename, suffix=suffix, use_ext=False, newpath=cwd)
return fname
|
'Skip the arguments without argstr metadata'
| def _parse_inputs(self, skip=None):
| return super(Calc, self)._parse_inputs(skip=(u'start_idx', u'stop_idx', u'other'))
|
'Skip the arguments without argstr metadata'
| def _parse_inputs(self, skip=None):
| return super(Eval, self)._parse_inputs(skip=(u'start_idx', u'stop_idx', u'other'))
|
'Return the output path for the generated NIfTI.'
| def _get_out_path(self, meta, idx=None):
| if self.inputs.out_format:
out_fmt = self.inputs.out_format
else:
out_fmt = []
if (idx is not None):
out_fmt.append((u'%03d' % idx))
if (u'SeriesNumber' in meta):
out_fmt.append(u'%(SeriesNumber)03d')
if (u'ProtocolName' in meta):
out_fmt.append(u'%(ProtocolName)s')
elif (u'SeriesDescription' in meta):
out_fmt.append(u'%(SeriesDescription)s')
else:
out_fmt.append(u'sequence')
out_fmt = u'-'.join(out_fmt)
out_fn = ((out_fmt % meta) + self.inputs.out_ext)
out_fn = sanitize_path_comp(out_fn)
out_path = os.getcwd()
if isdefined(self.inputs.out_path):
out_path = op.abspath(self.inputs.out_path)
try:
os.makedirs(out_path)
except OSError as exc:
if ((exc.errno == errno.EEXIST) and op.isdir(out_path)):
pass
else:
raise
return op.join(out_path, out_fn)
|
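A small sketch of how the output filename is assembled from DICOM metadata when no out_format is given; the meta keys and values below are hypothetical:

meta = {u'SeriesNumber': 5, u'ProtocolName': u't1_mprage'}
out_fmt = []
if u'SeriesNumber' in meta:
    out_fmt.append(u'%(SeriesNumber)03d')
if u'ProtocolName' in meta:
    out_fmt.append(u'%(ProtocolName)s')
out_fn = (u'-'.join(out_fmt) % meta) + u'.nii.gz'
print(out_fn)   # 005-t1_mprage.nii.gz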
'Enables debug configuration'
| def enable_debug_mode(self):
| self._config.set(u'execution', u'stop_on_first_crash', u'true')
self._config.set(u'execution', u'remove_unnecessary_outputs', u'false')
self._config.set(u'execution', u'keep_inputs', u'true')
self._config.set(u'logging', u'workflow_level', u'DEBUG')
self._config.set(u'logging', u'interface_level', u'DEBUG')
|
'Sets logging directory
This should be the first thing that is done before any nipype class
with logging is imported.'
| def set_log_dir(self, log_dir):
| self._config.set(u'logging', u'log_directory', log_dir)
|
'Helper to log what actually changed from old to new values of
dictionaries.
typical use -- log difference for hashed_inputs'
| def logdebug_dict_differences(self, dold, dnew, prefix=u''):
| if isinstance(dnew, list):
dnew = dict(dnew)
if isinstance(dold, list):
dold = dict(dold)
new_keys = set(dnew.keys())
old_keys = set(dold.keys())
if len((new_keys - old_keys)):
self._logger.debug((u'%s not previously seen: %s' % (prefix, (new_keys - old_keys))))
if len((old_keys - new_keys)):
self._logger.debug((u'%s not presently seen: %s' % (prefix, (old_keys - new_keys))))
msgs = []
for k in new_keys.intersection(old_keys):
same = False
try:
(new, old) = (dnew[k], dold[k])
same = (new == old)
if (not same):
same = (old.__class__(new) == old)
except Exception as e:
same = False
if (not same):
msgs += [(u'%s: %r != %r' % (k, dnew[k], dold[k]))]
if len(msgs):
self._logger.debug((u'%s values differ in fields: %s' % (prefix, u', '.join(msgs))))
|
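The same key-comparison logic reduced to a standalone sketch, with two made-up input dictionaries:

dold = {u'fwhm': 6, u'in_file': u'/data/run1.nii'}
dnew = {u'fwhm': 8, u'in_file': u'/data/run1.nii', u'mask': u'/data/mask.nii'}
new_keys, old_keys = set(dnew), set(dold)
print(u'not previously seen:', new_keys - old_keys)    # {'mask'}
print(u'not presently seen:', old_keys - new_keys)     # set()
msgs = [u'%s: %r != %r' % (k, dnew[k], dold[k])
        for k in new_keys & old_keys if dnew[k] != dold[k]]
print(u'values differ in fields:', u', '.join(msgs))   # fwhm: 8 != 6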
'Create a OneTimeProperty instance.
Parameters
func : method
The method that will be called the first time to compute a value.
Afterwards, the method\'s name will be a standard attribute holding
the value of this computation.'
| def __init__(self, func):
| self.getter = func
self.name = func.__name__
|
'Called on attribute access on the class or instance.'
| def __get__(self, obj, type=None):
| if (obj is None):
return self.getter
val = self.getter(obj)
setattr(obj, self.name, val)
return val
|
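A usage sketch of this lazy-attribute pattern; the descriptor is re-declared locally so the example stands alone, and the Dataset class is invented for illustration:

class OneTimeProperty(object):
    def __init__(self, func):
        self.getter = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        if obj is None:
            return self.getter
        val = self.getter(obj)
        setattr(obj, self.name, val)   # instance attribute now shadows the descriptor
        return val

class Dataset(object):
    def __init__(self, values):
        self.values = values
    @OneTimeProperty
    def mean(self):
        print(u'computing...')
        return sum(self.values) / float(len(self.values))

d = Dataset([1, 2, 3, 4])
print(d.mean)   # prints 'computing...' then 2.5
print(d.mean)   # 2.5 again, with no recomputation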
'build and install nipype in a temporary location.'
| def run(self):
| install = self.distribution.get_command_obj(u'install')
install.install_scripts = self.temp_install_dir
install.install_base = self.temp_install_dir
install.install_platlib = self.temp_install_dir
install.install_purelib = self.temp_install_dir
install.install_data = self.temp_install_dir
install.install_lib = self.temp_install_dir
install.install_headers = self.temp_install_dir
install.run()
for key in list(sys.modules.keys()):
if key.startswith(u'nipype'):
sys.modules.pop(key, None)
sys.path.append(os.path.abspath(self.temp_install_dir))
sys.path.pop(0)
import nipype
|
'Parameters
data : str
String with lines separated by \'\n\''
| def __init__(self, data):
| if isinstance(data, list):
self._str = data
else:
self._str = data.split('\n')
self.reset()
|
'func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3'
| def _parse_see_also(self, content):
| items = []
def parse_item_name(text):
"Match ':role:`name`' or 'name'"
m = self._name_rgx.match(text)
if m:
g = m.groups()
if (g[1] is None):
return (g[3], None)
else:
return (g[2], g[1])
raise ValueError(('%s is not an item name' % text))
def push_item(name, rest):
if (not name):
return
(name, role) = parse_item_name(name)
items.append((name, list(rest), role))
del rest[:]
current_func = None
rest = []
for line in content:
if (not line.strip()):
continue
m = self._name_rgx.match(line)
if (m and line[m.end():].strip().startswith(':')):
push_item(current_func, rest)
(current_func, line) = (line[:m.end()], line[m.end():])
rest = [line.split(':', 1)[1].strip()]
if (not rest[0]):
rest = []
elif (not line.startswith(' ')):
push_item(current_func, rest)
current_func = None
if (',' in line):
for func in line.split(','):
if func.strip():
push_item(func, [])
elif line.strip():
current_func = line
elif (current_func is not None):
rest.append(line.strip())
push_item(current_func, rest)
return items
|
'.. index: default
:refguide: something, else, and more'
| def _parse_index(self, section, content):
| def strip_each_in(lst):
return [s.strip() for s in lst]
out = {}
section = section.split('::')
if (len(section) > 1):
out['default'] = strip_each_in(section[1].split(','))[0]
for line in content:
line = line.split(':')
if (len(line) > 2):
out[line[1]] = strip_each_in(line[2].split(','))
return out
|
'Grab signature (if given) and summary'
| def _parse_summary(self):
| if self._is_at_section():
return
summary = self._doc.read_to_next_empty_line()
summary_str = ' '.join([s.strip() for s in summary]).strip()
if re.compile('^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$').match(summary_str):
self['Signature'] = summary_str
if (not self._is_at_section()):
self['Summary'] = self._doc.read_to_next_empty_line()
else:
self['Summary'] = summary
if (not self._is_at_section()):
self['Extended Summary'] = self._read_to_next_section()
|
'Generate a member listing, autosummary:: table where possible,
and a table where not.'
| def _str_member_list(self, name):
| out = []
if self[name]:
out += [(u'.. rubric:: %s' % name), u'']
prefix = getattr(self, u'_name', u'')
if prefix:
prefix = (u'~%s.' % prefix)
autosum = []
others = []
for (param, param_type, desc) in self[name]:
param = param.strip()
if ((not self._obj) or hasattr(self._obj, param)):
autosum += [(u' %s%s' % (prefix, param))]
else:
others.append((param, param_type, desc))
if autosum:
out += [u'.. autosummary::', u' :toctree:', u'']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = (((((u'=' * maxlen_0) + u' ') + (u'=' * maxlen_1)) + u' ') + (u'=' * 10))
fmt = (u'%%%ds %%%ds ' % (maxlen_0, maxlen_1))
n_indent = ((maxlen_0 + maxlen_1) + 4)
out += [hdr]
for (param, param_type, desc) in others:
out += [(fmt % (param.strip(), param_type))]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += [u'']
return out
|
'Finds all the tests modules in tests/, and runs them.'
| def run(self):
| from pymysqlreplication import tests
import unittest
unittest.main(tests, argv=sys.argv[:1])
|
'Use for WRITE, UPDATE and DELETE events.
Return an array of column data'
| def _read_column_data(self, cols_bitmap):
| values = {}
null_bitmap = self.packet.read(((BitCount(cols_bitmap) + 7) / 8))
nullBitmapIndex = 0
nb_columns = len(self.columns)
for i in range(0, nb_columns):
column = self.columns[i]
name = self.table_map[self.table_id].columns[i].name
unsigned = self.table_map[self.table_id].columns[i].unsigned
if (BitGet(cols_bitmap, i) == 0):
values[name] = None
continue
if self.__is_null(null_bitmap, nullBitmapIndex):
values[name] = None
elif (column.type == FIELD_TYPE.TINY):
if unsigned:
values[name] = struct.unpack('<B', self.packet.read(1))[0]
else:
values[name] = struct.unpack('<b', self.packet.read(1))[0]
elif (column.type == FIELD_TYPE.SHORT):
if unsigned:
values[name] = struct.unpack('<H', self.packet.read(2))[0]
else:
values[name] = struct.unpack('<h', self.packet.read(2))[0]
elif (column.type == FIELD_TYPE.LONG):
if unsigned:
values[name] = struct.unpack('<I', self.packet.read(4))[0]
else:
values[name] = struct.unpack('<i', self.packet.read(4))[0]
elif (column.type == FIELD_TYPE.INT24):
if unsigned:
values[name] = self.packet.read_uint24()
else:
values[name] = self.packet.read_int24()
elif (column.type == FIELD_TYPE.FLOAT):
values[name] = struct.unpack('<f', self.packet.read(4))[0]
elif (column.type == FIELD_TYPE.DOUBLE):
values[name] = struct.unpack('<d', self.packet.read(8))[0]
elif ((column.type == FIELD_TYPE.VARCHAR) or (column.type == FIELD_TYPE.STRING)):
if (column.max_length > 255):
values[name] = self.__read_string(2, column)
else:
values[name] = self.__read_string(1, column)
elif (column.type == FIELD_TYPE.NEWDECIMAL):
values[name] = self.__read_new_decimal(column)
elif (column.type == FIELD_TYPE.BLOB):
values[name] = self.__read_string(column.length_size, column)
elif (column.type == FIELD_TYPE.DATETIME):
values[name] = self.__read_datetime()
elif (column.type == FIELD_TYPE.TIME):
values[name] = self.__read_time()
elif (column.type == FIELD_TYPE.DATE):
values[name] = self.__read_date()
elif (column.type == FIELD_TYPE.TIMESTAMP):
values[name] = datetime.datetime.fromtimestamp(self.packet.read_uint32())
elif (column.type == FIELD_TYPE.DATETIME2):
values[name] = self.__read_datetime2(column)
elif (column.type == FIELD_TYPE.TIME2):
values[name] = self.__read_time2(column)
elif (column.type == FIELD_TYPE.TIMESTAMP2):
values[name] = self.__add_fsp_to_time(datetime.datetime.fromtimestamp(self.packet.read_int_be_by_size(4)), column)
elif (column.type == FIELD_TYPE.LONGLONG):
if unsigned:
values[name] = self.packet.read_uint64()
else:
values[name] = self.packet.read_int64()
elif (column.type == FIELD_TYPE.YEAR):
values[name] = (self.packet.read_uint8() + 1900)
elif (column.type == FIELD_TYPE.ENUM):
values[name] = column.enum_values[(self.packet.read_uint_by_size(column.size) - 1)]
elif (column.type == FIELD_TYPE.SET):
bit_mask = self.packet.read_uint_by_size(column.size)
values[name] = (set((val for (idx, val) in enumerate(column.set_values) if (bit_mask & (2 ** idx)))) or None)
elif (column.type == FIELD_TYPE.BIT):
values[name] = self.__read_bit(column)
elif (column.type == FIELD_TYPE.GEOMETRY):
values[name] = self.packet.read_length_coded_pascal_string(column.length_size)
elif (column.type == FIELD_TYPE.JSON):
values[name] = self.packet.read_binary_json(column.length_size)
else:
raise NotImplementedError(('Unknown MySQL column type: %d' % column.type))
nullBitmapIndex += 1
return values
|
'Read and add the fractional part of time
For more details about new date format:
http://dev.mysql.com/doc/internals/en/date-and-time-data-type-representation.html'
| def __add_fsp_to_time(self, time, column):
| microsecond = self.__read_fsp(column)
if (microsecond > 0):
time = time.replace(microsecond=microsecond)
return time
|
'Read MySQL BIT type'
| def __read_bit(self, column):
| resp = ''
for byte in range(0, column.bytes):
current_byte = ''
data = self.packet.read_uint8()
if (byte == 0):
if (column.bytes == 1):
end = column.bits
else:
end = (column.bits % 8)
if (end == 0):
end = 8
else:
end = 8
for bit in range(0, end):
if (data & (1 << bit)):
current_byte += '1'
else:
current_byte += '0'
resp += current_byte[::(-1)]
return resp
|
'TIME encoding for nonfractional part:
1 bit sign (1= non-negative, 0= negative)
1 bit unused (reserved for future extensions)
10 bits hour (0-838)
6 bits minute (0-59)
6 bits second (0-59)
24 bits = 3 bytes'
| def __read_time2(self, column):
| data = self.packet.read_int_be_by_size(3)
sign = (1 if self.__read_binary_slice(data, 0, 1, 24) else (-1))
if (sign == (-1)):
data = ((~ data) + 1)
t = datetime.timedelta(hours=(sign * self.__read_binary_slice(data, 2, 10, 24)), minutes=self.__read_binary_slice(data, 12, 6, 24), seconds=self.__read_binary_slice(data, 18, 6, 24), microseconds=self.__read_fsp(column))
return t
|
'DATETIME
1 bit sign (1= non-negative, 0= negative)
17 bits year*13+month (year 0-9999, month 0-12)
5 bits day (0-31)
5 bits hour (0-23)
6 bits minute (0-59)
6 bits second (0-59)
40 bits = 5 bytes'
| def __read_datetime2(self, column):
| data = self.packet.read_int_be_by_size(5)
year_month = self.__read_binary_slice(data, 1, 17, 40)
try:
t = datetime.datetime(year=int((year_month / 13)), month=(year_month % 13), day=self.__read_binary_slice(data, 18, 5, 40), hour=self.__read_binary_slice(data, 23, 5, 40), minute=self.__read_binary_slice(data, 28, 6, 40), second=self.__read_binary_slice(data, 34, 6, 40))
except ValueError:
return None
return self.__add_fsp_to_time(t, column)
|
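To make the bit layout concrete, a standalone sketch that packs and then unpacks a hypothetical 40-bit DATETIME2 value using the same slicing rule as __read_binary_slice; the date is made up:

import datetime

def read_binary_slice(binary, start, size, data_length):
    return (binary >> (data_length - (start + size))) & ((1 << size) - 1)

# 2023-07-15 12:30:45 packed as: 1-bit sign, 17-bit year*13+month, 5-bit day,
# 5-bit hour, 6-bit minute, 6-bit second.
year_month = 2023 * 13 + 7
data = (1 << 39) | (year_month << 22) | (15 << 17) | (12 << 12) | (30 << 6) | 45
ym = read_binary_slice(data, 1, 17, 40)
t = datetime.datetime(year=ym // 13, month=ym % 13,
                      day=read_binary_slice(data, 18, 5, 40),
                      hour=read_binary_slice(data, 23, 5, 40),
                      minute=read_binary_slice(data, 28, 6, 40),
                      second=read_binary_slice(data, 34, 6, 40))
print(t)   # 2023-07-15 12:30:45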
'Read MySQL\'s new decimal format introduced in MySQL 5'
| def __read_new_decimal(self, column):
| digits_per_integer = 9
compressed_bytes = [0, 1, 1, 2, 2, 3, 3, 4, 4, 4]
integral = (column.precision - column.decimals)
uncomp_integral = int((integral / digits_per_integer))
uncomp_fractional = int((column.decimals / digits_per_integer))
comp_integral = (integral - (uncomp_integral * digits_per_integer))
comp_fractional = (column.decimals - (uncomp_fractional * digits_per_integer))
value = self.packet.read_uint8()
if ((value & 128) != 0):
res = ''
mask = 0
else:
mask = (-1)
res = '-'
self.packet.unread(struct.pack('<B', (value ^ 128)))
size = compressed_bytes[comp_integral]
if (size > 0):
value = (self.packet.read_int_be_by_size(size) ^ mask)
res += str(value)
for i in range(0, uncomp_integral):
value = (struct.unpack('>i', self.packet.read(4))[0] ^ mask)
res += ('%09d' % value)
res += '.'
for i in range(0, uncomp_fractional):
value = (struct.unpack('>i', self.packet.read(4))[0] ^ mask)
res += ('%09d' % value)
size = compressed_bytes[comp_fractional]
if (size > 0):
value = (self.packet.read_int_be_by_size(size) ^ mask)
res += ('%0*d' % (comp_fractional, value))
return decimal.Decimal(res)
|
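A small arithmetic sketch of how precision and scale split into packed byte groups, for a hypothetical DECIMAL(10,4) column:

digits_per_integer = 9
compressed_bytes = [0, 1, 1, 2, 2, 3, 3, 4, 4, 4]
precision, decimals = 10, 4                                # hypothetical DECIMAL(10,4)
integral = precision - decimals                            # 6 integer digits
uncomp_integral = integral // digits_per_integer           # 0 full 4-byte groups
comp_integral = integral - uncomp_integral * digits_per_integer       # 6 leftover digits
uncomp_fractional = decimals // digits_per_integer         # 0 full 4-byte groups
comp_fractional = decimals - uncomp_fractional * digits_per_integer   # 4 leftover digits
print(compressed_bytes[comp_integral])    # 3 bytes hold the 6 integer digits
print(compressed_bytes[comp_fractional])  # 2 bytes hold the 4 fractional digits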
'Read a part of binary data and extract a number
binary: the data
start: From which bit (1 to X)
size: How many bits should be read
data_length: data size'
| def __read_binary_slice(self, binary, start, size, data_length):
| binary = (binary >> (data_length - (start + size)))
mask = ((1 << size) - 1)
return (binary & mask)
|
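A one-value worked example of the slice arithmetic; the 24-bit value is arbitrary:

data_length = 24
binary = 0b101100000000110100011110        # arbitrary 24-bit value
start, size = 2, 10                         # take 10 bits starting at bit 2 (MSB first)
value = (binary >> (data_length - (start + size))) & ((1 << size) - 1)
print(bin(value), value)                    # 0b1100000000 768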
'Return the MySQL version of the server
If version is 5.6.10-log the result is 5.6.10'
| def getMySQLVersion(self):
| return self.execute('SELECT VERSION()').fetchone()[0].split('-')[0]
|
'set sql_mode to test with same sql_mode (mysql 5.7 sql_mode default is changed)'
| def set_sql_mode(self):
| version = float(self.getMySQLVersion().rsplit('.', 1)[0])
if (version == 5.7):
self.execute("set @@sql_mode='NO_ENGINE_SUBSTITUTION'")
|
'Events the BinLogStreamReader should ignore'
| @staticmethod
def ignored_events():
| return [GtidEvent]
|
'A missing RotateEvent and skip_to_timestamp cause corruption
This test shows that a binlog file which lacks the trailing RotateEvent
and the use of the ``skip_to_timestamp`` argument together can cause
the table_map to become corrupt. The trailing RotateEvent has a
timestamp, but may be lost if the server crashes. The leading
RotateEvent in the next binlog file always has a timestamp of 0, thus
is discarded when ``skip_to_timestamp`` is greater than zero.'
| def test_no_trailing_rotate_event(self):
| self.execute('CREATE TABLE test (id INT NOT NULL AUTO_INCREMENT, data VARCHAR (50) NOT NULL, PRIMARY KEY(id))')
self.execute('SET AUTOCOMMIT = 0')
self.execute('INSERT INTO test(id, data) VALUES (1, "Hello")')
self.execute('COMMIT')
timestamp = self.execute('SELECT UNIX_TIMESTAMP()').fetchone()[0]
self.execute('FLUSH BINARY LOGS')
self.execute('INSERT INTO test(id, data) VALUES (2, "Hi")')
self.stream.close()
self._remove_trailing_rotate_event_from_first_binlog()
binlog = self.execute('SHOW BINARY LOGS').fetchone()[0]
self.stream = BinLogStreamReader(self.database, server_id=1024, log_pos=4, log_file=binlog, skip_to_timestamp=timestamp, ignored_events=self.ignored_events())
for _ in self.stream:
pass
self.assertEqual({}, self.stream.table_map)
|
'Remove the trailing RotateEvent from the first binlog
According to the MySQL Internals Manual, a RotateEvent will be added to
the end of a binlog when the binlog is rotated. This may not happen if
the server crashes, for example.
This method removes the trailing RotateEvent to verify that the library
properly handles this case.'
| def _remove_trailing_rotate_event_from_first_binlog(self):
| datadir = self.execute("SHOW VARIABLES LIKE 'datadir'").fetchone()[1]
binlog = self.execute('SHOW BINARY LOGS').fetchone()[0]
binlogpath = os.path.join(datadir, binlog)
reader = SimpleBinLogFileReader(binlogpath, only_events=[RotateEvent])
for _ in reader:
reader.truncatebinlog()
break
|
'Fetch one record from the binlog file'
| def fetchone(self):
| if ((self._pos is None) or (self._pos < 4)):
self._read_magic()
while True:
event = self._read_event()
self._current_event = event
if (event is None):
return None
if self._filter_events(event):
return event
|
'Truncate the binlog file at the current event'
| def truncatebinlog(self):
| if (self._current_event is not None):
self._file.truncate(self._current_event.pos)
|
'Return True if an event can be returned'
| def _filter_events(self, event):
| event_type = {constants.QUERY_EVENT: QueryEvent, constants.ROTATE_EVENT: RotateEvent, constants.FORMAT_DESCRIPTION_EVENT: FormatDescriptionEvent, constants.XID_EVENT: XidEvent, constants.TABLE_MAP_EVENT: TableMapEvent, constants.WRITE_ROWS_EVENT_V2: WriteRowsEvent}.get(event.event_type)
return (event_type in self._only_events)
|