def _load_image(file_path):
    """
    Parameters
    ----------
    file_path: str
        Path to the nifti file

    Returns
    -------
    nipy.Image with a file_path member
    """
    if not os.path.exists(file_path):
        raise FileNotFound(file_path)

    try:
        nii_img = load_nipy_img(file_path)
        nii_img.file_path = file_path
        return nii_img
    except Exception as exc:
        raise Exception('Error reading file {0}.'.format(file_path)) from exc

def _smooth_img(nii_img, smooth_fwhm):
    """
    Parameters
    ----------
    nii_img: nipy.Image

    smooth_fwhm: float

    Returns
    -------
    smoothed nipy.Image
    """
    # delayed import because nipy could not be installed on Python 3 on OSX
    from nipy.algorithms.kernel_smooth import LinearFilter

    if smooth_fwhm <= 0:
        return nii_img

    # build a Gaussian smoothing filter with the given FWHM (in mm)
    smoother = LinearFilter(nii_img.coordmap, nii_img.shape, fwhm=smooth_fwhm)
    return smoother.smooth(nii_img)

def from_dict(self, subj_files):
    """
    Parameters
    ----------
    subj_files: dict
        group_label (int/str) -> list of file paths
    """
    for group_label in subj_files:
        try:
            group_files = subj_files[group_label]
            self.items.extend([self._load_image(get_abspath(imgf)) for imgf in group_files])
            self.labels.extend([group_label] * len(group_files))
        except Exception as exc:
            raise Exception('Error while reading files from '
                            'group {0}.'.format(group_label)) from exc

def from_list(self, subj_files):
    """
    Parameters
    ----------
    subj_files: list of str
        file_paths
    """
    for sf in subj_files:
        try:
            nii_img = self._load_image(get_abspath(sf))
            self.items.append(nii_img)
        except Exception as exc:
            raise Exception('Error while reading file {0}.'.format(sf)) from exc

def set_labels(self, subj_labels):
    """
    Parameters
    ----------
    subj_labels: list of int or str
        This list will be checked to have the same size as the files list
        (self.items)
    """
    if len(subj_labels) != self.n_subjs:
        raise ValueError('The number of given labels is not the same as the number of subjects.')

    self.labels = subj_labels

def to_matrix(self, smooth_fwhm=0, outdtype=None):
    """Create a Numpy array with the data and return the relevant information
    (mask indices and volume shape).

    Parameters
    ----------
    smooth_fwhm: int
        Size of the FWHM Gaussian smoothing kernel applied to the subject
        volumes before creating the data matrix

    outdtype: dtype
        Type of the elements of the array. If None, the dtype of the first
        nifti file is used.

    Returns
    -------
    outmat: Numpy array with shape N x prod(vol.shape)
        containing the N files as flat vectors.

    mask_indices: matrix with indices of the voxels in the mask

    vol_shape: tuple with the shape of the volumes, for reshaping.
    """
    vol = self.items[0].get_data()
    if not outdtype:
        outdtype = vol.dtype

    n_voxels = None
    mask_indices = None
    vol_shape = self.items[0].shape

    if self.has_mask:
        mask_arr = get_img_data(self.mask_file)
        mask_indices = np.where(mask_arr > 0)
        vol_shape = mask_arr.shape
        n_voxels = np.count_nonzero(mask_arr)

    if n_voxels is None:
        log.debug('No mask information, flattening the whole volume of shape {}.'.format(vol.shape))
        n_voxels = np.prod(vol.shape)

    outmat = np.zeros((self.n_subjs, n_voxels), dtype=outdtype)
    try:
        for i, nipy_img in enumerate(self.items):
            vol = self._smooth_img(nipy_img, smooth_fwhm).get_data()
            if self.has_mask:
                outmat[i, :] = vol[mask_indices]
            else:
                outmat[i, :] = vol.flatten()
    except Exception as exc:
        raise Exception('Error flattening file {0}'.format(nipy_img.file_path)) from exc

    return outmat, mask_indices, vol_shape

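# Usage sketch for to_matrix(). `imgset` stands for an instance of the owning
# class (one providing `items`, `labels` and an optional mask); the variable
# name and the smoothing value are illustrative assumptions, not from the
# original code. Assumes a mask was set, so mask_indices is not None.
import numpy as np

outmat, mask_indices, vol_shape = imgset.to_matrix(smooth_fwhm=4)

# rebuild the first subject's volume from its flat row
vol = np.zeros(vol_shape)
vol[mask_indices] = outmat[0, :]
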
def die(msg, code=-1):
    """Writes msg to stderr and exits with return code"""
    sys.stderr.write(msg + "\n")
    sys.exit(code)

def check_call(cmd_args):
    """
    Calls the command

    Parameters
    ----------
    cmd_args: list of str
        Command name to call and its arguments in a list.

    Returns
    -------
    Command output
    """
    p = subprocess.Popen(cmd_args, stdout=subprocess.PIPE)
    (output, err) = p.communicate()
    return output

def call_command(cmd_name, args_strings):
    """Call a CLI command with arguments and return its return value.

    Parameters
    ----------
    cmd_name: str
        Command name or full path to the binary file.

    args_strings: list of str
        Argument strings list.

    Returns
    -------
    return_value
        Command return value.
    """
    if not op.isabs(cmd_name):
        cmd_fullpath = which(cmd_name)
    else:
        cmd_fullpath = cmd_name

    try:
        cmd_line = [cmd_fullpath] + args_strings

        log.info('Calling: {}.'.format(cmd_line))
        retval = subprocess.check_call(cmd_line)
    except CalledProcessError as ce:
        log.exception("Error calling command {} with arguments: "
                      "{} \n With return code: {}".format(cmd_name, args_strings,
                                                          ce.returncode))
        raise
    else:
        return retval

def condor_call(cmd, shell=True):
    """
    Try to submit cmd to HTCondor; if that does not succeed, cmd will
    be called with subprocess.call.

    Parameters
    ----------
    cmd: string
        Command to be submitted
    """
    log.info(cmd)
    ret = condor_submit(cmd)
    if ret != 0:
        subprocess.call(cmd, shell=shell)

def condor_submit(cmd):
    """
    Submits cmd to the HTCondor queue

    Parameters
    ----------
    cmd: string
        Command to be submitted

    Returns
    -------
    int
        returncode value from calling the submission command.
    """
    is_running = subprocess.call('condor_status', shell=True) == 0
    if not is_running:
        # CalledProcessError requires a return code and the failing command
        raise CalledProcessError(-1, 'condor_status',
                                 output='HTCondor is not running.')

    sub_cmd = 'condor_qsub -shell n -b y -r y -N ' \
              + cmd.split()[0] + ' -m n'

    log.info('Calling: ' + sub_cmd)

    return subprocess.call(sub_cmd + ' ' + cmd, shell=True)

def clean(ctx):
    """Clean previously built package artifacts.
    """
    ctx.run('python setup.py clean')
    dist = ROOT.joinpath('dist')
    print(f'removing {dist}')
    shutil.rmtree(str(dist))

def upload(ctx, repo):
    """Upload the package to an index server.

    This implies cleaning and re-building the package.

    :param repo: Required.
        Name of the index server to upload to, as specified in your .pypirc
        configuration file.
    """
    artifacts = ' '.join(
        shlex.quote(str(n))
        for n in ROOT.joinpath('dist').glob('pipfile[-_]cli-*')
    )
    ctx.run(f'twine upload --repository="{repo}" {artifacts}')

def load_command_table(self, args):  # pylint: disable=too-many-statements
    """Load all Service Fabric commands"""

    # Need an empty client for the select and upload operations
    with CommandSuperGroup(__name__, self, 'rcctl.custom_cluster#{}') as super_group:
        with super_group.group('cluster') as group:
            group.command('select', 'select')

    with CommandSuperGroup(__name__, self, 'rcctl.custom_reliablecollections#{}',
                           client_factory=client_create) as super_group:
        with super_group.group('dictionary') as group:
            group.command('query', 'query_reliabledictionary')
            group.command('execute', 'execute_reliabledictionary')
            group.command('schema', 'get_reliabledictionary_schema')
            group.command('list', 'get_reliabledictionary_list')
            group.command('type-schema', 'get_reliabledictionary_type_schema')

    with ArgumentsContext(self, 'dictionary') as ac:
        ac.argument('application_name', options_list=['--application-name', '-a'])
        ac.argument('service_name', options_list=['--service-name', '-s'])
        ac.argument('dictionary_name', options_list=['--dictionary-name', '-d'])
        ac.argument('output_file', options_list=['--output-file', '-out'])
        ac.argument('input_file', options_list=['--input-file', '-in'])
        ac.argument('query_string', options_list=['--query-string', '-q'])
        ac.argument('type_name', options_list=['--type-name', '-t'])

    return OrderedDict(self.command_table)

def open_volume_file(filepath):
    """Open a volumetric file using the tools following the file extension.

    Parameters
    ----------
    filepath: str
        Path to a volume file

    Returns
    -------
    volume_data: np.ndarray
        Volume data

    pixdim: 1xN np.ndarray
        Vector with the description of the voxels physical size (usually in mm)
        for each volume dimension.

    Raises
    ------
    IOError
        In case the file is not found.
    """
    # check if the file exists
    if not op.exists(filepath):
        raise IOError('Could not find file {}.'.format(filepath))

    # define helper functions
    def open_nifti_file(filepath):
        return NiftiImage(filepath)

    def open_mhd_file(filepath):
        vol_data, hdr_data = load_raw_data_with_mhd(filepath)
        # TODO: convert vol_data and hdr_data into MedicalImage
        return vol_data, hdr_data

    def open_mha_file(filepath):
        raise NotImplementedError('This function has not been implemented yet.')

    # generic loader function
    def _load_file(filepath, loader):
        return loader(filepath)

    # file_extension -> file loader function
    filext_loader = {
        'nii': open_nifti_file,
        'mhd': open_mhd_file,
        'mha': open_mha_file,
    }

    # get the extension of `filepath` and find the loader for it
    ext = get_extension(filepath).lower()
    loader = None
    for e in filext_loader:
        if e in ext:
            loader = filext_loader[e]

    if loader is None:
        raise ValueError('Could not find a loader for file {}.'.format(filepath))

    return _load_file(filepath, loader)

def _check_medimg(image, make_it_3d=True):
    """Check that image is a proper img. Turn filenames into objects.

    Parameters
    ----------
    image: img-like object or str
        Can either be:
        - a file path to a medical image file, e.g. NifTI, .mhd/raw, .mha
        - any object with a get_data() method and affine & header attributes,
          e.g., nibabel.Nifti1Image.
        - a Numpy array, which will be wrapped by a nibabel.Nifti2Image class
          with an `eye` affine.
        If niimg is a string, consider it as a path to Nifti image and
        call nibabel.load on it. If it is an object, check if get_data()
        and get_affine() methods are present, raise TypeError otherwise.

    make_it_3d: boolean, optional
        If True, check if the image is a 3D image and raise an error if not.

    Returns
    -------
    result: nifti-like
        result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed
        that the returned object has get_data() and get_affine() methods.
    """
    if isinstance(image, string_types):
        # a filename, load it
        img = open_volume_file(image)

        if make_it_3d:
            img = _make_it_3d(img)

        return img

    elif isinstance(image, np.ndarray):
        return nib.Nifti2Image(image, affine=np.eye(image.ndim + 1))

    elif isinstance(image, nib.Nifti1Image) or is_img(image):
        return image

    else:
        raise TypeError('Data given cannot be converted to a medical image:'
                        ' this object -"{}"- does not have'
                        ' get_data or get_affine methods'.format(type(image)))

def rename_file_group_to_serial_nums(file_lst):
    """Rename all files in file_lst to a zero-padded serial number plus its
    extension.

    :param file_lst: list of path.py paths
    """
    file_lst.sort()
    c = 1
    for f in file_lst:
        dirname = get_abspath(f.dirname())
        # build the destination path inside the same directory
        # (assuming get_abspath returns a path.py path)
        fdest = dirname.joinpath("{0:04d}".format(c) + OUTPUT_DICOM_EXTENSION)
        log.info('Renaming {0} to {1}'.format(f, fdest))
        f.rename(fdest)
        c += 1

def _store_dicom_paths(self, folders):
    """Search for dicoms in folders and save the file paths into self.items.

    :param folders: str or list of str
    """
    if isinstance(folders, str):
        folders = [folders]

    for folder in folders:
        if not os.path.exists(folder):
            raise FolderNotFound(folder)

        self.items.extend(list(find_all_dicom_files(folder)))

def from_set(self, fileset, check_if_dicoms=True):
    """Overwrites self.items with the given set of files.
    Will filter the fileset and keep only DICOM files.

    Parameters
    ----------
    fileset: iterable of str
        Paths to files

    check_if_dicoms: bool
        Whether to check if the items in fileset are DICOM file paths
    """
    if check_if_dicoms:
        self.items = []
        for f in fileset:
            if is_dicom_file(f):
                self.items.append(f)
    else:
        self.items = fileset

def update(self, dicomset):
    """Update this set with the union of itself and dicomset.

    Parameters
    ----------
    dicomset: DicomFileSet
    """
    if not isinstance(dicomset, DicomFileSet):
        raise ValueError('Given dicomset is not a DicomFileSet.')

    # set.update() modifies in place and returns None, so build the union
    self.items = list(set(self.items).union(dicomset))

def copy_files_to_other_folder(self, output_folder, rename_files=True,
                               mkdir=True, verbose=False):
    """
    Copies all files within this set to the output_folder

    Parameters
    ----------
    output_folder: str
        Path of the destination folder of the files

    rename_files: bool
        Whether or not rename the files to a sequential format

    mkdir: bool
        Whether to make the folder if it does not exist

    verbose: bool
        Whether to print to stdout the files that are being copied
    """
    import shutil

    if mkdir and not os.path.exists(output_folder):
        os.mkdir(output_folder)

    if not rename_files:
        for dcmf in self.items:
            outf = os.path.join(output_folder, os.path.basename(dcmf))
            if verbose:
                print('{} -> {}'.format(dcmf, outf))
            shutil.copyfile(dcmf, outf)
    else:
        # pad the numbers with as many digits as the number of items needs
        n_pad = len(str(len(self.items))) + 2
        for idx, dcmf in enumerate(self.items):
            outf = '{number:0{width}d}.dcm'.format(width=n_pad, number=idx)
            outf = os.path.join(output_folder, outf)
            if verbose:
                print('{} -> {}'.format(dcmf, outf))
            shutil.copyfile(dcmf, outf)

def get_dcm_reader(store_metadata=True, header_fields=None):
    """
    Creates a lambda function to read DICOM files.

    If store_metadata is False, the reader will only return the file path.
    Otherwise, if header_fields is given, it will return only that set of
    header_fields within a DicomFile object, or the whole DICOM file if
    header_fields is None.

    :return: function
        This function has only one parameter: file_path
    """
    if not store_metadata:
        return lambda fpath: fpath

    if header_fields is None:
        build_dcm = lambda fpath: DicomFile(fpath)
    else:
        dicom_header = namedtuple('DicomHeader', header_fields)
        build_dcm = lambda fpath: dicom_header._make(DicomFile(fpath).get_attributes(header_fields))

    return build_dcm

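# Usage sketch for get_dcm_reader(); the file path and the header fields
# below are illustrative assumptions, not from the original code.
read_dcm = get_dcm_reader(store_metadata=True,
                          header_fields=['PatientID', 'Modality'])
hdr = read_dcm('/data/scan/0001.dcm')
# hdr is a 'DicomHeader' namedtuple: hdr.PatientID, hdr.Modality
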
def scrape_all_files(self):
    """
    Generator that yields one by one the return value of self.read_dcm
    for each file within this set
    """
    try:
        for dcmf in self.items:
            yield self.read_dcm(dcmf)
    except IOError as ioe:
        raise IOError('Error reading DICOM file: {}.'.format(dcmf)) from ioe

def get_unique_field_values(dcm_file_list, field_name):
    """Return a set of unique field values from a list of DICOM files

    Parameters
    ----------
    dcm_file_list: iterable of DICOM file paths

    field_name: str
        Name of the field from where to get each value

    Returns
    -------
    Set of field values
    """
    field_values = set()

    for dcm in dcm_file_list:
        field_values.add(str(DicomFile(dcm).get_attributes(field_name)))

    return field_values

def find_all_dicom_files(root_path):
    """
    Returns the set of DICOM files found recursively within root_path.

    Parameters
    ----------
    root_path: str
        Path to the directory to be recursively searched for DICOM files.

    Returns
    -------
    dicoms: set
        Set of DICOM absolute file paths
    """
    dicoms = set()
    try:
        for fpath in get_all_files(root_path):
            if is_dicom_file(fpath):
                dicoms.add(fpath)
    except IOError as ioe:
        raise IOError('Error reading file {0}.'.format(fpath)) from ioe

    return dicoms

def is_dicom_file(filepath):
    """
    Tries to read the file using dicom.read_file. If the file exists and
    dicom.read_file does not raise an Exception, returns True; False otherwise.

    :param filepath: str
        Path to DICOM file

    :return: bool
    """
    if not os.path.exists(filepath):
        raise IOError('File {} not found.'.format(filepath))

    filename = os.path.basename(filepath)
    if filename == 'DICOMDIR':
        return False

    try:
        _ = dicom.read_file(filepath)
    except Exception:
        log.debug('Checked whether {0} is a DICOM file, and it is '
                  'not.'.format(filepath))
        return False

    return True

def group_dicom_files(dicom_paths, hdr_field='PatientID'):
    """Group in a dictionary all the DICOM files in dicom_paths
    separated by the given `hdr_field` tag value.

    Parameters
    ----------
    dicom_paths: iterable of str
        DICOM file paths.

    hdr_field: str
        Name of the DICOM tag whose values will be used as keys for the groups.

    Returns
    -------
    dicom_groups: dict of dicom_paths
    """
    dicom_groups = defaultdict(list)
    try:
        for dcm in dicom_paths:
            hdr = dicom.read_file(dcm)
            group_key = getattr(hdr, hdr_field)
            dicom_groups[group_key].append(dcm)
    except (AttributeError, KeyError) as exc:
        # getattr on a missing pydicom tag raises AttributeError
        raise KeyError('Error reading field {} from file {}.'.format(hdr_field, dcm)) from exc

    return dicom_groups

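# Usage sketch combining find_all_dicom_files() and group_dicom_files();
# the directory path is an illustrative assumption.
groups = group_dicom_files(find_all_dicom_files('/data/dicoms'),
                           hdr_field='PatientID')
for patient_id, paths in groups.items():
    print(patient_id, len(paths))
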
def decompress(input_dir, dcm_pattern='*.dcm'):
    """ Decompress all *.dcm files recursively found in DICOM_DIR.
    This uses 'gdcmconv --raw'.
    It works when 'dcm2nii' shows the `Unsupported Transfer Syntax` error.
    This error is usually caused by lack of JPEG2000 support in the dcm2nii
    compilation.

    Read more:
    http://www.nitrc.org/plugins/mwiki/index.php/dcm2nii:MainPage#Transfer_Syntaxes_and_Compressed_Images

    Parameters
    ----------
    input_dir: str
        Folder path

    dcm_pattern: str
        Pattern of the DICOM file names in `input_dir`.

    Notes
    -----
    The *.dcm files in `input_dir` will be overwritten.
    """
    dcmfiles = sorted(recursive_glob(input_dir, dcm_pattern))
    for dcm in dcmfiles:
        cmd = 'gdcmconv --raw -i "{0}" -o "{0}"'.format(dcm)
        log.debug('Calling {}.'.format(cmd))
        subprocess.check_call(cmd, shell=True)

def get_attributes(self, attributes, default=''):
    """Return the attribute values from this DicomFile.

    Parameters
    ----------
    attributes: str or list of str
        DICOM field names

    default: str
        Default value if the attribute does not exist.

    Returns
    -------
    Value of the field or tuple of values.
    """
    if isinstance(attributes, str):
        attributes = [attributes]

    attrs = [getattr(self, attr, default) for attr in attributes]

    if len(attrs) == 1:
        return attrs[0]

    return tuple(attrs)

def merge_images(images, axis='t'):
    """ Concatenate `images` in the direction determined by `axis`.

    Parameters
    ----------
    images: list of str or img-like object.
        See NeuroImage constructor docstring.

    axis: str
        't' : concatenate images in time
        'x' : concatenate images in the x direction
        'y' : concatenate images in the y direction
        'z' : concatenate images in the z direction

    Returns
    -------
    merged: numpy.ndarray
        The concatenated image data array.
    """
    # check if images is not empty
    if not images:
        return None

    # map the given axis name to an axis index
    axis_dim = {'x': 0,
                'y': 1,
                'z': 2,
                't': 3,
                }

    # check if the given axis name is valid
    if axis not in axis_dim:
        raise ValueError('Expected `axis` to be one of ({}), got {}.'.format(set(axis_dim.keys()), axis))

    # check if all images are compatible with each other
    img1 = images[0]
    for img in images:
        check_img_compatibility(img1, img)

    # read the data of all the given images
    # TODO: optimize memory consumption by merging one by one.
    image_data = []
    for img in images:
        image_data.append(check_img(img).get_data())

    # if the work_axis is bigger than the number of axes of the images,
    # create a new axis for the images
    work_axis = axis_dim[axis]
    ndim = image_data[0].ndim
    if ndim - 1 < work_axis:
        image_data = [np.expand_dims(img, axis=work_axis) for img in image_data]

    # concatenate and return
    return np.concatenate(image_data, axis=work_axis)

def nifti_out(f):
    """ Decorator for a function whose first argument is an `img` and that
    processes its data and returns a numpy array. This decorator wraps the
    returned numpy array into a nibabel.Nifti1Image."""
    @wraps(f)
    def wrapped(*args, **kwargs):
        r = f(*args, **kwargs)

        img = read_img(args[0])
        return nib.Nifti1Image(r, affine=img.get_affine(), header=img.header)

    return wrapped

# wrapped with @nifti_out so callers (e.g., spatial_map below, which calls
# .get_data() on the result) receive a nibabel.Nifti1Image, as documented
@nifti_out
def thr_img(img, thr=2., mode='+'):
    """ Threshold the data of `img` with the value `thr` and
    return a new nibabel.Nifti1Image.

    Parameters
    ----------
    img: img-like

    thr: float or int
        The threshold value.

    mode: str
        Choices: '+' for positive threshold,
                 '+-' for positive and negative threshold and
                 '-' for negative threshold.

    Returns
    -------
    thr_img: nibabel.Nifti1Image
        Thresholded image
    """
    vol = read_img(img).get_data()

    if mode == '+':
        mask = vol > thr
    elif mode == '+-' or mode == '-+':
        mask = np.abs(vol) > thr
    elif mode == '-':
        mask = vol < -thr
    else:
        raise ValueError("Expected `mode` to be one of ('+', '+-', '-+', '-'), "
                         "got {}.".format(mode))

    return vol * mask

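# Usage sketch for thr_img(); the input file path is an illustrative
# assumption.
thresholded = thr_img('ica_component.nii.gz', thr=2.5, mode='+-')
# keeps voxels with |value| > 2.5, zeroes the rest; returns a Nifti1Image
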
@nifti_out
def div_img(img1, div2):
    """ Pixelwise division, or division by a number. """
    if is_img(div2):
        return img1.get_data() / div2.get_data()
    elif isinstance(div2, (float, int)):
        return img1.get_data() / div2
    else:
        raise NotImplementedError('Cannot divide {}({}) by '
                                  '{}({})'.format(type(img1),
                                                  img1,
                                                  type(div2),
                                                  div2))

@nifti_out
def apply_mask(img, mask):
    """Return the image with the given `mask` applied."""
    # alias the import to avoid shadowing this function's own name
    from .mask import apply_mask as _apply_mask

    vol, _ = _apply_mask(img, mask)

    return vector_to_volume(vol, read_img(mask).get_data().astype(bool))

@nifti_out
def abs_img(img):
    """ Return an image with the absolute value of the data of `img`,
    cast to int."""
    abs_data = np.abs(read_img(img).get_data())
    return abs_data.astype(int)

@nifti_out
def icc_img_to_zscore(icc, center_image=False):
    """ Return a z-scored version of `icc`.
    This function is based on the GIFT `icatb_convertImageToZScores` function.
    """
    vol = read_img(icc).get_data()

    v2 = vol[vol != 0]
    if center_image:
        v2 = detrend(v2, axis=0)

    vstd = np.linalg.norm(v2, ord=2) / np.sqrt(np.prod(v2.shape) - 1)

    eps = np.finfo(vstd.dtype).eps
    vol /= (eps + vstd)

    return vol

def spatial_map(icc, thr, mode='+'):
    """ Return the thresholded z-scored `icc`. """
    return thr_img(icc_img_to_zscore(icc), thr=thr, mode=mode).get_data()

def filter_icc(icc, mask=None, thr=2, zscore=True, mode="+"):
    """ Threshold then mask an IC correlation map.

    Parameters
    ----------
    icc: img-like
        The 'raw' ICC map.

    mask: img-like
        If not None, this mask will be applied at the end of the process.

    thr: float
        The threshold value.

    zscore: bool
        If True will calculate the z-score of the ICC before thresholding.

    mode: str
        Choices: '+' for positive threshold,
                 '+-' for positive and negative threshold and
                 '-' for negative threshold.

    Returns
    -------
    icc_filt: nibabel.NiftiImage
        Thresholded and masked ICC.
    """
    if zscore:
        icc_filt = thr_img(icc_img_to_zscore(icc), thr=thr, mode=mode)
    else:
        icc_filt = thr_img(icc, thr=thr, mode=mode)

    if mask is not None:
        icc_filt = apply_mask(icc_filt, mask)

    return icc_filt

def check_mhd_img(image, make_it_3d=False):
    """Check that image is a proper img. Turn filenames into objects.

    Parameters
    ----------
    image: img-like object or str
        Can either be:
        - a file path to a .mhd file. (if it is a .raw file, this won't work).
        - any object with get_data() and get_affine() methods, e.g.,
          nibabel.Nifti1Image.
        If niimg is a string, consider it as a path to a .mhd image and
        call load_raw_data_with_mhd on it.
        If it is an object, check if get_data() and get_affine() methods
        are present, raise TypeError otherwise.

    make_it_3d: boolean, optional
        If True, check if the image is a 3D image and raise an error if not.

    Returns
    -------
    result: nifti-like
        result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed
        that the returned object has get_data() and get_affine() methods.
    """
    if isinstance(image, string_types):
        # a filename, load it
        if not op.exists(image):
            raise FileNotFound(image)

        ext = get_extension(image).lower()
        if 'mhd' not in ext:
            warnings.warn('Expecting a filepath with `.mhd` extension, got {}.'.format(image))

        img, hdr = load_raw_data_with_mhd(image)
        if make_it_3d:
            img = _make_it_3d(img)

        return img
    elif is_img(image):
        return image
    else:
        raise TypeError('Data given cannot be converted to a nifti'
                        ' image: this object -"{}"- does not have'
                        ' get_data or get_affine methods'.format(type(image)))

def _make_it_3d(img):
    """Enforce that img is a 3D img-like object by squeezing a trailing
    dimension of size 1; raise a TypeError if that is not possible.

    Parameters
    ----------
    img: numpy.ndarray
        Image data array

    Returns
    -------
    3D numpy ndarray object
    """
    shape = img.shape
    if len(shape) == 3:
        return img
    elif len(shape) == 4 and shape[3] == 1:
        # "squeeze" the image.
        return img[:, :, :, 0]
    else:
        raise TypeError('A 3D image is expected, but an image with a shape of {} was given.'.format(shape))

def write_meta_header(filename, meta_dict):
    """ Write the content of the `meta_dict` into `filename`.

    Parameters
    ----------
    filename: str
        Path to the output file

    meta_dict: dict
        Dictionary with the fields of the metadata .mhd file
    """
    header = ''
    # do not use `tags = meta_dict.keys()` because the order of the tags matters
    for tag in MHD_TAGS:
        if tag in meta_dict.keys():
            header += '{} = {}\n'.format(tag, meta_dict[tag])

    with open(filename, 'w') as f:
        f.write(header)

def dump_raw_data(filename, data):
    """ Write the data into a raw format file. Big endian is always used.

    Parameters
    ----------
    filename: str
        Path to the output file

    data: numpy.ndarray
        n-dimensional image data array.
    """
    if data.ndim == 3:
        # Begin 3D fix
        data = data.reshape([data.shape[0], data.shape[1] * data.shape[2]])
        # End 3D fix

    a = array.array('f')
    for o in data:
        a.fromlist(list(o.flatten()))

    # if is_little_endian():
    #     a.byteswap()

    with open(filename, 'wb') as rawf:
        a.tofile(rawf)

def write_mhd_file(filename, data, shape=None, meta_dict=None):
    """ Write the `data` and `meta_dict` in two files with names
    that use `filename` as a prefix.

    Parameters
    ----------
    filename: str
        Path to the output file.
        This is going to be used as a prefix.
        Two files will be created, one with a '.mhd' extension
        and another with '.raw'. If `filename` has any of these already
        they will be taken into account to build the filenames.

    data: numpy.ndarray
        n-dimensional image data array.

    shape: tuple
        Tuple describing the shape of `data`
        Default: data.shape

    meta_dict: dict
        Dictionary with the fields of the metadata .mhd file
        Default: {}

    Returns
    -------
    mhd_filename: str
        Path to the .mhd file

    raw_filename: str
        Path to the .raw file
    """
    # check its extension
    ext = get_extension(filename)
    fname = op.basename(filename)
    if ext == '.mhd':
        mhd_filename = fname
        raw_filename = remove_ext(fname) + '.raw'
    elif ext == '.raw':
        mhd_filename = remove_ext(fname) + '.mhd'
        raw_filename = fname
    else:
        mhd_filename = fname + '.mhd'
        raw_filename = fname + '.raw'

    # default values
    if meta_dict is None:
        meta_dict = {}

    if shape is None:
        shape = data.shape

    # prepare the default header
    meta_dict['ObjectType'] = meta_dict.get('ObjectType', 'Image')
    meta_dict['BinaryData'] = meta_dict.get('BinaryData', 'True')
    meta_dict['BinaryDataByteOrderMSB'] = meta_dict.get('BinaryDataByteOrderMSB', 'False')
    meta_dict['ElementType'] = meta_dict.get('ElementType', NUMPY_TO_MHD_TYPE[data.dtype.type])
    meta_dict['NDims'] = meta_dict.get('NDims', str(len(shape)))
    meta_dict['DimSize'] = meta_dict.get('DimSize', ' '.join([str(i) for i in shape]))
    meta_dict['ElementDataFile'] = meta_dict.get('ElementDataFile', raw_filename)

    # target files
    mhd_filename = op.join(op.dirname(filename), mhd_filename)
    raw_filename = op.join(op.dirname(filename), raw_filename)

    # write the header
    write_meta_header(mhd_filename, meta_dict)

    # write the data
    dump_raw_data(raw_filename, data)

    return mhd_filename, raw_filename

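# Usage sketch for write_mhd_file(); the output prefix is an illustrative
# assumption, and float32 is assumed to be a key of NUMPY_TO_MHD_TYPE.
import numpy as np

data = np.random.rand(32, 32, 16).astype(np.float32)
mhd_path, raw_path = write_mhd_file('/tmp/volume', data)
# -> '/tmp/volume.mhd' (text header) and '/tmp/volume.raw' (voxel data)
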
def copy_mhd_and_raw(src, dst):
    """Copy .mhd and .raw files to dst.

    If dst is a folder, won't change the file, but if dst is another filepath,
    will modify the ElementDataFile field in the .mhd to point to the
    new renamed .raw file.

    Parameters
    ----------
    src: str
        Path to the .mhd file to be copied

    dst: str
        Path to the destination of the .mhd and .raw files.
        If a new file name is given, the extension will be ignored.

    Returns
    -------
    dst: str
    """
    # check if src exists
    if not op.exists(src):
        raise IOError('Could not find file {}.'.format(src))

    # check its extension
    ext = get_extension(src)
    if ext != '.mhd':
        msg = 'The src file path must be a .mhd file. Given: {}.'.format(src)
        raise ValueError(msg)

    # get the raw file for this src mhd file
    meta_src = _read_meta_header(src)

    # get the source raw file
    src_raw = meta_src['ElementDataFile']
    if not op.isabs(src_raw):
        src_raw = op.join(op.dirname(src), src_raw)

    # check if dst is a folder
    if op.isdir(dst):
        # copy the mhd and raw files into the folder, keeping their names
        shutil.copyfile(src, op.join(dst, op.basename(src)))
        shutil.copyfile(src_raw, op.join(dst, op.basename(src_raw)))
        return dst

    # build raw file dst file name
    dst_raw = op.join(op.dirname(dst), remove_ext(op.basename(dst))) + '.raw'

    # add extension to the dst path
    if get_extension(dst) != '.mhd':
        dst += '.mhd'

    # copy the mhd and raw file to their destination
    log.debug('cp: {} -> {}'.format(src, dst))
    log.debug('cp: {} -> {}'.format(src_raw, dst_raw))
    shutil.copyfile(src, dst)
    shutil.copyfile(src_raw, dst_raw)

    # check if src file name is different than dst file name;
    # if they differ, change the content of the ElementDataFile field
    if op.basename(dst) != op.basename(src):
        log.debug('modify {}: ElementDataFile: {} -> {}'.format(dst, src_raw,
                                                                op.basename(dst_raw)))
        meta_dst = _read_meta_header(dst)
        meta_dst['ElementDataFile'] = op.basename(dst_raw)
        write_meta_header(dst, meta_dst)

    return dst

def sav_to_pandas_rpy2(input_file):
    """
    SPSS .sav files to Pandas DataFrame through Rpy2

    :param input_file: string

    :return:
    """
    import pandas.rpy.common as com

    w = com.robj.r('foreign::read.spss("%s", to.data.frame=TRUE)' % input_file)
    return com.convert_robj(w)

def sav_to_pandas_savreader(input_file):
    """
    SPSS .sav files to Pandas DataFrame through the savReaderWriter module

    :param input_file: string

    :return:
    """
    from savReaderWriter import SavReader
    lines = []
    with SavReader(input_file, returnHeader=True) as reader:
        header = next(reader)
        for line in reader:
            lines.append(line)

    return pd.DataFrame(data=lines, columns=header)

def save_variables(filename, variables):
    """Save given variables in a file.
    Valid extensions: '.pyshelf' or '.shelf' (Python shelve),
    '.mat' (Matlab archive), '.hdf5' or '.h5' (HDF5 file)

    Parameters
    ----------
    filename: str
        Output file path.

    variables: dict
        Dictionary varname -> variable

    Raises
    ------
    ValueError: if the extension of the filename is not recognized.
    """
    ext = get_extension(filename).lower()
    out_exts = {'.pyshelf', '.shelf', '.mat', '.hdf5', '.h5'}

    output_file = filename
    if ext not in out_exts:
        output_file = add_extension_if_needed(filename, '.pyshelf')
        ext = get_extension(output_file)

    if ext == '.pyshelf' or ext == '.shelf':
        save_variables_to_shelve(output_file, variables)

    elif ext == '.mat':
        save_variables_to_mat(output_file, variables)

    elif ext == '.hdf5' or ext == '.h5':
        from .hdf5 import save_variables_to_hdf5
        save_variables_to_hdf5(output_file, variables)

    else:
        raise ValueError('Filename extension {0} not accepted.'.format(ext))

def save_varlist(filename, varnames, varlist):
    """
    Valid extensions: '.pyshelf', '.mat', '.hdf5' or '.h5'

    @param filename: string

    @param varnames: list of strings
        Names of the variables

    @param varlist: list of objects
        The objects to be saved
    """
    variables = dict(zip(varnames, varlist))
    ExportData.save_variables(filename, variables)

def cli():
    """Create CLI environment"""
    return VersionedCLI(cli_name=SF_CLI_NAME,
                        config_dir=SF_CLI_CONFIG_DIR,
                        config_env_var_prefix=SF_CLI_ENV_VAR_PREFIX,
                        commands_loader_cls=SFCommandLoader,
                        help_cls=SFCommandHelp)

def drain_rois(img):
    """Find all the ROIs in img and return a similar volume with the ROIs
    emptied, keeping only their border voxels.

    This is useful for DTI tractography.

    Parameters
    ----------
    img: img-like object or str
        Can either be:
        - a file path to a Nifti image
        - any object with get_data() and get_affine() methods, e.g.,
          nibabel.Nifti1Image.
        If niimg is a string, consider it as a path to Nifti image and
        call nibabel.load on it. If it is an object, check if get_data()
        and get_affine() methods are present, raise TypeError otherwise.

    Returns
    -------
    np.ndarray
        an array of same shape as img_data
    """
    img_data = get_img_data(img)

    out = np.zeros(img_data.shape, dtype=img_data.dtype)

    krn_dim = [3] * img_data.ndim
    kernel = np.ones(krn_dim, dtype=int)

    vals = np.unique(img_data)
    vals = vals[vals != 0]

    for i in vals:
        roi = img_data == i
        hits = scn.binary_hit_or_miss(roi, kernel)
        roi[hits] = 0
        out[roi > 0] = i

    return out

def pick_rois(rois_img, roi_values, bg_val=0):
    """ Return `rois_img` with only the ROI values from `roi_values`.

    Parameters
    ----------
    rois_img: niimg-like

    roi_values: list of int or float
        The list of values from rois_img.

    bg_val: int or float
        The background value of `rois_img`.

    Returns
    -------
    subset_rois_img: nibabel.Nifti2Image
    """
    img = read_img(rois_img)
    img_data = img.get_data()

    if bg_val == 0:
        out = np.zeros(img_data.shape, dtype=img_data.dtype)
    else:
        out = np.ones(img_data.shape, dtype=img_data.dtype) * bg_val

    for r in roi_values:
        out[img_data == r] = r

    return nib.Nifti2Image(out, affine=img.affine, header=img.header)

def largest_connected_component(volume):
    """Return the largest connected component of a 3D array.

    Parameters
    ----------
    volume: numpy.array
        3D boolean array.

    Returns
    -------
    volume: numpy.array
        3D boolean array with only one connected component.
    """
    # We use asarray to be able to work with masked arrays.
    volume = np.asarray(volume)
    labels, num_labels = scn.label(volume)
    if not num_labels:
        raise ValueError('No non-zero values: no connected components found.')

    if num_labels == 1:
        return volume.astype(bool)

    label_count = np.bincount(labels.ravel().astype(int))
    # discard the 0 label
    label_count[0] = 0
    return labels == label_count.argmax()

def large_clusters_mask(volume, min_cluster_size):
    """ Return a mask for `volume` that includes only areas where
    the connected components have a size bigger than `min_cluster_size`
    in number of voxels.

    Parameters
    ----------
    volume: numpy.array
        3D boolean array.

    min_cluster_size: int
        Minimum size in voxels that the connected component must have.

    Returns
    -------
    volume: numpy.array
        3D int array with a mask excluding small connected components.
    """
    labels, num_labels = scn.label(volume)

    # scn.label numbers the components 1..num_labels; 0 is the background
    labels_to_keep = set(i for i in range(1, num_labels + 1)
                         if np.sum(labels == i) >= min_cluster_size)

    clusters_mask = np.zeros_like(volume, dtype=int)
    for lab in labels_to_keep:
        clusters_mask[labels == lab] = 1

    return clusters_mask

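# Usage sketch for large_clusters_mask(), assuming the function above and
# numpy are in scope (the toy volume is illustrative).
import numpy as np

vol = np.zeros((10, 10, 10), dtype=bool)
vol[0:4, 0:4, 0:4] = True   # 64-voxel component: kept
vol[8, 8, 8] = True         # single-voxel component: dropped
mask = large_clusters_mask(vol, min_cluster_size=10)
assert mask.sum() == 64
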
def create_rois_mask(roislist, filelist):
    """Look for the files in filelist containing the names in roislist.
    These files will be opened, binarised and merged into one mask.

    Parameters
    ----------
    roislist: list of strings
        Names of the ROIs, which will have to be in the names of the files in
        filelist.

    filelist: list of strings
        List of paths to the volume files containing the ROIs.

    Returns
    -------
    numpy.ndarray
        Mask volume
    """
    roifiles = []

    for roi in roislist:
        try:
            roi_file = search_list(roi, filelist)[0]
        except Exception as exc:
            raise Exception('Error creating list of roi files. \n {}'.format(str(exc))) from exc
        else:
            roifiles.append(roi_file)

    return binarise(roifiles)

def get_unique_nonzeros(arr):
    """ Return a sorted list of the non-zero unique values of arr.

    Parameters
    ----------
    arr: numpy.ndarray
        The data array

    Returns
    -------
    list of items of arr.
    """
    rois = np.unique(arr)
    rois = rois[np.nonzero(rois)]
    rois.sort()

    return rois

def get_rois_centers_of_mass(vol):
    """Get the center of mass for each ROI in the given volume.

    Parameters
    ----------
    vol: numpy ndarray
        Volume with different values for each ROI.

    Returns
    -------
    OrderedDict
        Each entry in the dict has the ROI value as key and the center_of_mass
        coordinate as value.
    """
    from scipy.ndimage.measurements import center_of_mass

    roisvals = np.unique(vol)
    roisvals = roisvals[roisvals != 0]

    rois_centers = OrderedDict()
    for r in roisvals:
        rois_centers[r] = center_of_mass(vol, vol, r)

    return rois_centers

def partition_timeseries(image, roi_img, mask_img=None, zeroe=True,
                         roi_values=None, outdict=False):
    """Partition the timeseries in tsvol according to the ROIs in roivol.
    If a mask is given, it will be used to exclude any voxel outside of it.

    The outdict argument indicates whether you want a dictionary with each set
    of timeseries keyed by the ROI value, or a list of timeseries sets.
    If True and roi_img is not None, an OrderedDict will be returned;
    if False, or roi_img or roi_list is None, a list will be returned.

    Background value is assumed to be 0 and won't be used here.

    Parameters
    ----------
    image: img-like object or str
        4D timeseries volume

    roi_img: img-like object or str
        3D volume defining different ROIs.

    mask_img: img-like object or str
        3D mask volume

    zeroe: bool
        If true will remove the null timeseries voxels.

    roi_values: list of ROI values (int?)
        List of the values of the ROIs to indicate the
        order and which ROIs will be processed.

    outdict: bool
        If True will return an OrderedDict of timeseries sets, otherwise a list.

    Returns
    -------
    timeseries: list or OrderedDict
        A dict with the timeseries as items and keys as the ROI voxel values or
        a list where each element is the timeseries set ordered by the sorted
        values in roi_img or by the roi_values argument.
    """
    img = read_img(image)
    rois = read_img(roi_img)

    # check if roi_img and image are compatible
    check_img_compatibility(img, rois, only_check_3d=True)

    # check if rois has all roi_values
    roi_data = rois.get_data()
    if roi_values is not None:
        for rv in roi_values:
            if not np.any(roi_data == rv):
                raise ValueError('Could not find value {} in rois_img {}.'.format(rv, repr_imgs(roi_img)))
    else:
        roi_values = get_unique_nonzeros(roi_data)

    # check if mask and image are compatible
    if mask_img is None:
        mask_data = None
    else:
        mask = load_mask(mask_img)
        check_img_compatibility(img, mask, only_check_3d=True)
        mask_data = mask.get_data()

    # choose the function to call
    if outdict:
        extract_data = _extract_timeseries_dict
    else:
        extract_data = _extract_timeseries_list

    # extract the data and return it
    return extract_data(img.get_data(), roi_data, mask_data,
                        roi_values=roi_values, zeroe=zeroe)

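# Usage sketch for partition_timeseries(); the file names are illustrative
# assumptions.
ts_dict = partition_timeseries('bold_4d.nii.gz', 'atlas_rois.nii.gz',
                               mask_img='brain_mask.nii.gz', outdict=True)
for roi_value, ts in ts_dict.items():
    print(roi_value, ts.shape)   # (n_voxels_in_roi, n_timepoints)
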
def _partition_data(datavol, roivol, roivalue, maskvol=None, zeroe=True):
    """ Extract the values in `datavol` that are in the ROI with value
    `roivalue` in `roivol`. The ROI can be masked by `maskvol`.

    Parameters
    ----------
    datavol: numpy.ndarray
        4D timeseries volume or a 3D volume to be partitioned

    roivol: numpy.ndarray
        3D ROIs volume

    roivalue: int or float
        A value from roivol that represents the ROI to be used for extraction.

    maskvol: numpy.ndarray
        3D mask volume

    zeroe: bool
        If true will remove the null timeseries voxels. Only applied to
        timeseries (4D) data.

    Returns
    -------
    values: np.array
        An array of the values in the indicated ROI.
        A 2D matrix if `datavol` is 4D or a 1D vector if `datavol` is 3D.
    """
    if maskvol is not None:
        # get all masked time series within this roi r
        indices = (roivol == roivalue) * (maskvol > 0)
    else:
        # get all time series within this roi r
        indices = roivol == roivalue

    if datavol.ndim == 4:
        ts = datavol[indices, :]
    else:
        ts = datavol[indices]

    # remove zeroed time series
    if zeroe:
        if datavol.ndim == 4:
            ts = ts[ts.sum(axis=1) != 0, :]

    return ts

def _extract_timeseries_dict(tsvol, roivol, maskvol=None, roi_values=None, zeroe=True):
    """Partition the timeseries in tsvol according to the ROIs in roivol.
    If a mask is given, it will be used to exclude any voxel outside of it.

    Parameters
    ----------
    tsvol: numpy.ndarray
        4D timeseries volume or a 3D volume to be partitioned

    roivol: numpy.ndarray
        3D ROIs volume

    maskvol: numpy.ndarray
        3D mask volume

    zeroe: bool
        If true will remove the null timeseries voxels.

    roi_values: list of ROI values (int?)
        List of the values of the ROIs to indicate the
        order and which ROIs will be processed.

    Returns
    -------
    ts_dict: OrderedDict
        A dict with the timeseries as items and keys as the ROI voxel values.
    """
    _check_for_partition(tsvol, roivol, maskvol)

    # get unique values of the atlas
    if roi_values is None:
        roi_values = get_unique_nonzeros(roivol)

    ts_dict = OrderedDict()
    for r in roi_values:
        ts = _partition_data(tsvol, roivol, r, maskvol, zeroe)
        if len(ts) == 0:
            ts = np.zeros(tsvol.shape[-1])

        ts_dict[r] = ts

    return ts_dict

def _extract_timeseries_list(tsvol, roivol, maskvol=None, roi_values=None, zeroe=True):
    """Partition the timeseries in tsvol according to the ROIs in roivol.
    If a mask is given, it will be used to exclude any voxel outside of it.

    Parameters
    ----------
    tsvol: numpy.ndarray
        4D timeseries volume or a 3D volume to be partitioned

    roivol: numpy.ndarray
        3D ROIs volume

    maskvol: numpy.ndarray
        3D mask volume

    zeroe: bool
        If true will remove the null timeseries voxels. Only applied to
        timeseries (4D) data.

    roi_values: list of ROI values (int?)
        List of the values of the ROIs to indicate the
        order and which ROIs will be processed.

    Returns
    -------
    ts_list: list
        A list with the timeseries arrays as items
    """
    _check_for_partition(tsvol, roivol, maskvol)

    if roi_values is None:
        roi_values = get_unique_nonzeros(roivol)

    ts_list = []
    for r in roi_values:
        ts = _partition_data(tsvol, roivol, r, maskvol, zeroe)
        if len(ts) == 0:
            ts = np.zeros(tsvol.shape[-1])

        ts_list.append(ts)

    return ts_list

def get_3D_from_4D(image, vol_idx=0):
    """Pick one 3D volume from a 4D nifti image file

    Parameters
    ----------
    image: img-like object or str
        Volume defining different ROIs.
        Can either be:
        - a file path to a Nifti image
        - any object with get_data() and get_affine() methods, e.g.,
          nibabel.Nifti1Image.
        If niimg is a string, consider it as a path to Nifti image and
        call nibabel.load on it. If it is an object, check if get_data()
        and get_affine() methods are present, raise TypeError otherwise.

    vol_idx: int
        Index of the 3D volume to be extracted from the 4D volume.

    Returns
    -------
    vol, hdr, aff
        The data array, the image header and the affine transform matrix.
    """
    img = check_img(image)
    hdr, aff = get_img_info(img)

    if len(img.shape) != 4:
        raise AttributeError('Volume in {} does not have 4 dimensions.'.format(repr_imgs(img)))

    if not 0 <= vol_idx < img.shape[3]:
        raise IndexError('4th dimension in volume {} has {} volumes, '
                         'not {}.'.format(repr_imgs(img), img.shape[3], vol_idx))

    img_data = img.get_data()
    new_vol = img_data[:, :, :, vol_idx].copy()

    hdr.set_data_shape(hdr.get_data_shape()[:3])

    return new_vol, hdr, aff

def create_hdf_file(self):
    """
    Open the HDF file in self._fname, honouring self._overwrite, and set
    self._group to the group at self._hdf_basepath.
    """
    mode = 'w'
    if not self._overwrite and os.path.exists(self._fname):
        mode = 'a'

    self._hdf_file = h5py.File(self._fname, mode)
    if self._hdf_basepath == '/':
        self._group = self._hdf_file['/']
    else:
        self._group = self._hdf_file.create_group(self._hdf_basepath)

def get_dataset(self, ds_name, mode='r'):
    """
    Returns a h5py dataset given its registered name.

    :param ds_name: string
        Name of the dataset to be returned.

    :return:
    """
    if ds_name in self._datasets:
        return self._datasets[ds_name]
    else:
        return self.create_empty_dataset(ds_name)

def create_empty_dataset(self, ds_name, dtype=np.float32):
    """
    Creates a Dataset with unknown size. Resize it before using.

    :param ds_name: string

    :param dtype: dtype
        Datatype of the dataset

    :return: h5py DataSet
    """
    if ds_name in self._datasets:
        return self._datasets[ds_name]

    # maxshape=(None, None) makes both dimensions resizable;
    # maxshape=None would freeze the dataset at its initial (1, 1) shape
    ds = self._group.create_dataset(ds_name, (1, 1), maxshape=(None, None),
                                    dtype=dtype)
    self._datasets[ds_name] = ds

    return ds

def create_dataset(self, ds_name, data, attrs=None, dtype=None):
    """
    Saves a Numpy array in a dataset in the HDF file, registers it as
    ds_name and returns the h5py dataset.

    :param ds_name: string
        Registration name of the dataset to be registered.

    :param data: Numpy ndarray

    :param dtype: dtype
        Datatype of the dataset

    :return: h5py dataset
    """
    if ds_name in self._datasets:
        ds = self._datasets[ds_name]
        if ds.dtype != data.dtype:
            warnings.warn('Dataset and data dtype are different!')
    else:
        if dtype is None:
            dtype = data.dtype

        ds = self._group.create_dataset(ds_name, data.shape, dtype=dtype)

        if attrs is not None:
            for key in attrs:
                ds.attrs[key] = attrs[key]

    # write_direct copies `data` into the dataset
    # (read_direct would copy in the opposite direction)
    ds.write_direct(data)
    self._datasets[ds_name] = ds

    return ds

def save(self, ds_name, data, dtype=None):
    """
    See create_dataset.
    """
    return self.create_dataset(ds_name, data, dtype=dtype)

def _fill_missing_values(df, range_values, fill_value=0, fill_method=None):
    """
    Get the names of the index columns of df, obtain their ranges from the
    range_values dict and return a reindexed version of df with the given
    range values.

    :param df: pandas DataFrame

    :param range_values: dict or array-like
        Must contain for each index column of df an entry with all the values
        within the range of the column.

    :param fill_value: scalar or 'nearest', default 0
        Value to use for missing values. Defaults to 0, but can be any
        "compatible" value, e.g., NaN.
        The 'nearest' mode will fill the missing value with the nearest value
        in the column.

    :param fill_method: {'backfill', 'bfill', 'pad', 'ffill', None}, default None
        Method to use for filling holes in reindexed DataFrame
        'pad' / 'ffill': propagate last valid observation forward to next valid
        'backfill' / 'bfill': use NEXT valid observation to fill gap

    :return: pandas DataFrame and used column ranges
        reindexed DataFrame and dict with index column ranges
    """
    idx_colnames = df.index.names
    idx_colranges = [range_values[x] for x in idx_colnames]

    fullindex = pd.Index([p for p in product(*idx_colranges)],
                         name=tuple(idx_colnames))

    fulldf = df.reindex(index=fullindex, fill_value=fill_value,
                        method=fill_method)

    fulldf.index.names = idx_colnames

    return fulldf, idx_colranges

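# Usage sketch for _fill_missing_values(), assuming pandas is importable.
# It is called here as a plain function; in the class it is reached as
# self._fill_missing_values. The frame and ranges are illustrative.
import pandas as pd

df = pd.DataFrame({'subject': [0, 0, 1], 'visit': [0, 1, 0],
                   'score': [1., 2., 3.]})
df = df.set_index(['subject', 'visit'])

full, ranges = _fill_missing_values(df, {'subject': [0, 1], 'visit': [0, 1]})
# `full` now also has a row for (subject=1, visit=1), filled with 0
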
def get(self, key):
    """ Retrieve pandas object or group of Numpy ndarrays stored in file.

    Parameters
    ----------
    key : object

    Returns
    -------
    obj : type of object stored in file
    """
    node = self.get_node(key)
    if node is None:
        raise KeyError('No object named %s in the file' % key)

    if hasattr(node, 'attrs'):
        if 'pandas_type' in node.attrs:
            return self._read_group(node)

    return self._read_array(node)

def put(self, key, value, attrs=None, format=None, append=False, **kwargs):
    """ Store object in HDFStore.

    Parameters
    ----------
    key : str

    value : {Series, DataFrame, Panel, Numpy ndarray}

    format : 'fixed(f)|table(t)', default is 'fixed'
        fixed(f) : Fixed format
                   Fast writing/reading. Not-appendable, nor searchable
        table(t) : Table format
                   Write as a PyTables Table structure which may perform
                   worse but allow more flexible operations like searching
                   / selecting subsets of the data

    append : boolean, default False
        This will force Table format, append the input data to the existing.

    encoding : default None, provide an encoding for strings
    """
    if not isinstance(value, np.ndarray):
        super(NumpyHDFStore, self).put(key, value, format, append, **kwargs)
    else:
        group = self.get_node(key)

        # remove the node if we are not appending
        if group is not None and not append:
            self._handle.removeNode(group, recursive=True)
            group = None

        if group is None:
            paths = key.split('/')

            # recursively create the groups
            path = '/'
            for p in paths:
                if not len(p):
                    continue
                new_path = path
                if not path.endswith('/'):
                    new_path += '/'
                new_path += p

                group = self.get_node(new_path)
                if group is None:
                    group = self._handle.createGroup(path, p)

                path = new_path

        ds_name = kwargs.get('ds_name', self._array_dsname)

        ds = self._handle.createArray(group, ds_name, value)
        if attrs is not None:
            # do not reuse `key` here: it is the HDF node path
            for attr_name in attrs:
                setattr(ds.attrs, attr_name, attrs[attr_name])

        self._handle.flush()

        return ds

def _push_dfblock(self, key, df, ds_name, range_values):
    """
    :param key: string

    :param df: pandas Dataframe

    :param ds_name: string

    :param range_values: dict or array-like
        Ranges of the index columns of df, see `put_df_as_ndarray`.
    """
    # create a numpy array and put it into the hdf_file
    vals_colranges = [range_values[x] for x in df.index.names]
    nu_shape = [len(x) for x in vals_colranges]

    return self.put(key, np.reshape(df.values, tuple(nu_shape)),
                    attrs={'axes': df.index.names},
                    ds_name=ds_name, append=True)

def put_df_as_ndarray(self, key, df, range_values, loop_multiindex=False,
                      unstack=False, fill_value=0, fill_method=None):
    """Returns a PyTables HDF Array from df in the shape given by its index
    columns' range values.

    :param key: string object

    :param df: pandas DataFrame

    :param range_values: dict or array-like
        Must contain for each index column of df an entry with all the values
        within the range of the column.

    :param loop_multiindex: bool
        Will loop through the first index in a multiindex dataframe,
        extract a dataframe only for one value, complete and fill the missing
        values and store in the HDF.
        If this is True, it will not use unstack.
        This is as fast as unstacking.

    :param unstack: bool
        Unstack means that this will use the first index name to
        unfold the DataFrame, and will create a group with as many datasets
        as values has this first index.
        Use this if you think the filled dataframe won't fit in your RAM
        memory. If set to False, this will transform the dataframe in memory
        first and only then save it.

    :param fill_value: scalar or 'nearest', default 0
        Value to use for missing values. Defaults to 0, but can be any
        "compatible" value, e.g., NaN.
        The 'nearest' mode will fill the missing value with the nearest value
        in the column.

    :param fill_method: {'backfill', 'bfill', 'pad', 'ffill', None}, default None
        Method to use for filling holes in reindexed DataFrame
        'pad' / 'ffill': propagate last valid observation forward to next valid
        'backfill' / 'bfill': use NEXT valid observation to fill gap

    :return: PyTables data node
    """
    idx_colnames = df.index.names

    # use the first index column name as dataset group name if not given
    if key is None:
        key = idx_colnames[0]

    if loop_multiindex:
        idx_values = df.index.get_level_values(0).unique()

        for idx in idx_values:
            vals, _ = self._fill_missing_values(df.xs((idx,), level=idx_colnames[0]),
                                                range_values,
                                                fill_value=fill_value,
                                                fill_method=fill_method)

            ds_name = str(idx) + '_' + '_'.join(vals.columns)

            self._push_dfblock(key, vals, ds_name, range_values)

        return self._handle.get_node('/' + str(key))

    # separate the dataframe into blocks, only with the first index
    else:
        if unstack:
            df = df.unstack(idx_colnames[0])
            for idx in df:
                vals, _ = self._fill_missing_values(df[idx], range_values,
                                                    fill_value=fill_value,
                                                    fill_method=fill_method)
                vals = np.nan_to_num(vals)

                ds_name = '_'.join([str(x) for x in vals.name])

                self._push_dfblock(key, vals, ds_name, range_values)

            return self._handle.get_node('/' + str(key))

        # do not separate the data
        vals, _ = self._fill_missing_values(df, range_values,
                                            fill_value=fill_value,
                                            fill_method=fill_method)
        ds_name = self._array_dsname

        return self._push_dfblock(key, vals, ds_name, range_values)

def get_data(self, safe_copy=False):
    """Get the data in the image.
    If safe_copy is True, will perform a deep copy of the data and return it.

    Parameters
    ----------
    safe_copy: (optional) bool

    Returns
    -------
    np.ndarray
    """
    if safe_copy:
        data = get_data(self.img)
    else:
        data = self.img.get_data(caching=self._caching)

    return data

def smooth_fwhm(self, fwhm):
    """ Set a smoothing Gaussian kernel given its FWHM in mm. """
    if fwhm != self._smooth_fwhm:
        self._is_data_smooth = False
    self._smooth_fwhm = fwhm

def get_data(self, smoothed=True, masked=True, safe_copy=False):
    """Get the data in the image.
    If safe_copy is True, will perform a deep copy of the data and return it.

    Parameters
    ----------
    smoothed: (optional) bool
        If True and self._smooth_fwhm > 0 will smooth the data before masking.

    masked: (optional) bool
        If True and self.has_mask will return the masked data, the plain data
        otherwise.

    safe_copy: (optional) bool

    Returns
    -------
    np.ndarray
    """
    if not safe_copy and smoothed == self._is_data_smooth and masked == self._is_data_masked:
        if self.has_data_loaded() and self._caching == 'fill':
            # the cached array already matches the requested state
            return self.img.get_data(caching=self._caching)

    if safe_copy:
        data = get_data(self.img)
    else:
        data = self.img.get_data(caching=self._caching)

    is_smoothed = False
    if smoothed and self._smooth_fwhm > 0:
        try:
            data = _smooth_data_array(data, self.get_affine(),
                                      self._smooth_fwhm, copy=False)
        except ValueError as ve:
            raise ValueError('Error smoothing image {} with a {}mm FWHM '
                             'kernel.'.format(self.img, self._smooth_fwhm)) from ve
        else:
            is_smoothed = True

    is_data_masked = False
    if masked and self.has_mask():
        data = self.unmask(self._mask_data(data)[0])
        is_data_masked = True

    if not safe_copy:
        self._is_data_masked = is_data_masked
        self._is_data_smooth = is_smoothed

    return data

def apply_mask(self, mask_img):
    """First set_mask, then get the masked data.

    Parameters
    ----------
    mask_img: nifti-like image, NeuroImage or str
        3D mask array: True where a voxel should be used.
        Can either be:
        - a file path to a Nifti image
        - any object with get_data() and get_affine() methods, e.g.,
          nibabel.Nifti1Image.
        If niimg is a string, consider it as a path to Nifti image and
        call nibabel.load on it. If it is an object, check if get_data()
        and get_affine() methods are present, raise TypeError otherwise.

    Returns
    -------
    The masked data deepcopied
    """
    self.set_mask(mask_img)
    return self.get_data(masked=True, smoothed=True, safe_copy=True)

def set_mask(self, mask_img):
    """Set a mask img for self. Every subsequent operation on self will take
    this mask into account.

    Parameters
    ----------
    mask_img: nifti-like image, NeuroImage or str
        3D mask array: True where a voxel should be used.
        Can either be:
        - a file path to a Nifti image
        - any object with get_data() and get_affine() methods, e.g.,
          nibabel.Nifti1Image.
        If niimg is a string, consider it as a path to Nifti image and
        call nibabel.load on it. If it is an object, check if get_data()
        and get_affine() methods are present, raise TypeError otherwise.

    Note
    ----
    self.img and mask_file must have the same shape.

    Raises
    ------
    FileNotFound, NiftiFilesNotCompatible
    """
    mask = load_mask(mask_img, allow_empty=True)
    # this will raise an exception if something is wrong
    check_img_compatibility(self.img, mask, only_check_3d=True)
    self.mask = mask

def _mask_data(self, data):
    """Return the data masked with self.mask

    Parameters
    ----------
    data: np.ndarray

    Returns
    -------
    masked np.ndarray

    Raises
    ------
    ValueError if the data and mask dimensions are not compatible.
    Other exceptions related to numpy computations.
    """
    self._check_for_mask()

    msk_data = self.mask.get_data()
    if self.ndim == 3:
        return data[msk_data], np.where(msk_data)
    elif self.ndim == 4:
        return _apply_mask_to_4d_data(data, self.mask)
    else:
        raise ValueError('Cannot mask {} with {} dimensions using mask {}.'.format(self, self.ndim, self.mask))

def apply_smoothing(self, smooth_fwhm):
    """Set self._smooth_fwhm and then smooth the data.
    See boyle.nifti.smooth.smooth_imgs.

    Returns
    -------
    the smoothed data deepcopied.
    """
    if smooth_fwhm <= 0:
        return

    old_smooth_fwhm = self._smooth_fwhm
    self._smooth_fwhm = smooth_fwhm
    try:
        data = self.get_data(smoothed=True, masked=True, safe_copy=True)
    except ValueError:
        # restore the previous FWHM if smoothing failed
        self._smooth_fwhm = old_smooth_fwhm
        raise

    return data

def mask_and_flatten(self):
    """Return a vector of the masked data.

    Returns
    -------
    np.ndarray, tuple of indices (np.ndarray), tuple of the mask shape
    """
    self._check_for_mask()

    return (self.get_data(smoothed=True, masked=True, safe_copy=False)[self.get_mask_indices()],
            self.get_mask_indices(),
            self.mask.shape)

def unmask(self, arr):
    """Use self.mask to reshape arr, and self.img to get the affine and header
    needed to create a new self.img using the data in arr.

    A mask must be set; self._check_for_mask() will raise otherwise.
    """
    self._check_for_mask()

    if arr.ndim not in (1, 2):
        raise ValueError('The given array has {} dimensions while my mask has {}. '
                         'Masked data must be 1D or 2D array. '.format(arr.ndim,
                                                                       len(self.mask.shape)))

    if arr.ndim == 2:
        return matrix_to_4dvolume(arr, self.mask.get_data())
    elif arr.ndim == 1:
        return vector_to_volume(arr, self.mask.get_data())

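# Usage sketch for mask_and_flatten()/unmask(); the class name and file
# paths below are illustrative assumptions.
img = NeuroImage('subject.nii.gz')
img.set_mask('brain_mask.nii.gz')
vec, indices, mask_shape = img.mask_and_flatten()
vol = img.unmask(vec)   # back to a full volume
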
def to_file(self, outpath):
    """Save this object instance in outpath.

    Parameters
    ----------
    outpath: str
        Output file path
    """
    if not self.has_mask() and not self.is_smoothed():
        save_niigz(outpath, self.img)
    else:
        save_niigz(outpath, self.get_data(masked=True, smoothed=True),
                   self.get_header(), self.get_affine())

def setup_logging(log_config_file=op.join(op.dirname(__file__), 'logger.yml'),
                  log_default_level=LOG_LEVEL,
                  env_key=MODULE_NAME.upper() + '_LOG_CFG'):
    """Setup logging configuration."""
    path = log_config_file
    value = os.getenv(env_key, None)
    if value:
        path = value

    if op.exists(path):
        # safe_load avoids executing arbitrary YAML tags from the config file
        log_cfg = yaml.safe_load(read(path).format(MODULE_NAME))
        logging.config.dictConfig(log_cfg)
    else:
        logging.basicConfig(level=log_default_level)

    log = logging.getLogger(__name__)
    log.debug('Start logging.')

def _read_meta_header(filename):
    """Return a dictionary of meta data from a meta header file.

    Parameters
    ----------
    filename: str
        Path to a .mhd file

    Returns
    -------
    meta_dict: dict
        A dictionary with the .mhd header content.
    """
    meta_dict = {}
    tag_flag = [False] * len(MHD_TAGS)
    with open(filename, 'r') as fileIN:
        line = fileIN.readline()
        while line:
            tags = str.split(line, '=')
            for i in range(len(MHD_TAGS)):
                tag = MHD_TAGS[i]
                if (str.strip(tags[0]) == tag) and (not tag_flag[i]):
                    meta_dict[tag] = str.strip(tags[1])
                    tag_flag[i] = True

            line = fileIN.readline()

    return meta_dict

def load_raw_data_with_mhd(filename):
    """Read a .mhd meta header file and load the image data it describes.

    Parameters
    ----------
    filename: str
        Path to a .mhd file

    Returns
    -------
    data: numpy.ndarray
        n-dimensional image data array.

    meta_dict: dict
        A dictionary with the .mhd header content.
    """
    meta_dict = _read_meta_header(filename)
    dim = int(meta_dict['NDims'])

    assert meta_dict['ElementType'] in MHD_TO_NUMPY_TYPE

    arr = [int(i) for i in meta_dict['DimSize'].split()]
    volume = reduce(lambda x, y: x * y, arr[0:dim - 1], 1)

    pwd = op.dirname(filename)
    raw_file = meta_dict['ElementDataFile']
    data_file = op.join(pwd, raw_file)

    ndtype = MHD_TO_NUMPY_TYPE[meta_dict['ElementType']]
    arrtype = NDARRAY_TO_ARRAY_TYPE[ndtype]

    with open(data_file, 'rb') as fid:
        binvalues = array.array(arrtype)
        binvalues.fromfile(fid, volume * arr[dim - 1])

    data = np.array(binvalues, ndtype)
    data = np.reshape(data, (arr[dim - 1], volume))

    if dim >= 3:
        # Begin 3D fix
        dimensions = [int(i) for i in meta_dict['DimSize'].split()]
        # dimensions.reverse() ??
        data = data.reshape(dimensions)
        # End 3D fix

    return data, meta_dict

def get_3D_from_4D(filename, vol_idx=0):
    """Return a 3D volume from a 4D MetaImage (.mhd) file.

    Parameters
    ----------
    filename: str
        Path to the 4D .mhd file

    vol_idx: int
        Index of the 3D volume to be extracted from the 4D volume.

    Returns
    -------
    vol, hdr
        The data array and the new 3D image header.
    """
    def remove_4th_element_from_hdr_string(hdr, fieldname):
        if fieldname in hdr:
            hdr[fieldname] = ' '.join(hdr[fieldname].split()[:3])

    vol, hdr = load_raw_data_with_mhd(filename)

    if vol.ndim != 4:
        raise ValueError('Volume in {} does not have 4 dimensions.'.format(op.join(op.dirname(filename),
                                                                                   hdr['ElementDataFile'])))

    if not 0 <= vol_idx < vol.shape[3]:
        raise IndexError('The 4th dimension of the volume in {} has {} volumes, '
                         'but index {} was requested.'.format(filename, vol.shape[3], vol_idx))

    new_vol = vol[:, :, :, vol_idx].copy()

    hdr['NDims'] = 3
    remove_4th_element_from_hdr_string(hdr, 'ElementSpacing')
    remove_4th_element_from_hdr_string(hdr, 'DimSize')

    return new_vol, hdr
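Hypothetical usage, assuming a 4D .mhd file exists at the given (illustrative) path:

# Illustrative path: extract the 6th 3D volume from a 4D MetaImage file.
vol3d, hdr3d = get_3D_from_4D('/data/rest_4d.mhd', vol_idx=5)
print(vol3d.shape)          # e.g. (91, 109, 91)
print(hdr3d['DimSize'])     # the first three elements of the original DimSize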
def _safe_cache(memory, func, **kwargs):
    """ A wrapper for mem.cache that flushes the cache if the version
        number of nibabel has changed.
    """
    cachedir = memory.cachedir

    if cachedir is None or cachedir in __CACHE_CHECKED:
        return memory.cache(func, **kwargs)

    version_file = os.path.join(cachedir, 'module_versions.json')

    versions = dict()
    if os.path.exists(version_file):
        with open(version_file, 'r') as _version_file:
            versions = json.load(_version_file)

    modules = (nibabel, )
    # Keep only the major + minor version numbers
    my_versions = dict((m.__name__, LooseVersion(m.__version__).version[:2])
                       for m in modules)
    commons = set(versions.keys()).intersection(set(my_versions.keys()))
    collisions = [m for m in commons if versions[m] != my_versions[m]]

    # Flush cache if version collision
    if len(collisions) > 0:
        if nilearn.CHECK_CACHE_VERSION:
            warnings.warn("Incompatible cache in %s: "
                          "different version of nibabel. Deleting "
                          "the cache. Set nilearn.CHECK_CACHE_VERSION "
                          "to False to avoid this behavior."
                          % cachedir)
            try:
                tmp_dir = (os.path.split(cachedir)[:-1]
                           + ('old_%i' % os.getpid(), ))
                tmp_dir = os.path.join(*tmp_dir)
                # We use rename + unlink to be more robust to race
                # conditions
                os.rename(cachedir, tmp_dir)
                shutil.rmtree(tmp_dir)
            except OSError:
                # Another process could have removed this dir
                pass

            try:
                os.makedirs(cachedir)
            except OSError:
                # File exists?
                pass
        else:
            warnings.warn("Incompatible cache in %s: "
                          "old version of nibabel." % cachedir)

    # Write json files if configuration is different
    if versions != my_versions:
        with open(version_file, 'w') as _version_file:
            json.dump(my_versions, _version_file)

    __CACHE_CHECKED[cachedir] = True

    return memory.cache(func, **kwargs)
def cache(func, memory, func_memory_level=None, memory_level=None,
          **kwargs):
    """ Return a joblib.Memory object.

    The memory_level determines the level above which the wrapped
    function output is cached. By specifying a numeric value for this level,
    the user can control the amount of cache memory used. This function will
    cache the function call or not depending on the cache level.

    Parameters
    ----------
    func: function
        The function whose output is to be cached.

    memory: instance of joblib.Memory or string
        Used to cache the function call.

    func_memory_level: int, optional
        The memory_level from which caching must be enabled for the wrapped
        function.

    memory_level: int, optional
        The memory_level used to determine if the function call must
        be cached or not (if memory_level is greater than or equal to
        func_memory_level, the function is cached).

    kwargs: keyword arguments
        The keyword arguments passed to memory.cache

    Returns
    -------
    mem: joblib.MemorizedFunc
        Object that wraps the function func. This object may be
        a no-op, if the requested level is lower than the value given
        to _cache(). For consistency, a joblib.Memory object is always
        returned.
    """
    verbose = kwargs.get('verbose', 0)

    # memory_level and func_memory_level must be both None or both integers.
    memory_levels = [memory_level, func_memory_level]
    both_params_integers = all(isinstance(lvl, int) for lvl in memory_levels)
    both_params_none = all(lvl is None for lvl in memory_levels)

    if not (both_params_integers or both_params_none):
        raise ValueError('Reference and user memory levels must be both None '
                         'or both integers.')

    if memory is not None and (func_memory_level is None or
                               memory_level >= func_memory_level):
        if isinstance(memory, _basestring):
            memory = Memory(cachedir=memory, verbose=verbose)
        if not isinstance(memory, MEMORY_CLASSES):
            raise TypeError("'memory' argument must be a string or a "
                            "joblib.Memory object. "
                            "%s %s was given." % (memory, type(memory)))
        if (memory.cachedir is None and memory_level is not None
                and memory_level > 1):
            warnings.warn("Caching has been enabled (memory_level = %d) "
                          "but no Memory object or path has been provided"
                          " (parameter memory). Caching deactivated for "
                          "function %s." %
                          (memory_level, func.__name__),
                          stacklevel=2)
    else:
        memory = Memory(cachedir=None, verbose=verbose)

    return _safe_cache(memory, func, **kwargs)
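A sketch of how the two levels interact. The cached function and the cache directory are illustrative; joblib's Memory is used with the same cachedir keyword as in the function above.

from joblib import Memory

def expensive_sum(a, b):
    # Stand-in for a costly computation.
    return a + b

mem = Memory(cachedir='/tmp/boyle_cache', verbose=0)  # illustrative path

# Cached: memory_level (2) >= func_memory_level (1).
cached = cache(expensive_sum, mem, func_memory_level=1, memory_level=2)
print(cached(1, 2))

# Not cached: memory_level (1) < func_memory_level (3), so a no-op
# Memory(cachedir=None) wrapper is returned instead.
uncached = cache(expensive_sum, mem, func_memory_level=3, memory_level=1)
print(uncached(1, 2))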
def _cache(self, func, func_memory_level=1, **kwargs):
    """ Return a joblib.Memory object.

    The memory_level determines the level above which the wrapped
    function output is cached. By specifying a numeric value for this level,
    the user can control the amount of cache memory used. This function will
    cache the function call or not depending on the cache level.

    Parameters
    ----------
    func: function
        The function whose output is to be cached.

    memory_level: int
        The memory_level from which caching must be enabled for the wrapped
        function.

    Returns
    -------
    mem: joblib.Memory
        Object that wraps the function func. This object may be
        a no-op, if the requested level is lower than the value given
        to _cache(). For consistency, a joblib.Memory object is always
        returned.
    """
    verbose = getattr(self, 'verbose', 0)

    # Creates attributes if they don't exist
    # This is to make creating them in __init__() optional.
    if not hasattr(self, "memory_level"):
        self.memory_level = 0
    if not hasattr(self, "memory"):
        self.memory = Memory(cachedir=None, verbose=verbose)
    if isinstance(self.memory, _basestring):
        self.memory = Memory(cachedir=self.memory, verbose=verbose)

    # If cache level is 0 but a memory object has been provided, set
    # memory_level to 1 with a warning.
    if self.memory_level == 0:
        if (isinstance(self.memory, _basestring)
                or self.memory.cachedir is not None):
            warnings.warn("memory_level is currently set to 0 but "
                          "a Memory object has been provided. "
                          "Setting memory_level to 1.")
            self.memory_level = 1

    return cache(func, self.memory, func_memory_level=func_memory_level,
                 memory_level=self.memory_level, **kwargs)
def save_niigz(filepath, vol, header=None, affine=None):
    """Saves a volume into a Nifti (.nii.gz) file.

    Parameters
    ----------
    filepath: string
        Output file name path

    vol: Numpy 3D or 4D array
        Volume with the data to be saved.

    affine: (optional) 4x4 Numpy array
        Array with the affine transform of the file.
        This is needed if vol is a np.ndarray.

    header: (optional) nibabel.nifti1.Nifti1Header
        Header for the file, optional but recommended.
        This is needed if vol is a np.ndarray.

    Note
    ----
    affine and header only work for numpy volumes.
    """
    # delayed import because could not install nipy on Python 3 on OSX
    we_have_nipy = False
    try:
        import nipy.core.image as niim
        from nipy import save_image
    except ImportError:
        pass
    else:
        we_have_nipy = True

    if isinstance(vol, np.ndarray):
        log.debug('Saving numpy nifti file: {}.'.format(filepath))
        ni = nib.Nifti1Image(vol, affine, header)
        nib.save(ni, filepath)

    elif isinstance(vol, nib.Nifti1Image):
        log.debug('Saving nibabel nifti file: {}.'.format(filepath))
        nib.save(vol, filepath)

    elif we_have_nipy and isinstance(vol, niim.Image):
        log.debug('Saving nipy nifti file: {}.'.format(filepath))
        save_image(vol, filepath)

    else:
        raise ValueError('Could not recognise input vol filetype. Got: {}.'.format(repr_imgs(vol)))
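A minimal sketch of the ndarray branch, reusing the affine and header of an existing image; the file paths are illustrative.

import numpy as np
import nibabel as nib

# Illustrative paths; a plain ndarray needs an affine (and ideally a header),
# as the docstring above notes.
src  = nib.load('/data/subject01.nii.gz')
data = np.asarray(src.get_data(), dtype=np.float32) * 2.0

save_niigz('/data/subject01_doubled.nii.gz', data,
           header=src.get_header(), affine=src.get_affine())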
def spatialimg_to_hdfgroup(h5group, spatial_img):
    """Saves a Nifti1Image into an HDF5 group.

    Parameters
    ----------
    h5group: h5py Group
        Group where the image data will be saved.
        Datasets will be created inside the given group: 'data', 'extra' and
        'affine'; the header information will be set as attributes of the
        'data' dataset.

    spatial_img: nibabel SpatialImage
        Image to be saved
    """
    try:
        h5group['data']   = spatial_img.get_data()
        h5group['affine'] = spatial_img.get_affine()

        if hasattr(spatial_img, 'get_extra'):
            h5group['extra'] = spatial_img.get_extra()

        hdr = spatial_img.get_header()
        for k in list(hdr.keys()):
            h5group['data'].attrs[k] = hdr[k]

    except ValueError as ve:
        raise Exception('Error creating group ' + h5group.name) from ve
def spatialimg_to_hdfpath(file_path, spatial_img, h5path=None, append=True): """Saves a Nifti1Image into an HDF5 file. Parameters ---------- file_path: string Output HDF5 file path spatial_img: nibabel SpatialImage Image to be saved h5path: string HDF5 group path where the image data will be saved. Datasets will be created inside the given group path: 'data', 'extra', 'affine', the header information will be set as attributes of the 'data' dataset. Default: '/img' append: bool True if you don't want to erase the content of the file if it already exists, False otherwise. Note ---- HDF5 open modes >>> 'r' Readonly, file must exist >>> 'r+' Read/write, file must exist >>> 'w' Create file, truncate if exists >>> 'w-' Create file, fail if exists >>> 'a' Read/write if exists, create otherwise (default) """ if h5path is None: h5path = '/img' mode = 'w' if os.path.exists(file_path): if append: mode = 'a' with h5py.File(file_path, mode) as f: try: h5img = f.create_group(h5path) spatialimg_to_hdfgroup(h5img, spatial_img) except ValueError as ve: raise Exception('Error creating group ' + h5path) from ve
def hdfpath_to_nifti1image(file_path, h5path):
    """Returns a nibabel Nifti1Image built from the datasets in an HDF5 group.

    Parameters
    ----------
    file_path: string
        HDF5 file path

    h5path:
        HDF5 group path in file_path

    Returns
    -------
    nibabel Nifti1Image
    """
    with h5py.File(file_path, 'r') as f:
        return hdfgroup_to_nifti1image(f[h5path])
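An illustrative round trip through spatialimg_to_hdfpath and hdfpath_to_nifti1image; the file paths are hypothetical.

import nibabel as nib

# Save an image under the '/img' group of an HDF5 file, then read it back.
img = nib.load('/data/subject01.nii.gz')          # hypothetical path
spatialimg_to_hdfpath('/data/subject01.h5', img, h5path='/img')

img2 = hdfpath_to_nifti1image('/data/subject01.h5', '/img')
assert img.shape == img2.shape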
def hdfgroup_to_nifti1image(h5group):
    """Returns a nibabel Nifti1Image built from the datasets in an HDF5 group.

    Parameters
    ----------
    h5group: h5py.Group
        HDF5 group

    Returns
    -------
    nibabel Nifti1Image
    """
    try:
        data   = h5group['data'][:]
        affine = h5group['affine'][:]

        extra = None
        if 'extra' in h5group:
            extra = h5group['extra'][:]

        header = get_nifti1hdr_from_h5attrs(h5group['data'].attrs)

        img = nib.Nifti1Image(data, affine, header=header, extra=extra)

        return img

    except KeyError as ke:
        raise Exception('Could not read Nifti1Image datasets from ' + h5group.name) from ke
def get_nifti1hdr_from_h5attrs(h5attrs):
    """Builds a nibabel Nifti1Header from an h5py Attributes set.

    Converts unicode string keys into standard strings
    and each value into a numpy array.

    Parameters
    ----------
    h5attrs: h5py.AttributeManager

    Returns
    -------
    nibabel.Nifti1Header
    """
    hdr = nib.Nifti1Header()
    for k in list(h5attrs.keys()):
        hdr[str(k)] = np.array(h5attrs[k])

    return hdr
def all_childnodes_to_nifti1img(h5group):
    """Returns a list of all images found under h5group.

    Parameters
    ----------
    h5group: h5py.Group
        HDF group

    Returns
    -------
    list of Nifti1Image
    """
    child_nodes = []

    def append_parent_if_dataset(name, obj):
        if isinstance(obj, h5py.Dataset):
            if name.split('/')[-1] == 'data':
                child_nodes.append(obj.parent)

    vols = []
    h5group.visititems(append_parent_if_dataset)
    for c in child_nodes:
        vols.append(hdfgroup_to_nifti1image(c))

    return vols
def insert_volumes_in_one_dataset(file_path, h5path, file_list, newshape=None,
                                  concat_axis=0, dtype=None, append=True):
    """Inserts all given nifti files from file_list into one dataset in file_path.

    This will not check if the dimensionality of all files match.

    Parameters
    ----------
    file_path: string
        HDF5 file path

    h5path: string
        Path of the dataset inside the HDF5 file.

    file_list: list of strings

    newshape: tuple or lambda function
        If None, it will not reshape the images.
        If a lambda function, this lambda will receive only the shape array.
        e.g., newshape = lambda x: (np.prod(x[0:3]), x[3])
        If a tuple, it will try to reshape all the images with the same shape.
        It must work for all the images in file_list.

    concat_axis: int
        Axis of concatenation after reshaping

    dtype: data type
        Dataset data type
        If not set, will use the type of the first file.

    append: bool

    Raises
    ------
    ValueError if concat_axis is bigger than data dimensionality.

    Note
    ----
    For now, this only works if the dataset ends up being a 2D matrix.
    I haven't tested for multi-dimensionality concatenations.
    """
    def isalambda(v):
        return isinstance(v, type(lambda: None)) and v.__name__ == '<lambda>'

    def append_to_dataset(h5ds, idx, data, concat_axis):
        """Write `data` into slot `idx` of `h5ds` along `concat_axis`.

        h5ds: h5py Dataset
        idx: int
        data: ndarray
        concat_axis: int
        """
        ndims = data.ndim
        if ndims == 1:
            if concat_axis == 0:
                h5ds[idx] = data
        elif ndims == 2:
            if concat_axis == 0:
                h5ds[idx, :] = data
            elif concat_axis == 1:
                h5ds[:, idx] = data
        elif ndims == 3:
            if concat_axis == 0:
                h5ds[idx, :, :] = data
            elif concat_axis == 1:
                h5ds[:, idx, :] = data
            elif concat_axis == 2:
                h5ds[:, :, idx] = data

    mode = 'w'
    if os.path.exists(file_path) and append:
        mode = 'a'

    # load the metadata into spatialimages
    imgs = [nib.load(vol) for vol in file_list]

    # get the shapes of all volumes
    shapes = [np.array(img.shape) for img in imgs]

    # get the reshaped shapes
    if newshape is None:
        nushapes = np.array(shapes)
    elif isalambda(newshape):
        nushapes = np.array([newshape(shape) for shape in shapes])
    else:
        nushapes = np.array([newshape for _ in shapes])

    # check that concat_axis exists in the new shapes
    for nushape in nushapes:
        if concat_axis >= len(nushape):
            raise ValueError('concat_axis {} is out of bounds for '
                             'shape {}.'.format(concat_axis, tuple(nushape)))

    # calculate the shape of the new dataset
    n_dims = nushapes.shape[1]
    ds_shape = np.zeros(n_dims, dtype=int)
    for a in range(n_dims):
        if a == concat_axis:
            ds_shape[a] = np.sum(nushapes[:, concat_axis])
        else:
            ds_shape[a] = np.max(nushapes[:, a])

    # get the type of the new dataset
    if dtype is None:
        dtype = imgs[0].get_data_dtype()

    with h5py.File(file_path, mode) as f:
        try:
            h5grp = f.create_group(os.path.dirname(h5path))
            h5ds  = h5grp.create_dataset(os.path.basename(h5path),
                                         tuple(ds_shape), dtype)

            # append each reshaped image into the dataset
            for ic, img in enumerate(imgs):
                nushape = nushapes[ic, :]
                append_to_dataset(h5ds, ic,
                                  np.reshape(img.get_data(), tuple(nushape)),
                                  concat_axis)
        except ValueError as ve:
            raise Exception('Error creating group {} in hdf file {}'.format(h5path, file_path)) from ve
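An illustrative call that flattens each 3D volume to one row and stacks subjects along axis 0, producing an n_subjects x n_voxels dataset; the file paths are hypothetical.

import numpy as np

# Hypothetical file paths; each volume is reshaped to (1, n_voxels) so that
# concatenation along axis 0 yields one row per subject.
files = ['/data/s01.nii.gz', '/data/s02.nii.gz', '/data/s03.nii.gz']

insert_volumes_in_one_dataset('/data/group.h5', '/data/subjects', files,
                              newshape=lambda shape: (1, int(np.prod(shape))),
                              concat_axis=0)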
def treefall(iterable):
    """Generate all the combinations of the elements of iterable, from the
    largest subset down to the empty one.

    Parameters
    ----------
    iterable: list, set or dict or any iterable object

    Returns
    -------
    A generator of all possible combinations of the iterable.

    Example:
    -------
    >>> for i in treefall([1, 2, 3]): print(i)
    >>> (1, 2, 3)
    >>> (1, 2)
    >>> (1, 3)
    >>> (2, 3)
    >>> (1,)
    >>> (2,)
    >>> (3,)
    >>> ()
    """
    num_elems = len(iterable)
    for i in range(num_elems, -1, -1):
        for c in combinations(iterable, i):
            yield c
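A quick sanity check on the generator: an n-element iterable yields all 2**n subsets, largest first.

subsets = list(treefall([1, 2, 3]))
assert len(subsets) == 2 ** 3          # every subset appears exactly once
assert subsets[0] == (1, 2, 3)         # from the full set ...
assert subsets[-1] == ()               # ... down to the empty one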
def get_reliabledictionary_list(client, application_name, service_name):
    """List existing reliable dictionaries.

    List the names of the existing reliable dictionaries for the given
    application and service.

    :param application_name: Name of the application.
    :type application_name: str
    :param service_name: Name of the service.
    :type service_name: str
    """
    cluster = Cluster.from_sfclient(client)
    service = cluster.get_application(application_name).get_service(service_name)
    for dictionary in service.get_dictionaries():
        print(dictionary.name)
def get_reliabledictionary_schema(client, application_name, service_name, dictionary_name, output_file=None):
    """Query schema information for existing reliable dictionaries.

    Query schema information for an existing reliable dictionary for the
    given application and service.

    :param application_name: Name of the application.
    :type application_name: str
    :param service_name: Name of the service.
    :type service_name: str
    :param dictionary_name: Name of the reliable dictionary.
    :type dictionary_name: str
    :param output_file: Optional file to save the schema.
    """
    cluster = Cluster.from_sfclient(client)
    dictionary = cluster.get_application(application_name).get_service(service_name).get_dictionary(dictionary_name)

    result = json.dumps(dictionary.get_information(), indent=4)

    if output_file is None:
        output_file = "{}-{}-{}-schema-output.json".format(application_name, service_name, dictionary_name)

    with open(output_file, "w") as output:
        output.write(result)

    print('Printed schema information to: ' + output_file)
    print(result)
def query_reliabledictionary(client, application_name, service_name, dictionary_name, query_string,
                             partition_key=None, partition_id=None, output_file=None):
    """Query existing reliable dictionary.

    Query an existing reliable dictionary for the given application and service.

    :param application_name: Name of the application.
    :type application_name: str
    :param service_name: Name of the service.
    :type service_name: str
    :param dictionary_name: Name of the reliable dictionary.
    :type dictionary_name: str
    :param query_string: An OData query string. For example $top=10. Check https://www.odata.org/documentation/ for more information.
    :type query_string: str
    :param partition_key: Optional partition key of the desired partition, either a string if named schema or int if Int64 schema.
    :type partition_key: str
    :param partition_id: Optional partition GUID of the owning reliable dictionary.
    :type partition_id: str
    :param output_file: Optional file to save the output.
    """
    cluster = Cluster.from_sfclient(client)
    dictionary = cluster.get_application(application_name).get_service(service_name).get_dictionary(dictionary_name)

    start = time.time()
    if partition_id is not None:
        result = dictionary.query(query_string, PartitionLookup.ID, partition_id)
    elif partition_key is not None:
        result = dictionary.query(query_string, PartitionLookup.KEY, partition_key)
    else:
        result = dictionary.query(query_string)

    if type(result) is str:
        print(result)
        return

    result = json.dumps(result.get("value"), indent=4)
    print("Query took " + str(time.time() - start) + " seconds")

    if output_file is None:
        output_file = "{}-{}-{}-query-output.json".format(application_name, service_name, dictionary_name)

    with open(output_file, "w") as output:
        output.write(result)

    print()
    print('Printed output to: ' + output_file)
    print(result)
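A hypothetical invocation, assuming an already-connected Service Fabric client; the application, service and dictionary names are illustrative.

# Illustrative names: fetch the top 10 items and write them to the default
# <application>-<service>-<dictionary>-query-output.json file.
query_reliabledictionary(client, 'MyApp', 'MyService', 'myDictionary',
                         '$top=10')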