def require_http_allowed_method(cls, request):
"""Ensure that we're allowed to use this HTTP method."""
allowed = cls.meta.http_allowed_methods
if request.method not in allowed:
# The specified method is not allowed for the resource
# identified by the request URI.
# RFC 2616 § 10.4.6 — 405 Method Not Allowed
raise http.exceptions.MethodNotAllowed(allowed) |
def route(self, request, response):
"""Processes every request.
Directs control flow to the appropriate HTTP/1.1 method.
"""
# Ensure that we're allowed to use this HTTP method.
self.require_http_allowed_method(request)
# Retrieve the function corresponding to this HTTP method.
function = getattr(self, request.method.lower(), None)
if function is None:
# Server is not capable of supporting it.
raise http.exceptions.NotImplemented()
# Delegate to the determined function to process the request.
return function(request, response) |
def options(self, request, response):
"""Process an `OPTIONS` request.
Used to initiate a cross-origin request. All handling specific to
CORS requests is done on every request; this method additionally
returns the list of available methods.
"""
# Gather a list of available HTTP/1.1 methods for this URI.
response['Allowed'] = ', '.join(self.meta.http_allowed_methods)
# All CORS handling is done for every HTTP/1.1 method.
# No more handling is necessary; set the response to 200 and return.
response.status = http.client.OK |
def resource(**kwargs):
"""Wraps the decorated function in a lightweight resource."""
def inner(function):
name = kwargs.pop('name', None)
if name is None:
name = utils.dasherize(function.__name__)
methods = kwargs.pop('methods', None)
if isinstance(methods, six.string_types):
# Tuple-ify the method if we got just a string.
methods = methods,
# Construct a handler.
handler = (function, methods)
if name not in _resources:
# Initiate the handlers list.
_handlers[name] = []
# Construct a light-weight resource using the passed kwargs
# as the arguments for the meta.
from armet import resources
kwargs['name'] = name
class LightweightResource(resources.Resource):
Meta = type(str('Meta'), (), kwargs)
def route(self, request, response):
for handler, methods in _handlers[name]:
if methods is None or request.method in methods:
return handler(request, response)
resources.Resource.route(self, request, response)
# Construct and add this resource.
_resources[name] = LightweightResource
# Add this to the handlers.
_handlers[name].append(handler)
# Return the resource.
return _resources[name]
# Return the inner method.
return inner |
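A minimal usage sketch of the decorator above (the handler name, options and return value are invented for illustration, not taken from the original source):
# Hypothetical example: register a lightweight resource named "ping"
# that only answers GET requests, using the `resource` decorator above.
@resource(name='ping', methods='GET')
def ping(request, response):
    response.status = 200
    return {'pong': True} |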
def threewise(iterable):
"""s -> (None, s0, s1), (s0, s1, s2), ... (sn-1, sn, None)
example:
for (last, cur, next) in threewise(l):
"""
a, b, c = itertools.tee(iterable,3)
def prepend(val, l):
yield val
for i in l: yield i
def postpend(val, l):
for i in l: yield i
yield val
next(c,None)
for _xa, _xb, _xc in six.moves.zip(prepend(None,a), b, postpend(None,c)):
yield (_xa, _xb, _xc) |
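An illustrative call to threewise showing the (previous, current, next) windows it yields (values invented):
# Expected output, for example's sake:
#   None 1 2
#   1 2 3
#   2 3 None
for prev, cur, nxt in threewise([1, 2, 3]):
    print(prev, cur, nxt) |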
def lines2less(lines):
"""
input: lines = list / iterator of strings
eg: lines = ["This is the first line", "This is the second line"]
output: print those lines to stdout if the output is short + narrow
otherwise print the lines to less
"""
lines = iter(lines) #cast list to iterator
#print output to stdout if small, otherwise to less
has_term = True
terminal_cols = 100
try:
terminal_cols = terminal_size()
except:
#getting terminal info failed -- maybe it's a
#weird situation like running through cron
has_term = False
MAX_CAT_ROWS = 20 #if there are <= this many rows then print to screen
first_rows = list(itertools.islice(lines,0,MAX_CAT_ROWS))
wide = any(len(l) > terminal_cols for l in first_rows)
use_less = False
if has_term and (wide or len(first_rows) == MAX_CAT_ROWS):
use_less = True
lines = itertools.chain(first_rows, lines)
lines = six.moves.map(lambda x: x + '\n', lines)
if use_less:
lesspager(lines)
else:
for l in lines:
sys.stdout.write(l) |
def lesspager(lines):
"""
Use for streaming writes to a less process
Taken from pydoc.pipepager:
/usr/lib/python2.7/pydoc.py
and
/usr/lib/python3.5/pydoc.py
"""
cmd = "less -S"
if sys.version_info[0] >= 3:
"""Page through text by feeding it to another program."""
import subprocess
proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
try:
with io.TextIOWrapper(proc.stdin, errors='backslashreplace') as pipe:
try:
for l in lines:
pipe.write(l)
except KeyboardInterrupt:
# We've hereby abandoned whatever text hasn't been written,
# but the pager is still in control of the terminal.
pass
except OSError:
pass # Ignore broken pipes caused by quitting the pager program.
while True:
try:
proc.wait()
break
except KeyboardInterrupt:
# Ignore ctl-c like the pager itself does. Otherwise the pager is
# left running and the terminal is in raw mode and unusable.
pass
else:
proc = os.popen(cmd, 'w')
try:
for l in lines:
proc.write(l)
except IOError:
proc.close()
sys.exit() |
def argmax(l,f=None):
"""http://stackoverflow.com/questions/5098580/implementing-argmax-in-python"""
if f:
l = [f(i) for i in l]
return max(enumerate(l), key=lambda x:x[1])[0] |
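Two quick illustrative calls to argmax (inputs invented for demonstration):
# Index of the largest element -> 2
print(argmax([3, 1, 7, 5]))
# With a key function, index of the longest string -> 1
print(argmax(['a', 'abc', 'ab'], f=len)) |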
def render_to_string(self):
"""Render to cookie strings.
"""
values = ''
for key, value in self.items():
values += '{}={};'.format(key, value)
return values |
def from_cookie_string(self, cookie_string):
"""update self with cookie_string.
"""
for key_value in cookie_string.split(';'):
if '=' in key_value:
key, value = key_value.split('=', 1)
else:
key = key_value
value = ''
strip_key = key.strip()
if strip_key and strip_key.lower() not in COOKIE_ATTRIBUTE_NAMES:
self[strip_key] = value.strip() |
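A minimal sketch of how the two cookie helpers above could be used together; the Cookies container and the COOKIE_ATTRIBUTE_NAMES value are assumptions for illustration, not the original class:
# Hypothetical dict-based container; the real owning class is not shown in this listing.
COOKIE_ATTRIBUTE_NAMES = ('path', 'domain', 'expires', 'secure', 'httponly')

class Cookies(dict):
    render_to_string = render_to_string
    from_cookie_string = from_cookie_string

cookies = Cookies()
cookies.from_cookie_string('sessionid=abc123; Path=/; HttpOnly')
print(cookies)                     # {'sessionid': 'abc123'}
print(cookies.render_to_string())  # 'sessionid=abc123;' |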
def _add_method(self, effect, verb, resource, conditions):
"""
Adds a method to the internal lists of allowed or denied methods.
Each object in the internal list contains a resource ARN and a
condition statement. The condition statement can be null.
"""
if verb != '*' and not hasattr(HttpVerb, verb):
raise NameError('Invalid HTTP verb ' + verb +
'. Allowed verbs are defined in the HttpVerb class.')
resource_pattern = re.compile(self.path_regex)
if not resource_pattern.match(resource):
raise NameError('Invalid resource path: ' + resource +
'. Path should match ' + self.path_regex)
if resource[:1] == '/':
resource = resource[1:]
resource_arn = ('arn:aws:execute-api:' +
self.region + ':' +
self.aws_account_id + ':' +
self.rest_api_id + '/' +
self.stage + '/' +
verb + '/' +
resource)
if effect.lower() == 'allow':
self.allowMethods.append({
'resource_arn': resource_arn,
'conditions': conditions
})
elif effect.lower() == 'deny':
self.denyMethods.append({
'resource_arn': resource_arn,
'conditions': conditions
}) |
def _get_effect_statement(self, effect, methods):
"""
This function loops over an array of objects containing
a resourceArn and conditions statement and generates
the array of statements for the policy.
"""
statements = []
if len(methods) > 0:
statement = self._get_empty_statement(effect)
for method in methods:
if (method['conditions'] is None or
len(method['conditions']) == 0):
statement['Resource'].append(method['resource_arn'])
else:
cond_statement = self._get_empty_statement(effect)
cond_statement['Resource'].append(method['resource_arn'])
cond_statement['Condition'] = method['conditions']
statements.append(cond_statement)
statements.append(statement)
return statements |
def allow_method_with_conditions(self, verb, resource, conditions):
"""
Adds an API Gateway method (Http verb + Resource path) to the
list of allowed methods and includes a condition for the policy
statement. More on AWS policy conditions here:
http://docs.aws.amazon.com/IAM/latest/UserGuide/
reference_policies_elements.html#Condition
"""
self._add_method('Allow', verb, resource, conditions) |
def deny_method_with_conditions(self, verb, resource, conditions):
"""
Adds an API Gateway method (Http verb + Resource path) to the
list of denied methods and includes a condition for the policy
statement. More on AWS policy conditions here:
http://docs.aws.amazon.com/IAM/latest/UserGuide/
reference_policies_elements.html#Condition
"""
self._add_method('Deny', verb, resource, conditions) |
def build(self):
"""
Generates the policy document based on the internal lists of
allowed and denied conditions. This will generate a policy with
two main statements for the effect: one statement for Allow and
one statement for Deny. Methods that includes conditions will
have their own statement in the policy.
"""
if ((self.allowMethods is None or len(self.allowMethods) == 0) and
(self.denyMethods is None or len(self.denyMethods) == 0)):
raise NameError('No statements defined for the policy')
policy = {
'principalId': self.principal_id,
'policyDocument': {
'Version': self.version,
'Statement': []
}
}
policy['policyDocument']['Statement'].extend(
self._get_effect_statement('Allow', self.allowMethods))
policy['policyDocument']['Statement'].extend(
self._get_effect_statement('Deny', self.denyMethods))
return policy |
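For orientation, the policy document returned by build() has roughly this shape; the IDs, region and action below are placeholders, and the statement skeleton comes from _get_empty_statement, which is not shown in this listing:
# Illustrative output shape only, not real values.
example_policy = {
    'principalId': 'user123',
    'policyDocument': {
        'Version': '2012-10-17',
        'Statement': [
            {
                'Action': 'execute-api:Invoke',  # assumed default action
                'Effect': 'Allow',
                'Resource': [
                    'arn:aws:execute-api:us-east-1:123456789012:abcdef123/dev/GET/pets'
                ]
            }
        ]
    }
} |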
def deref(self, data):
"""AWS doesn't quite have Swagger 2.0 validation right and will fail
on some refs. So, we need to convert to deref before
upload."""
# We have to make a deepcopy here to create a proper JSON
# compatible object, otherwise `json.dumps` fails when it
# hits jsonref.JsonRef objects.
deref = copy.deepcopy(jsonref.JsonRef.replace_refs(data))
# Write out JSON version because we might want this.
self.write_template(deref, filename='swagger.json')
return deref |
def check_pre_requirements(pre_requirements):
"""Check all necessary system requirements to exist.
:param pre_requirements:
Sequence of pre-requirements to check by running
``where <pre_requirement>`` on Windows and ``which ...`` elsewhere.
"""
pre_requirements = set(pre_requirements or [])
pre_requirements.add('virtualenv')
for requirement in pre_requirements:
if not which(requirement):
print_error('Requirement {0!r} is not found in system'.
format(requirement))
return False
return True |
def config_to_args(config):
"""Convert config dict to arguments list.
:param config: Configuration dict.
"""
result = []
for key, value in iteritems(config):
if value is False:
continue
key = '--{0}'.format(key.replace('_', '-'))
if isinstance(value, (list, set, tuple)):
for item in value:
result.extend((key, smart_str(item)))
elif value is not True:
result.extend((key, smart_str(value)))
else:
result.append(key)
return tuple(result) |
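A hedged example of the conversion performed by config_to_args (keys and values invented; smart_str simply stringifies each value):
config = {'quiet': True,
          'python': 'python3',
          'extra_index_url': ['https://a.example', 'https://b.example'],
          'no_site_packages': False}
# config_to_args(config) would return something like:
# ('--quiet',
#  '--python', 'python3',
#  '--extra-index-url', 'https://a.example',
#  '--extra-index-url', 'https://b.example')
print(config_to_args(config)) |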
def create_env(env, args, recreate=False, ignore_activated=False, quiet=False):
"""Create virtual environment.
:param env: Virtual environment name.
:param args: Pass given arguments to ``virtualenv`` script.
:param recreate: Recreate virtual environment? By default: False
:param ignore_activated:
Ignore already activated virtual environment and create new one. By
default: False
:param quiet: Do not output messages into terminal. By default: False
"""
cmd = None
result = True
inside_env = hasattr(sys, 'real_prefix') or os.environ.get('VIRTUAL_ENV')
env_exists = os.path.isdir(env)
if not quiet:
print_message('== Step 1. Create virtual environment ==')
if (
recreate or (not inside_env and not env_exists)
) or (
ignore_activated and not env_exists
):
cmd = ('virtualenv', ) + args + (env, )
if not cmd and not quiet:
if inside_env:
message = 'Working inside of virtual environment, done...'
else:
message = 'Virtual environment {0!r} already created, done...'
print_message(message.format(env))
if cmd:
with disable_error_handler():
result = not run_cmd(cmd, echo=not quiet)
if not quiet:
print_message()
return result |
def error_handler(func):
"""Decorator to error handling."""
@wraps(func)
def wrapper(*args, **kwargs):
"""
Run the wrapped function; if an exception is caught and error handling
is enabled, write the traceback to the log file.
"""
try:
return func(*args, **kwargs)
except BaseException as err:
# Do not catch exceptions on testing
if BOOTSTRAPPER_TEST_KEY in os.environ:
raise
# Fail silently if error handling disabled
if ERROR_HANDLER_DISABLED:
return True
# Otherwise save traceback to log
return save_traceback(err)
return wrapper |
def install(env, requirements, args, ignore_activated=False,
install_dev_requirements=False, quiet=False):
"""Install library or project into virtual environment.
:param env: Use given virtual environment name.
:param requirements: Use given requirements file for pip.
:param args: Pass given arguments to pip script.
:param ignore_activated:
Do not run pip inside already activated virtual environment. By
default: False
:param install_dev_requirements:
When enabled install prefixed or suffixed dev requirements after
original installation process completed. By default: False
:param quiet: Do not output message to terminal. By default: False
"""
if os.path.isfile(requirements):
args += ('-r', requirements)
label = 'project'
else:
args += ('-U', '-e', '.')
label = 'library'
# Attempt to install development requirements
if install_dev_requirements:
dev_requirements = None
dirname = os.path.dirname(requirements)
basename, ext = os.path.splitext(os.path.basename(requirements))
# Possible dev requirements files:
#
# * <requirements>-dev.<ext>
# * dev-<requirements>.<ext>
# * <requirements>_dev.<ext>
# * dev_<requirements>.<ext>
# * <requirements>dev.<ext>
# * dev<requirements>.<ext>
#
# Where <requirements> is basename of given requirements file to use
# and <ext> is its extension.
for delimiter in ('-', '_', ''):
filename = os.path.join(
dirname, ''.join((basename, delimiter, 'dev', ext))
)
if os.path.isfile(filename):
dev_requirements = filename
break
filename = os.path.join(
dirname, ''.join(('dev', delimiter, basename, ext))
)
if os.path.isfile(filename):
dev_requirements = filename
break
# If at least one dev requirements file found, install dev requirements
if dev_requirements:
args += ('-r', dev_requirements)
if not quiet:
print_message('== Step 2. Install {0} =='.format(label))
result = not pip_cmd(env,
('install', ) + args,
ignore_activated,
echo=not quiet)
if not quiet:
print_message()
return result |
def iteritems(data, **kwargs):
"""Iterate over dict items."""
return iter(data.items(**kwargs)) if IS_PY3 else data.iteritems(**kwargs) |
def iterkeys(data, **kwargs):
"""Iterate over dict keys."""
return iter(data.keys(**kwargs)) if IS_PY3 else data.iterkeys(**kwargs) |
def main(*args):
r"""Bootstrap Python projects and libraries with virtualenv and pip.
Also check system requirements before bootstrap and run post bootstrap
hook if any.
:param \*args: Command line arguments list.
"""
# Create parser, read arguments from direct input or command line
with disable_error_handler():
args = parse_args(args or sys.argv[1:])
# Read current config from file and command line arguments
config = read_config(args.config, args)
if config is None:
return True
bootstrap = config[__script__]
# Check pre-requirements
if not check_pre_requirements(bootstrap['pre_requirements']):
return True
# Create virtual environment
env_args = prepare_args(config['virtualenv'], bootstrap)
if not create_env(
bootstrap['env'],
env_args,
bootstrap['recreate'],
bootstrap['ignore_activated'],
bootstrap['quiet']
):
# Exit if couldn't create virtual environment
return True
# And install library or project here
pip_args = prepare_args(config['pip'], bootstrap)
if not install(
bootstrap['env'],
bootstrap['requirements'],
pip_args,
bootstrap['ignore_activated'],
bootstrap['install_dev_requirements'],
bootstrap['quiet']
):
# Exit if couldn't install requirements into venv
return True
# Run post-bootstrap hook
run_hook(bootstrap['hook'], bootstrap, bootstrap['quiet'])
# All OK!
if not bootstrap['quiet']:
print_message('All OK!')
# False means everything went alright, exit code: 0
return False |
def parse_args(args):
"""
Parse args from command line by creating argument parser instance and
process it.
:param args: Command line arguments list.
"""
from argparse import ArgumentParser
description = ('Bootstrap Python projects and libraries with virtualenv '
'and pip.')
parser = ArgumentParser(description=description)
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument(
'-c', '--config', default=DEFAULT_CONFIG,
help='Path to config file. By default: {0}'.format(DEFAULT_CONFIG)
)
parser.add_argument(
'-p', '--pre-requirements', default=[], nargs='+',
help='List of pre-requirements to check, separated by space.'
)
parser.add_argument(
'-e', '--env',
help='Virtual environment name. By default: {0}'.
format(CONFIG[__script__]['env'])
)
parser.add_argument(
'-r', '--requirements',
help='Path to requirements file. By default: {0}'.
format(CONFIG[__script__]['requirements'])
)
parser.add_argument(
'-d', '--install-dev-requirements', action='store_true', default=None,
help='Install prefixed or suffixed "dev" requirements after '
'installation of original requirements file or library completed '
'without errors.'
)
parser.add_argument(
'-C', '--hook', help='Execute this hook after bootstrap process.'
)
parser.add_argument(
'--ignore-activated', action='store_true', default=None,
help='Ignore pre-activated virtualenv, like on Travis CI.'
)
parser.add_argument(
'--recreate', action='store_true', default=None,
help='Recreate virtualenv on every run.'
)
parser.add_argument(
'-q', '--quiet', action='store_true', default=None,
help='Minimize output, show only error messages.'
)
return parser.parse_args(args) |
def pip_cmd(env, cmd, ignore_activated=False, **kwargs):
r"""Run pip command in given or activated virtual environment.
:param env: Virtual environment name.
:param cmd: Pip subcommand to run.
:param ignore_activated:
Ignore activated virtual environment and use given venv instead. By
default: False
:param \*\*kwargs:
Additional keyword arguments to be passed to :func:`~run_cmd`
"""
cmd = tuple(cmd)
dirname = safe_path(env)
if not ignore_activated:
activated_env = os.environ.get('VIRTUAL_ENV')
if hasattr(sys, 'real_prefix'):
dirname = sys.prefix
elif activated_env:
dirname = activated_env
pip_path = os.path.join(dirname, 'Scripts' if IS_WINDOWS else 'bin', 'pip')
if kwargs.pop('return_path', False):
return pip_path
if not os.path.isfile(pip_path):
raise OSError('No pip found at {0!r}'.format(pip_path))
# Disable pip version check in tests
if BOOTSTRAPPER_TEST_KEY in os.environ and cmd[0] == 'install':
cmd = list(cmd)
cmd.insert(1, '--disable-pip-version-check')
cmd = tuple(cmd)
with disable_error_handler():
return run_cmd((pip_path, ) + cmd, **kwargs) |
def prepare_args(config, bootstrap):
"""Convert config dict to command line args line.
:param config: Configuration dict.
:param bootstrap: Bootstrapper configuration dict.
"""
config = copy.deepcopy(config)
environ = dict(copy.deepcopy(os.environ))
data = {'env': bootstrap['env'],
'pip': pip_cmd(bootstrap['env'], '', return_path=True),
'requirements': bootstrap['requirements']}
environ.update(data)
if isinstance(config, string_types):
return config.format(**environ)
for key, value in iteritems(config):
if not isinstance(value, string_types):
continue
config[key] = value.format(**environ)
return config_to_args(config) |
def print_error(message, wrap=True):
"""Print error message to stderr, using ANSI-colors.
:param message: Message to print
:param wrap:
Wrap message into ``ERROR: <message>. Exit...`` template. By default:
True
"""
if wrap:
message = 'ERROR: {0}. Exit...'.format(message.rstrip('.'))
colorizer = (_color_wrap(colorama.Fore.RED)
if colorama
else lambda message: message)
return print(colorizer(message), file=sys.stderr) |
def print_message(message=None):
"""Print message via ``subprocess.call`` function.
This helps to ensure consistent output and avoid situations where printed
messages are shown only after messages from all inner threads.
:param message: Text message to print.
"""
kwargs = {'stdout': sys.stdout,
'stderr': sys.stderr,
'shell': True}
return subprocess.call('echo "{0}"'.format(message or ''), **kwargs) |
def read_config(filename, args):
"""
Read and parse configuration file. By default, ``filename`` is a path
relative to the current working directory.
If no config file found, default ``CONFIG`` would be used.
:param filename: Read config from given filename.
:param args: Parsed command line arguments.
"""
# Initial vars
config = defaultdict(dict)
splitter = operator.methodcaller('split', ' ')
converters = {
__script__: {
'env': safe_path,
'pre_requirements': splitter,
},
'pip': {
'allow_external': splitter,
'allow_unverified': splitter,
}
}
default = copy.deepcopy(CONFIG)
sections = set(iterkeys(default))
# Append download-cache for old pip versions
if int(getattr(pip, '__version__', '1.x').split('.')[0]) < 6:
default['pip']['download_cache'] = safe_path(os.path.expanduser(
os.path.join('~', '.{0}'.format(__script__), 'pip-cache')
))
# Expand user and environ vars in config filename
is_default = filename == DEFAULT_CONFIG
filename = os.path.expandvars(os.path.expanduser(filename))
# Read config if it exists on disk
if not is_default and not os.path.isfile(filename):
print_error('Config file does not exist at {0!r}'.format(filename))
return None
parser = ConfigParser()
try:
parser.read(filename)
except ConfigParserError:
print_error('Cannot parse config file at {0!r}'.format(filename))
return None
# Apply config for each possible section
for section in sections:
if not parser.has_section(section):
continue
items = parser.items(section)
# Make auto convert here for integers and boolean values
for key, value in items:
try:
value = int(value)
except (TypeError, ValueError):
try:
value = bool(strtobool(value))
except ValueError:
pass
if section in converters and key in converters[section]:
value = converters[section][key](value)
config[section][key] = value
# Update config with default values if necessary
for section, data in iteritems(default):
if section not in config:
config[section] = data
else:
for key, value in iteritems(data):
config[section].setdefault(key, value)
# Update bootstrap config from parsed args
keys = set((
'env', 'hook', 'install_dev_requirements', 'ignore_activated',
'pre_requirements', 'quiet', 'recreate', 'requirements'
))
for key in keys:
value = getattr(args, key)
config[__script__].setdefault(key, value)
if key == 'pre_requirements' and not value:
continue
if value is not None:
config[__script__][key] = value
return config |
def run_cmd(cmd, echo=False, fail_silently=False, **kwargs):
r"""Call given command with ``subprocess.call`` function.
:param cmd: Command to run.
:type cmd: tuple or str
:param echo:
If enabled show command to call and its output in STDOUT, otherwise
hide all output. By default: False
:param fail_silently: Do not raise exception on error. By default: False
:param \*\*kwargs:
Additional keyword arguments to be passed to ``subprocess.call``
function. STDOUT and STDERR streams would be setup inside of function
to ensure hiding command output in case of disabling ``echo``.
"""
retcode, out, err = None, None, None
if echo:
cmd_str = cmd if isinstance(cmd, string_types) else ' '.join(cmd)
kwargs['stdout'], kwargs['stderr'] = sys.stdout, sys.stderr
print_message('$ {0}'.format(cmd_str))
else:
out, err = get_temp_streams()
kwargs['stdout'], kwargs['stderr'] = out, err
try:
retcode = subprocess.call(cmd, **kwargs)
except subprocess.CalledProcessError as err:
if fail_silently:
return False
print_error(str(err) if IS_PY3 else unicode(err)) # noqa
finally:
if out:
out.close()
if err:
err.close()
if retcode and echo and not fail_silently:
print_error('Command {0!r} returned non-zero exit status {1}'.
format(cmd_str, retcode))
return retcode |
def run_hook(hook, config, quiet=False):
"""Run post-bootstrap hook if any.
:param hook: Hook to run.
:param config: Configuration dict.
:param quiet: Do not output messages to STDOUT/STDERR. By default: False
"""
if not hook:
return True
if not quiet:
print_message('== Step 3. Run post-bootstrap hook ==')
result = not run_cmd(prepare_args(hook, config),
echo=not quiet,
fail_silently=True,
shell=True)
if not quiet:
print_message()
return result |
def save_traceback(err):
"""Save error traceback to bootstrapper log file.
:param err: Caught exception.
"""
# Store logs to ~/.bootstrapper directory
dirname = safe_path(os.path.expanduser(
os.path.join('~', '.{0}'.format(__script__))
))
# But ensure that directory exists
if not os.path.isdir(dirname):
os.mkdir(dirname)
# Now we are ready to write the traceback to the log file
filename = os.path.join(dirname, '{0}.log'.format(__script__))
with open(filename, 'a+') as handler:
traceback.print_exc(file=handler)
# And show colorized message
message = ('User aborted workflow'
if isinstance(err, KeyboardInterrupt)
else 'Unexpected error caught')
print_error(message)
print_error('Full log stored to {0}'.format(filename), False)
return True |
def smart_str(value, encoding='utf-8', errors='strict'):
"""Convert Python object to string.
:param value: Python object to convert.
:param encoding: Encoding to use if in Python 2 given object is unicode.
:param errors: Errors mode to use if in Python 2 given object is unicode.
"""
if not IS_PY3 and isinstance(value, unicode): # noqa
return value.encode(encoding, errors)
return str(value) |
def copy_w_ext(srcfile, destdir, basename):
""" Copy `srcfile` in `destdir` with name `basename + get_extension(srcfile)`.
Add pluses to the destination path basename if a file with the same name already
exists in `destdir`.
Parameters
----------
srcfile: str
destdir: str
basename: str
Returns
-------
dstpath: str
"""
ext = get_extension(op.basename(srcfile))
dstpath = op.join(destdir, basename + ext)
return copy_w_plus(srcfile, dstpath) |
def copy_w_plus(src, dst):
"""Copy file from `src` path to `dst` path. If `dst` already exists, will add '+' characters
to the end of the basename without extension.
Parameters
----------
src: str
dst: str
Returns
-------
dstpath: str
"""
dst_ext = get_extension(dst)
dst_pre = remove_ext(dst)
while op.exists(dst_pre + dst_ext):
dst_pre += '+'
shutil.copy(src, dst_pre + dst_ext)
return dst_pre + dst_ext |
def get_abspath(folderpath):
"""Returns the absolute path of folderpath.
If the path does not exist, will raise IOError.
"""
if not op.exists(folderpath):
raise FolderNotFound(folderpath)
return op.abspath(folderpath) |
def get_extension(filepath, check_if_exists=False, allowed_exts=ALLOWED_EXTS):
"""Return the extension of fpath.
Parameters
----------
fpath: string
File name or path
check_if_exists: bool
allowed_exts: dict
Dictionary of strings, where the key is the last part of a complex ('.' separated) extension
and the value is the previous part.
For example: for the '.nii.gz' extension I would have a dict as {'.gz': ['.nii',]}
Returns
-------
str
The extension of the file name or path
"""
if check_if_exists:
if not op.exists(filepath):
raise IOError('File not found: ' + filepath)
rest, ext = op.splitext(filepath)
if ext in allowed_exts:
alloweds = allowed_exts[ext]
_, ext2 = op.splitext(rest)
if ext2 in alloweds:
ext = ext2 + ext
return ext |
def add_extension_if_needed(filepath, ext, check_if_exists=False):
"""Add the extension ext to fpath if it doesn't have it.
Parameters
----------
filepath: str
File name or path
ext: str
File extension
check_if_exists: bool
Returns
-------
File name or path with extension added, if needed.
"""
if not filepath.endswith(ext):
filepath += ext
if check_if_exists:
if not op.exists(filepath):
raise IOError('File not found: ' + filepath)
return filepath |
def parse_subjects_list(filepath, datadir='', split=':', labelsf=None):
"""Parses a file with a list of: <subject_file>:<subject_class_label>.
Parameters
----------
filepath: str
Path to file with a list of: <subject_file>:<subject_class_label>.
Where ':' can be any split character
datadir: str
String to be path prefix of each line of the fname content,
only in case the lines are relative file paths.
split: str
Split character for each line
labelsf: str
Path to file with a list of the labels if it is not included in
fname. It will overwrite the labels from fname.
Returns
-------
[labels, subjs] where labels is a list of labels and subjs a list of
filepaths
"""
labels = []
subjs = []
if datadir:
datadir += op.sep
with open(filepath, 'r') as f:
for s in f:
line = s.strip().split(split)
if len(line) == 2:
labels.append(float(line[1]))
subjf = line[0].strip()
else:
subjf = line[0].strip()
if not op.isabs(subjf):
subjs.append(datadir + subjf)
else:
subjs.append(subjf)
if labelsf is not None:
labels = np.loadtxt(labelsf)
return [labels, subjs] |
def create_subjects_file(filelist, labels, output_file, split=':'):
"""Creates a file where each line is <subject_file>:<subject_class_label>.
Parameters
----------
filelist: list of str
List of filepaths
labels: list of int, str or labels that can be transformed with str()
List of labels
output_file: str
Output file path
split: str
Split character for each line
"""
if len(filelist) != len(labels):
raise ValueError('Expected `filelist` and `labels` to have the same length. '
'Got {} and {}.'.format(len(filelist), len(labels)))
lines = []
for i, subj in enumerate(filelist):
lab = labels[i]
line = subj + split + str(lab)
lines.append(line)
lines = np.array(lines)
np.savetxt(output_file, lines, fmt='%s') |
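An illustrative call showing the file layout create_subjects_file produces (paths and labels invented):
# Writes one '<filepath>:<label>' line per subject, e.g.:
#   /data/subj01.nii:0
#   /data/subj02.nii:1
create_subjects_file(['/data/subj01.nii', '/data/subj02.nii'],
                     [0, 1],
                     'subjects.txt') |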
def join_path_to_filelist(path, filelist):
"""Joins path to each line in filelist
Parameters
----------
path: str
filelist: list of str
Returns
-------
list of filepaths
"""
return [op.join(path, str(item)) for item in filelist] |
def remove_all(filelist, folder=''):
"""Deletes all files in filelist
Parameters
----------
filelist: list of str
List of the file paths to be removed
folder: str
Path to be used as common directory for all file paths in filelist
"""
if not folder:
for f in filelist:
os.remove(f)
else:
for f in filelist:
os.remove(op.join(folder, f)) |
def get_folder_subpath(path, folder_depth):
"""
Returns a folder path of path with depth given by folder_depth.
Parameters
----------
path: str
folder_depth: int > 0
Returns
-------
A folder path
Example
-------
>>> get_folder_subpath('/home/user/mydoc/work/notes.txt', 3)
'/home/user/mydoc'
"""
if path[0] == op.sep:
folder_depth += 1
return op.sep.join(path.split(op.sep)[0:folder_depth]) |
def get_temp_dir(prefix=None, basepath=None):
"""
Uses tempfile to create a TemporaryDirectory using
the default arguments.
The folder is created using tempfile.mkdtemp() function.
Parameters
----------
prefix: str
Name prefix for the temporary folder.
basepath: str
Directory where the new folder must be created.
The default directory is chosen from a platform-dependent
list, but the user of the application can control the
directory location by setting the TMPDIR, TEMP or TMP
environment variables.
Returns
-------
folder object
"""
if basepath is None:
return tempfile.TemporaryDirectory(prefix=prefix)
else:
return tempfile.TemporaryDirectory(prefix=prefix, dir=basepath) |
def ux_file_len(filepath):
"""Returns the length of the file using the 'wc' GNU command
Parameters
----------
filepath: str
Returns
-------
int
"""
p = subprocess.Popen(['wc', '-l', filepath], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result, err = p.communicate()
if p.returncode != 0:
raise IOError(err)
l = result.strip()
l = int(l.split()[0])
return l |
def merge(dict_1, dict_2):
"""Merge two dictionaries.
Values that evaluate to true take priority over falsy values.
`dict_1` takes priority over `dict_2`.
"""
return dict((str(key), dict_1.get(key) or dict_2.get(key))
for key in set(dict_2) | set(dict_1)) |
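A small illustrative example of the merge precedence described above (values invented):
# Truthy values from dict_1 win; falsy values fall back to dict_2.
d1 = {'env': 'venv', 'quiet': None}
d2 = {'env': 'old-env', 'quiet': True, 'hook': 'make test'}
# merge(d1, d2) -> {'env': 'venv', 'quiet': True, 'hook': 'make test'}
print(merge(d1, d2)) |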
def get_sys_path(rcpath, app_name, section_name=None):
"""Return a folder path if it exists.
First will check if it is an existing system path; if it is, will return it
expanded and absolute.
If this fails will look for the rcpath variable in the app_name rcfiles or
exclusively within the given section_name, if given.
Parameters
----------
rcpath: str
Existing folder path or variable name in app_name rcfile with an
existing one.
section_name: str
Name of a section in the app_name rcfile to look exclusively there for
variable names.
app_name: str
Name of the application to look for rcfile configuration files.
Returns
-------
sys_path: str
A expanded absolute file or folder path if the path exists.
Raises
------
IOError if the proposed sys_path does not exist.
"""
# first check if it is an existing path
if op.exists(rcpath):
return op.realpath(op.expanduser(rcpath))
# look for the rcfile
settings = rcfile(app_name, section_name)
# look for the variable within the rcfile configurations
try:
sys_path = op.expanduser(settings[rcpath])
except KeyError:
raise IOError('Could not find an existing variable with name {0} in'
' section {1} of {2}rc config setup. Maybe it is a '
' folder that could not be found.'.format(rcpath,
section_name,
app_name))
# found the variable, now check if it is an existing path
else:
if not op.exists(sys_path):
raise IOError('Could not find the path {3} indicated by the '
'variable {0} in section {1} of {2}rc config '
'setup.'.format(rcpath, section_name, app_name,
sys_path))
# expand the path and return
return op.realpath(op.expanduser(sys_path)) |
def rcfile(appname, section=None, args={}, strip_dashes=True):
"""Read environment variables and config files and return them merged with
predefined list of arguments.
Parameters
----------
appname: str
Application name, used for config files and environment variable
names.
section: str
Name of the section to be read. If this is not set: appname.
args:
arguments from command line (optparse, docopt, etc).
strip_dashes: bool
Strip dashes prefixing key names from args dict.
Returns
--------
dict
containing the merged variables of environment variables, config
files and args.
Raises
------
IOError
In case the return value is empty.
Notes
-----
Environment variables are read if they start with appname in uppercase
with underscore, for example:
TEST_VAR=1
Config files compatible with ConfigParser are read and the section name
appname is read, example:
[appname]
var=1
We can also have host-dependent configuration values, which have
priority over the default appname values.
[appname]
var=1
[appname:mylinux]
var=3
For boolean flags do not try to use: 'True' or 'False',
'on' or 'off',
'1' or '0'.
Unless you are willing to parse these values by yourself.
We recommend commenting the variables out with '#' if you want to set a
flag to False and check if it is in the rcfile cfg dict, i.e.:
flag_value = 'flag_variable' in cfg
Files are read from: /etc/appname/config,
/etc/appnamerc,
~/.config/appname/config,
~/.config/appname,
~/.appname/config,
~/.appnamerc,
appnamerc,
.appnamerc,
appnamerc file found in 'path' folder variable in args,
.appnamerc file found in 'path' folder variable in args,
file provided by 'config' variable in args.
Example
-------
args = rcfile(__name__, docopt(__doc__, version=__version__))
"""
if strip_dashes:
for k in list(args.keys()):
args[k.lstrip('-')] = args.pop(k)
environ = get_environment(appname)
if section is None:
section = appname
config = get_config(appname,
section,
args.get('config', ''),
args.get('path', ''))
config = merge(merge(args, config), environ)
if not config:
raise IOError('Could not find any rcfile for application '
'{}.'.format(appname))
return config |
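A hedged end-to-end sketch of the lookup described in the docstring; the application name, file contents and variable below are invented, and environment-variable key normalisation is handled by get_environment, which is not shown in this listing:
# Hypothetical ~/.config/myapp/config contents:
#
#   [myapp]
#   data_dir = ~/data
#
#   [myapp:mylinux]
#   data_dir = /mnt/data
#
# An environment variable such as MYAPP_DATA_DIR would be merged in as well.
cfg = rcfile('myapp')
print(cfg.get('data_dir')) |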
def get_rcfile_section(app_name, section_name):
""" Return the dictionary containing the rcfile section configuration
variables.
Parameters
----------
section_name: str
Name of the section in the rcfiles.
app_name: str
Name of the application to look for its rcfiles.
Returns
-------
settings: dict
Dict with variable values
"""
try:
settings = rcfile(app_name, section_name)
except IOError:
raise
except:
raise KeyError('Error looking for section {} in {} '
' rcfiles.'.format(section_name, app_name))
else:
return settings |
def get_rcfile_variable_value(var_name, app_name, section_name=None):
""" Return the value of the variable in the section_name section of the
app_name rc file.
Parameters
----------
var_name: str
Name of the variable to be searched for.
section_name: str
Name of the section in the rcfiles.
app_name: str
Name of the application to look for its rcfiles.
Returns
-------
var_value: str
The value of the variable with given var_name.
"""
cfg = get_rcfile_section(app_name, section_name)
if var_name not in cfg:
raise KeyError('Option {} not found in {} '
'section.'.format(var_name, section_name))
return cfg[var_name] |
def find_in_sections(var_name, app_name):
""" Return the section and the value of the variable where the first
var_name is found in the app_name rcfiles.
Parameters
----------
var_name: str
Name of the variable to be searched for.
app_name: str
Name of the application to look for its rcfiles.
Returns
-------
section_name: str
Name of the section in the rcfiles where var_name was first found.
var_value: str
The value of the first variable with given var_name.
"""
sections = get_sections(app_name)
if not sections:
raise ValueError('No sections found in {} rcfiles.'.format(app_name))
for s in sections:
try:
var_value = get_rcfile_variable_value(var_name, section_name=s,
app_name=app_name)
except:
pass
else:
return s, var_value
raise KeyError('No variable {} has been found in {} '
'rcfiles.'.format(var_name, app_name)) |
def filter_list(lst, pattern):
"""
Filters the lst using pattern.
If pattern starts with '(' it will be considered a regular expression
(matched with re), otherwise fnmatch filtering will be used.
:param lst: list of strings
:param pattern: string
:return: list of strings
Filtered list of strings
"""
if is_fnmatch_regex(pattern) and not is_regex(pattern):
#use fnmatch
log.info('Using fnmatch for {0}'.format(pattern))
filst = fnmatch.filter(lst, pattern)
else:
#use re
log.info('Using regex match for {0}'.format(pattern))
filst = match_list(lst, pattern)
if filst:
filst.sort()
return filst |
def get_subdict(adict, path, sep=os.sep):
"""
Given a nested dictionary adict, this returns its children just below
the given path.
The path is a string composed of adict keys separated by sep.
:param adict: nested dict
:param path: str
:param sep: str
:return: dict or list or leaf of treemap
"""
return reduce(adict.__class__.get, [p for p in path.split(sep) if p], adict)
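An illustrative lookup with get_subdict (tree contents invented):
# The path string is split on `sep` and each piece indexes one level deeper.
tree = {'raw': {'anat': {'t1': 't1.nii'}}}
# get_subdict(tree, 'raw/anat', sep='/') -> {'t1': 't1.nii'}
print(get_subdict(tree, 'raw/anat', sep='/')) |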
def get_dict_leaves(data):
"""
Given a nested dictionary, this returns all its leaf elements in a list.
:param data: nested dict, list or leaf value
:return: list
"""
result = []
if isinstance(data, dict):
for item in data.values():
result.extend(get_dict_leaves(item))
elif isinstance(data, list):
result.extend(data)
else:
result.append(data)
return result |
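An illustrative call to get_dict_leaves on a small nested structure (contents invented; output order follows dict insertion order):
tree = {'raw': {'anat': ['t1.nii'], 'func': ['rest.nii', 'task.nii']}, 'log': 'run.log'}
# get_dict_leaves(tree) -> ['t1.nii', 'rest.nii', 'task.nii', 'run.log']
print(get_dict_leaves(tree)) |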
def get_possible_paths(base_path, path_regex):
"""
Looks for path_regex within base_path. Each match is appended
to the returned list.
path_regex may contain subfolder structure.
If any part of the folder structure is an fnmatch or regex pattern, it is
matched against the directory entries found at that level.
:param base_path: str
:param path_regex: str
:return list of strings
"""
if not path_regex:
return []
if len(path_regex) < 1:
return []
if path_regex[0] == os.sep:
path_regex = path_regex[1:]
rest_files = ''
if os.sep in path_regex:
#split by os.sep
node_names = path_regex.partition(os.sep)
first_node = node_names[0]
rest_nodes = node_names[2]
folder_names = filter_list(os.listdir(base_path), first_node)
for nom in folder_names:
new_base = op.join(base_path, nom)
if op.isdir(new_base):
rest_files = get_possible_paths(new_base, rest_nodes)
else:
rest_files = filter_list(os.listdir(base_path), path_regex)
files = []
if rest_files:
files = [op.join(base_path, f) for f in rest_files]
return files |
def create_folder(dirpath, overwrite=False):
""" Will create dirpath folder. If dirpath already exists and overwrite is False,
will append a '+' suffix to dirpath until dirpath does not exist."""
if not overwrite:
while op.exists(dirpath):
dirpath += '+'
os.makedirs(dirpath, exist_ok=overwrite)
return dirpath |
def _import_config(filepath):
"""
Imports filetree and root_path variable values from the filepath.
:param filepath:
:return: root_path and filetree
"""
if not op.isfile(filepath):
raise IOError('Data config file not found. '
'Got: {0}'.format(filepath))
cfg = import_pyfile(filepath)
if not hasattr(cfg, 'root_path'):
raise KeyError('Config file root_path key not found.')
if not hasattr(cfg, 'filetree'):
raise KeyError('Config file filetree key not found.')
return cfg.root_path, cfg.filetree |
def remove_nodes(self, pattern, adict):
"""
Remove the nodes that match the pattern.
"""
mydict = self._filetree if adict is None else adict
if isinstance(mydict, dict):
for nom in mydict.keys():
if isinstance(mydict[nom], dict):
matchs = filter_list(mydict[nom], pattern)
for nom in matchs:
mydict = self.remove_nodes(pattern, mydict[nom])
mydict.pop(nom)
else:
mydict[nom] = filter_list(mydict[nom], pattern)
else:
matchs = set(filter_list(mydict, pattern))
mydict = set(mydict) - matchs
return mydict |
def count_node_match(self, pattern, adict=None):
"""
Return the number of nodes that match the pattern.
:param pattern:
:param adict:
:return: int
"""
mydict = self._filetree if adict is None else adict
k = 0
if isinstance(mydict, dict):
names = mydict.keys()
k += len(filter_list(names, pattern))
for nom in names:
k += self.count_node_match(pattern, mydict[nom])
else:
k = len(filter_list(mydict, pattern))
return k |
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64) |
def _num_samples(x):
"""Return number of samples in array-like x."""
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %r" % x)
return x.shape[0] if hasattr(x, 'shape') else len(x) |
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: %s"
% str(uniques)) |
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
non-iterable objects to arrays.
Parameters
----------
iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result |
def check_array(array, accept_sparse=None, dtype=None, order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2D numpy array.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, order,
copy, force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
array = np.array(array, dtype=dtype, order=order, copy=copy)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
return array |
def check_X_y(X, y, accept_sparse=None, dtype=None, order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
Returns
-------
X_converted : object
The converted and validated X.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
check_consistent_length(X, y)
return X, y |
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape)) |
def warn_if_not_float(X, estimator='This algorithm'):
"""Warning utility function to check that data type is floating point.
Returns True if a warning was raised (i.e. the input is not float) and
False otherwise, for easier input validation.
"""
if not isinstance(estimator, str):
estimator = estimator.__class__.__name__
if X.dtype.kind != 'f':
warnings.warn("%s assumes floating point values as input, "
"got %s" % (estimator, X.dtype))
return True
return False |
def as_ndarray(arr, copy=False, dtype=None, order='K'):
"""Convert an arbitrary array to numpy.ndarray.
In the case of a memmap array, a copy is automatically made to break the
link with the underlying file (whatever the value of the "copy" keyword).
The purpose of this function is mainly to get rid of memmap objects, but
it can be used for other purposes. In particular, combining copying and
casting can lead to performance improvements in some cases, by avoiding
unnecessary copies.
If not specified, input array order is preserved, in all cases, even when
a copy is requested.
Caveat: this function does not copy during bool to/from 1-byte dtype
conversions. This can lead to some surprising results in some rare cases.
Example:
a = numpy.asarray([0, 1, 2], dtype=numpy.int8)
b = as_ndarray(a, dtype=bool) # array([False, True, True], dtype=bool)
c = as_ndarray(b, dtype=numpy.int8) # array([0, 1, 2], dtype=numpy.int8)
The usually expected result for the last line would be array([0, 1, 1])
because True evaluates to 1. Since there is no copy made here, the original
array is recovered.
Parameters
----------
arr: array-like
input array. Any value accepted by numpy.asarray is valid.
copy: bool
if True, force a copy of the array. Always True when arr is a memmap.
dtype: any numpy dtype
dtype of the returned array. Performing copy and type conversion at the
same time can in some cases avoid an additional copy.
order: string
gives the order of the returned array.
Valid values are: "C", "F", "A", "K", None.
default is "K". See ndarray.copy() for more information.
Returns
-------
ret: np.ndarray
Numpy array containing the same data as arr, always of class
numpy.ndarray, and with no link to any underlying file.
"""
if order not in ('C', 'F', 'A', 'K', None):
raise ValueError("Invalid value for 'order': {}".format(str(order)))
if isinstance(arr, np.memmap):
if dtype is None:
if order in ('K', 'A', None):
ret = np.array(np.asarray(arr), copy=True)
else:
ret = np.array(np.asarray(arr), copy=True, order=order)
else:
if order in ('K', 'A', None):
# always copy (even when dtype does not change)
ret = np.asarray(arr).astype(dtype)
else:
# load data from disk without changing order
# Changing order while reading through a memmap is incredibly
# inefficient.
ret = _asarray(np.array(arr, copy=True), dtype=dtype, order=order)
elif isinstance(arr, np.ndarray):
ret = _asarray(arr, dtype=dtype, order=order)
# In the present case, np.may_share_memory's result is always reliable.
if np.may_share_memory(ret, arr) and copy:
# order-preserving copy
ret = ret.T.copy().T if ret.flags['F_CONTIGUOUS'] else ret.copy()
elif isinstance(arr, (list, tuple)):
if order in ("A", "K"):
ret = np.asarray(arr, dtype=dtype)
else:
ret = np.asarray(arr, dtype=dtype, order=order)
else:
raise ValueError("Type not handled: {}".format(arr.__class__))
return ret |
def xfm_atlas_to_functional(atlas_filepath, anatbrain_filepath, meanfunc_filepath,
atlas2anat_nonlin_xfm_filepath, is_atlas2anat_inverted,
anat2func_lin_xfm_filepath,
atlasinanat_out_filepath, atlasinfunc_out_filepath,
interp='nn', rewrite=True, parallel=False):
"""Call FSL tools to apply transformations to a given atlas to a functional image.
Given the transformation matrices.
Parameters
----------
atlas_filepath: str
Path to the 3D atlas volume file.
anatbrain_filepath: str
Path to the anatomical brain volume file (skull-stripped and registered to the same space as the atlas,
e.g., MNI).
meanfunc_filepath: str
Path to the average functional image to be used as reference in the last applywarp step.
atlas2anat_nonlin_xfm_filepath: str
Path to the atlas to anatomical brain non-linear transformation (warp field) file.
If you have the inverse transformation, i.e., anatomical brain to atlas, set is_atlas2anat_inverted to True.
is_atlas2anat_inverted: bool
If False will have to calculate the inverse atlas2anat transformation to apply the transformations.
This step will be performed with FSL invwarp.
anat2func_lin_xfm_filepath: str
Path to the anatomical to functional .mat linear transformation file.
atlasinanat_out_filepath: str
Path to output file which will contain the 3D atlas in the subject anatomical space.
atlasinfunc_out_filepath: str
Path to output file which will contain the 3D atlas in the subject functional space.
interp: str
Interpolation method passed to applywarp. By default: 'nn' (nearest neighbour).
rewrite: bool
If True will re-run all the commands overwriting any existing file. Otherwise will check if
each file exists and if it does won't run the command.
parallel: bool
If True will launch the commands using ${FSLDIR}/fsl_sub to use the cluster infrastructure you have setup
with FSL (SGE or HTCondor).
"""
if is_atlas2anat_inverted:
# I already have the inverted fields I need
anat_to_mni_nl_inv = atlas2anat_nonlin_xfm_filepath
else:
# I am creating the inverted fields then...need output file path:
output_dir = op.abspath (op.dirname(atlasinanat_out_filepath))
ext = get_extension(atlas2anat_nonlin_xfm_filepath)
anat_to_mni_nl_inv = op.join(output_dir, remove_ext(op.basename(atlas2anat_nonlin_xfm_filepath)) + '_inv' + ext)
# setup the commands to be called
invwarp_cmd = op.join('${FSLDIR}', 'bin', 'invwarp')
applywarp_cmd = op.join('${FSLDIR}', 'bin', 'applywarp')
fslsub_cmd = op.join('${FSLDIR}', 'bin', 'fsl_sub')
# add fsl_sub before the commands
if parallel:
invwarp_cmd = fslsub_cmd + ' ' + invwarp_cmd
applywarp_cmd = fslsub_cmd + ' ' + applywarp_cmd
# create the inverse fields
if rewrite or (not is_atlas2anat_inverted and not op.exists(anat_to_mni_nl_inv)):
log.debug('Creating {}.\n'.format(anat_to_mni_nl_inv))
cmd = invwarp_cmd + ' '
cmd += '-w {} '.format(atlas2anat_nonlin_xfm_filepath)
cmd += '-o {} '.format(anat_to_mni_nl_inv)
cmd += '-r {} '.format(anatbrain_filepath)
log.debug('Running {}'.format(cmd))
check_call(cmd)
# transform the atlas to anatomical space
if rewrite or not op.exists(atlasinanat_out_filepath):
log.debug('Creating {}.\n'.format(atlasinanat_out_filepath))
cmd = applywarp_cmd + ' '
cmd += '--in={} '.format(atlas_filepath)
cmd += '--ref={} '.format(anatbrain_filepath)
cmd += '--warp={} '.format(anat_to_mni_nl_inv)
cmd += '--interp={} '.format(interp)
cmd += '--out={} '.format(atlasinanat_out_filepath)
log.debug('Running {}'.format(cmd))
check_call(cmd)
# transform the atlas to functional space
if rewrite or not op.exists(atlasinfunc_out_filepath):
log.debug('Creating {}.\n'.format(atlasinfunc_out_filepath))
cmd = applywarp_cmd + ' '
cmd += '--in={} '.format(atlasinanat_out_filepath)
cmd += '--ref={} '.format(meanfunc_filepath)
cmd += '--premat={} '.format(anat2func_lin_xfm_filepath)
cmd += '--interp={} '.format(interp)
cmd += '--out={} '.format(atlasinfunc_out_filepath)
log.debug('Running {}'.format(cmd))
check_call(cmd) |
def fwhm2sigma(fwhm):
"""Convert a FWHM value to sigma in a Gaussian kernel.
Parameters
----------
fwhm: float or numpy.array
fwhm value or values
Returns
-------
fwhm: float or numpy.array
sigma values
"""
fwhm = np.asarray(fwhm)
return fwhm / np.sqrt(8 * np.log(2)) |
def sigma2fwhm(sigma):
"""Convert a sigma in a Gaussian kernel to a FWHM value.
Parameters
----------
sigma: float or numpy.array
sigma value or values
Returns
-------
fwhm: float or numpy.array
fwhm values corresponding to `sigma` values
"""
sigma = np.asarray(sigma)
return np.sqrt(8 * np.log(2)) * sigma |
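A quick numeric check of the FWHM/sigma relation used by both helpers, FWHM = sigma * sqrt(8 * ln 2), about 2.3548 * sigma (the 6 mm value is just an example):
# Both helpers are defined above; 6 mm is an arbitrary example value.
sigma = fwhm2sigma(6.0)   # ~2.548
print(sigma)
print(sigma2fwhm(sigma))  # back to ~6.0 |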
def _smooth_data_array(arr, affine, fwhm, copy=True):
"""Smooth images with a a Gaussian filter.
Apply a Gaussian filter along the three first dimensions of arr.
Parameters
----------
arr: numpy.ndarray
3D or 4D array, with image number as last dimension.
affine: numpy.ndarray
Image affine transformation matrix for image.
fwhm: scalar, numpy.ndarray
Smoothing kernel size, as Full-Width at Half Maximum (FWHM) in millimeters.
If a scalar is given, kernel width is identical on all three directions.
A numpy.ndarray must have 3 elements, giving the FWHM along each axis.
copy: bool
if True, will make a copy of the input array. Otherwise will directly smooth the input array.
Returns
-------
smooth_arr: numpy.ndarray
"""
if arr.dtype.kind == 'i':
if arr.dtype == np.int64:
arr = arr.astype(np.float64)
else:
arr = arr.astype(np.float32)
if copy:
arr = arr.copy()
# Zero out possible NaNs and Infs in the image.
arr[np.logical_not(np.isfinite(arr))] = 0
try:
# Keep the 3D part of the affine.
affine = affine[:3, :3]
# Convert from FWHM in mm to a sigma.
fwhm_sigma_ratio = np.sqrt(8 * np.log(2))
vox_size = np.sqrt(np.sum(affine ** 2, axis=0))
sigma = fwhm / (fwhm_sigma_ratio * vox_size)
for n, s in enumerate(sigma):
ndimage.gaussian_filter1d(arr, s, output=arr, axis=n)
except:
raise ValueError('Error smoothing the array.')
else:
return arr |
def smooth_imgs(images, fwhm):
"""Smooth images using a Gaussian filter.
Apply a Gaussian filter along the three first dimensions of each image in images.
In all cases, non-finite values in input are zeroed.
Parameters
----------
imgs: str or img-like object or iterable of img-like objects
See boyle.nifti.read.read_img
Image(s) to smooth.
fwhm: scalar or numpy.ndarray
Smoothing kernel size, as Full-Width at Half Maximum (FWHM) in millimeters.
If a scalar is given, kernel width is identical on all three directions.
A numpy.ndarray must have 3 elements, giving the FWHM along each axis.
Returns
-------
smooth_imgs: nibabel.Nifti1Image or list of.
Smooth input image/s.
"""
    if np.all(np.asarray(fwhm) <= 0):
        return images
if not isinstance(images, string_types) and hasattr(images, '__iter__'):
only_one = False
else:
only_one = True
images = [images]
result = []
for img in images:
img = check_img(img)
affine = img.get_affine()
smooth = _smooth_data_array(img.get_data(), affine, fwhm=fwhm, copy=True)
result.append(nib.Nifti1Image(smooth, affine))
if only_one:
return result[0]
else:
return result |
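# Illustrative usage sketch (not part of the original source): the file paths below are
# hypothetical; smooth_imgs is the function defined above and relies on check_img/nibabel.
smoothed = smooth_imgs('/data/subj01/anat.nii.gz', fwhm=6)              # single image in, single image out
smoothed_list = smooth_imgs(['/data/s01.nii.gz', '/data/s02.nii.gz'],
                            fwhm=6)                                     # list in, list out |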
def _smooth_array(arr, affine, fwhm=None, ensure_finite=True, copy=True, **kwargs):
"""Smooth images by applying a Gaussian filter.
Apply a Gaussian filter along the three first dimensions of arr.
This is copied and slightly modified from nilearn:
https://github.com/nilearn/nilearn/blob/master/nilearn/image/image.py
Added the **kwargs argument.
Parameters
==========
arr: numpy.ndarray
4D array, with image number as last dimension. 3D arrays are also
accepted.
affine: numpy.ndarray
(4, 4) matrix, giving affine transformation for image. (3, 3) matrices
are also accepted (only these coefficients are used).
If fwhm='fast', the affine is not used and can be None
fwhm: scalar, numpy.ndarray, 'fast' or None
Smoothing strength, as a full-width at half maximum, in millimeters.
If a scalar is given, width is identical on all three directions.
A numpy.ndarray must have 3 elements, giving the FWHM along each axis.
If fwhm == 'fast', a fast smoothing will be performed with
a filter [0.2, 1, 0.2] in each direction and a normalisation
to preserve the local average value.
If fwhm is None, no filtering is performed (useful when just removal
of non-finite values is needed).
ensure_finite: bool
if True, replace every non-finite values (like NaNs) by zero before
filtering.
copy: bool
if True, input array is not modified. False by default: the filtering
is performed in-place.
kwargs: keyword-arguments
Arguments for the ndimage.gaussian_filter1d function.
Returns
=======
filtered_arr: numpy.ndarray
arr, filtered.
Notes
=====
This function is most efficient with arr in C order.
"""
if arr.dtype.kind == 'i':
if arr.dtype == np.int64:
arr = arr.astype(np.float64)
else:
# We don't need crazy precision
arr = arr.astype(np.float32)
if copy:
arr = arr.copy()
if ensure_finite:
# SPM tends to put NaNs in the data outside the brain
arr[np.logical_not(np.isfinite(arr))] = 0
if fwhm == 'fast':
arr = _fast_smooth_array(arr)
elif fwhm is not None:
# Keep only the scale part.
affine = affine[:3, :3]
# Convert from a FWHM to a sigma:
fwhm_over_sigma_ratio = np.sqrt(8 * np.log(2))
vox_size = np.sqrt(np.sum(affine ** 2, axis=0))
sigma = fwhm / (fwhm_over_sigma_ratio * vox_size)
for n, s in enumerate(sigma):
ndimage.gaussian_filter1d(arr, s, output=arr, axis=n, **kwargs)
return arr |
def smooth_img(imgs, fwhm, **kwargs):
"""Smooth images by applying a Gaussian filter.
Apply a Gaussian filter along the three first dimensions of arr.
In all cases, non-finite values in input image are replaced by zeros.
This is copied and slightly modified from nilearn:
https://github.com/nilearn/nilearn/blob/master/nilearn/image/image.py
Added the **kwargs argument.
Parameters
==========
imgs: Niimg-like object or iterable of Niimg-like objects
See http://nilearn.github.io/manipulating_images/manipulating_images.html#niimg.
Image(s) to smooth.
fwhm: scalar, numpy.ndarray, 'fast' or None
Smoothing strength, as a Full-Width at Half Maximum, in millimeters.
If a scalar is given, width is identical on all three directions.
A numpy.ndarray must have 3 elements, giving the FWHM along each axis.
If fwhm == 'fast', a fast smoothing will be performed with
a filter [0.2, 1, 0.2] in each direction and a normalisation
to preserve the scale.
If fwhm is None, no filtering is performed (useful when just removal
of non-finite values is needed)
Returns
=======
filtered_img: nibabel.Nifti1Image or list of.
Input image, filtered. If imgs is an iterable, then filtered_img is a
list.
"""
# Use hasattr() instead of isinstance to workaround a Python 2.6/2.7 bug
# See http://bugs.python.org/issue7624
if hasattr(imgs, "__iter__") \
and not isinstance(imgs, string_types):
single_img = False
else:
single_img = True
imgs = [imgs]
ret = []
for img in imgs:
img = check_niimg(img)
affine = img.get_affine()
filtered = _smooth_array(img.get_data(), affine, fwhm=fwhm,
ensure_finite=True, copy=True, **kwargs)
ret.append(new_img_like(img, filtered, affine, copy_header=True))
if single_img:
return ret[0]
else:
return ret |
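# Illustrative usage sketch (not part of the original source): paths are hypothetical.
# smooth_img follows the nilearn-style interface documented above.
func_smooth = smooth_img('/data/func.nii.gz', fwhm=8)       # 8 mm isotropic Gaussian kernel
func_fast = smooth_img('/data/func.nii.gz', fwhm='fast')    # cheap [0.2, 1, 0.2] filter instead of a true Gaussian |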
def signed_session(self, session=None):
"""Create requests session with any required auth headers
applied.
:rtype: requests.Session.
"""
if session:
session = super(ClientCertAuthentication, self).signed_session(session)
else:
session = super(ClientCertAuthentication, self).signed_session()
if self.cert is not None:
session.cert = self.cert
if self.ca_cert is not None:
session.verify = self.ca_cert
if self.no_verify:
session.verify = False
return session |
def signed_session(self, session=None):
"""Create requests session with AAD auth headers
:rtype: requests.Session.
"""
from sfctl.config import (aad_metadata, aad_cache)
if session:
session = super(AdalAuthentication, self).signed_session(session)
else:
session = super(AdalAuthentication, self).signed_session()
if self.no_verify:
session.verify = False
authority_uri, cluster_id, client_id = aad_metadata()
existing_token, existing_cache = aad_cache()
context = adal.AuthenticationContext(authority_uri,
cache=existing_cache)
new_token = context.acquire_token(cluster_id,
existing_token['userId'], client_id)
header = "{} {}".format("Bearer", new_token['accessToken'])
session.headers['Authorization'] = header
return session |
def voxspace_to_mmspace(img):
""" Return a grid with coordinates in 3D physical space for `img`."""
shape, affine = img.shape[:3], img.affine
coords = np.array(np.meshgrid(*(range(i) for i in shape), indexing='ij'))
coords = np.rollaxis(coords, 0, len(shape) + 1)
mm_coords = nib.affines.apply_affine(affine, coords)
return mm_coords |
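# Illustrative usage sketch (not part of the original source): the path is hypothetical;
# nibabel is assumed to be imported as nib, as in the rest of this module.
img = nib.load('/data/anat.nii.gz')
mm_grid = voxspace_to_mmspace(img)                            # shape: img.shape[:3] + (3,)
center_mm = mm_grid[tuple(s // 2 for s in img.shape[:3])]     # world coordinates of the central voxel |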
def voxcoord_to_mm(cm, i, j, k):
'''
Parameters
----------
cm: nipy.core.reference.coordinate_map.CoordinateMap
i, j, k: floats
Voxel coordinates
Returns
-------
Triplet with real 3D world coordinates
'''
try:
mm = cm([i, j, k])
except Exception as exc:
raise Exception('Error on converting coordinates.') from exc
else:
return mm |
def mm_to_voxcoord(cm, x, y, z):
'''
Parameters
----------
cm: nipy.core.reference.coordinate_map.CoordinateMap
x, y, z: floats
Physical coordinates
Returns
-------
Triplet with 3D voxel coordinates
'''
try:
vox = cm.inverse()([x, y, z])
except Exception as exc:
raise Exception('Error on converting coordinates') from exc
else:
return vox |
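# Illustrative usage sketch (not part of the original source): the path is hypothetical and
# nipy must be installed; get_3D_coordmap is the helper defined right below.
cm = get_3D_coordmap(nib.load('/data/anat.nii.gz'))
xyz = voxcoord_to_mm(cm, 10, 20, 30)   # voxel indices -> mm coordinates
ijk = mm_to_voxcoord(cm, *xyz)         # mm coordinates -> back to (approximately) (10, 20, 30) |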
def get_3D_coordmap(img):
'''
Gets a 3D CoordinateMap from img.
Parameters
----------
img: nib.Nifti1Image or nipy Image
Returns
-------
nipy.core.reference.coordinate_map.CoordinateMap
'''
if isinstance(img, nib.Nifti1Image):
img = nifti2nipy(img)
if img.ndim == 4:
from nipy.core.reference.coordinate_map import drop_io_dim
cm = drop_io_dim(img.coordmap, 3)
else:
cm = img.coordmap
return cm |
def get_img_info(image):
"""Return the header and affine matrix from a Nifti file.
Parameters
----------
image: img-like object or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
Returns
-------
hdr, aff
"""
try:
img = check_img(image)
except Exception as exc:
raise Exception('Error reading file {0}.'.format(repr_imgs(image))) from exc
else:
return img.get_header(), img.get_affine() |
def get_img_data(image, copy=True):
"""Return the voxel matrix of the Nifti file.
    If `copy` is True, a copy of the image data is returned, so the input image is not modified.
Parameters
----------
image: img-like object or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
copy: bool
        If True, a copy of the image data is returned, so the input image is not modified.
Returns
-------
array_like
"""
try:
img = check_img(image)
if copy:
return get_data(img)
else:
return img.get_data()
except Exception as exc:
raise Exception('Error when reading file {0}.'.format(repr_imgs(image))) from exc |
def load_nipy_img(nii_file):
"""Read a Nifti file and return as nipy.Image
Parameters
----------
    nii_file: str
Nifti file path
Returns
-------
nipy.Image
"""
# delayed import because could not install nipy on Python 3 on OSX
import nipy
if not os.path.exists(nii_file):
raise FileNotFound(nii_file)
try:
return nipy.load_image(nii_file)
except Exception as exc:
raise Exception('Reading file {0}.'.format(repr_imgs(nii_file))) from exc |
def niftilist_to_array(img_filelist, outdtype=None):
"""
From the list of absolute paths to nifti files, creates a Numpy array
with the data.
Parameters
----------
img_filelist: list of str
List of absolute file paths to nifti files. All nifti files must have
the same shape.
outdtype: dtype
Type of the elements of the array, if not set will obtain the dtype from
the first nifti file.
Returns
-------
outmat: Numpy array with shape N x prod(vol.shape)
containing the N files as flat vectors.
vol_shape: Tuple with shape of the volumes, for reshaping.
"""
try:
first_img = img_filelist[0]
vol = get_img_data(first_img)
except IndexError as ie:
        raise Exception('Error getting the first item of `img_filelist`; the list seems to be empty.') from ie
if not outdtype:
outdtype = vol.dtype
outmat = np.zeros((len(img_filelist), np.prod(vol.shape)), dtype=outdtype)
try:
for i, img_file in enumerate(img_filelist):
vol = get_img_data(img_file)
outmat[i, :] = vol.flatten()
except Exception as exc:
raise Exception('Error on reading file {0}.'.format(img_file)) from exc
return outmat, vol.shape |
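# Illustrative usage sketch (not part of the original source): paths are hypothetical and
# all volumes are assumed to share the same shape, as the docstring requires.
files = ['/data/s01.nii.gz', '/data/s02.nii.gz', '/data/s03.nii.gz']
X, vol_shape = niftilist_to_array(files, outdtype=np.float32)   # X.shape == (3, np.prod(vol_shape))
first_vol = X[0].reshape(vol_shape)                             # recover the 3D volume of the first subject |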
def _crop_img_to(image, slices, copy=True):
"""Crops image to a smaller size
Crop img to size indicated by slices and modify the affine accordingly.
Parameters
----------
image: img-like object or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
Image to be cropped.
slices: list of slices
Defines the range of the crop.
E.g. [slice(20, 200), slice(40, 150), slice(0, 100)]
defines a 3D cube
If slices has less entries than image has dimensions,
the slices will be applied to the first len(slices) dimensions.
copy: boolean
Specifies whether cropped data is to be copied or not.
Default: True
Returns
-------
cropped_img: img-like object
Cropped version of the input image
"""
img = check_img(image)
data = img.get_data()
affine = img.get_affine()
cropped_data = data[slices]
if copy:
cropped_data = cropped_data.copy()
linear_part = affine[:3, :3]
old_origin = affine[:3, 3]
new_origin_voxel = np.array([s.start for s in slices])
new_origin = old_origin + linear_part.dot(new_origin_voxel)
new_affine = np.eye(4)
new_affine[:3, :3] = linear_part
new_affine[:3, 3] = new_origin
new_img = nib.Nifti1Image(cropped_data, new_affine)
return new_img |
def crop_img(image, rtol=1e-8, copy=True):
"""Crops img as much as possible
Will crop img, removing as many zero entries as possible
without touching non-zero entries. Will leave one voxel of
zero padding around the obtained non-zero area in order to
avoid sampling issues later on.
Parameters
----------
image: img-like object or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
Image to be cropped.
rtol: float
relative tolerance (with respect to maximal absolute
value of the image), under which values are considered
        negligible and thus croppable.
copy: boolean
Specifies whether cropped data is copied or not.
Returns
-------
cropped_img: image
Cropped version of the input image
"""
img = check_img(image)
data = img.get_data()
infinity_norm = max(-data.min(), data.max())
passes_threshold = np.logical_or(data < -rtol * infinity_norm,
data > rtol * infinity_norm)
if data.ndim == 4:
passes_threshold = np.any(passes_threshold, axis=-1)
coords = np.array(np.where(passes_threshold))
start = coords.min(axis=1)
end = coords.max(axis=1) + 1
# pad with one voxel to avoid resampling problems
start = np.maximum(start - 1, 0)
end = np.minimum(end + 1, data.shape[:3])
slices = [slice(s, e) for s, e in zip(start, end)]
return _crop_img_to(img, slices, copy=copy) |
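# Illustrative usage sketch (not part of the original source): the path is hypothetical.
# crop_img trims away the zero background, keeping one voxel of padding as described above.
cropped = crop_img('/data/brain_mask.nii.gz', rtol=1e-8)
print(cropped.shape)   # smaller than the original grid, same world-space alignment via the updated affine |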
def new_img_like(ref_niimg, data, affine=None, copy_header=False):
"""Create a new image of the same class as the reference image
Parameters
----------
ref_niimg: image
Reference image. The new image will be of the same type.
data: numpy array
Data to be stored in the image
affine: 4x4 numpy array, optional
Transformation matrix
copy_header: boolean, optional
Indicated if the header of the reference image should be used to
create the new image
Returns
-------
new_img: image
A loaded image with the same type (and header) as the reference image.
"""
# Hand-written loading code to avoid too much memory consumption
if not (hasattr(ref_niimg, 'get_data')
and hasattr(ref_niimg,'get_affine')):
if isinstance(ref_niimg, _basestring):
ref_niimg = nib.load(ref_niimg)
        elif isinstance(ref_niimg, (list, tuple)):  # a sequence of filenames: use the first one as reference
ref_niimg = nib.load(ref_niimg[0])
else:
raise TypeError(('The reference image should be a niimg, %r '
'was passed') % ref_niimg )
if affine is None:
affine = ref_niimg.get_affine()
if data.dtype == bool:
default_dtype = np.int8
if (LooseVersion(nib.__version__) >= LooseVersion('1.2.0') and
isinstance(ref_niimg, nib.freesurfer.mghformat.MGHImage)):
default_dtype = np.uint8
data = as_ndarray(data, dtype=default_dtype)
header = None
if copy_header:
header = copy.copy(ref_niimg.get_header())
header['scl_slope'] = 0.
header['scl_inter'] = 0.
header['glmax'] = 0.
header['cal_max'] = np.max(data) if data.size > 0 else 0.
        header['cal_min'] = np.min(data) if data.size > 0 else 0.
return ref_niimg.__class__(data, affine, header=header) |
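# Illustrative usage sketch (not part of the original source): the path is hypothetical.
# Builds a binarized copy of an image while reusing its affine and (optionally) its header.
ref = nib.load('/data/stat_map.nii.gz')
binary = new_img_like(ref, ref.get_data() > 0, copy_header=True)   # boolean data is stored as int8 |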
def save_variables_to_hdf5(file_path, variables, mode='w', h5path='/'):
"""
Parameters
----------
file_path: str
variables: dict
Dictionary with objects. Object name -> object
mode: str
HDF5 file access mode
See h5py documentation for details.
r Readonly, file must exist
r+ Read/write, file must exist
w Create file, truncate if exists
w- Create file, fail if exists
a Read/write if exists, create otherwise (default)
Notes
-----
It is recommended to use numpy arrays as objects.
List or tuples of strings won't work, convert them into numpy.arrays before.
"""
if not isinstance(variables, dict):
raise ValueError('Expected `variables` to be a dict, got a {}.'.format(type(variables)))
if not variables:
raise ValueError('Expected `variables` to be a non-empty dict.')
h5file = h5py.File(file_path, mode=mode)
h5group = h5file.require_group(h5path)
for vn in variables:
data = variables[vn]
# fix for string numpy arrays
if hasattr(data, 'dtype') and (data.dtype.type is np.string_ or data.dtype.type is np.unicode_):
dt = h5py.special_dtype(vlen=str)
data = data.astype(dt)
        if isinstance(data, dict):
            for key in data:
                h5group[str(key)] = data[key]
        elif isinstance(data, list):
            for idx, item in enumerate(data):
                h5group[str(idx)] = item
else:
h5group[vn] = data
h5file.close() |
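# Illustrative usage sketch (not part of the original source): the output path is hypothetical
# and the values are numpy arrays, as the Notes above recommend.
save_variables_to_hdf5('/tmp/derivatives.h5',
                       {'data': np.random.rand(4, 10),
                        'labels': np.array([0, 1, 0, 1])},
                       mode='w', h5path='/subjects') |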
def get_h5file(file_path, mode='r'):
""" Return the h5py.File given its file path.
Parameters
----------
file_path: string
HDF5 file path
mode: string
r Readonly, file must exist
r+ Read/write, file must exist
w Create file, truncate if exists
w- Create file, fail if exists
a Read/write if exists, create otherwise (default)
Returns
-------
h5file: h5py.File
"""
if not op.exists(file_path):
raise IOError('Could not find file {}.'.format(file_path))
try:
h5file = h5py.File(file_path, mode=mode)
except:
raise
else:
return h5file |
def extract_datasets(h5file, h5path='/'):
""" Return all dataset contents from h5path group in h5file in an OrderedDict.
Parameters
----------
h5file: h5py.File
HDF5 file object
h5path: str
HDF5 group path to read datasets from
Returns
-------
datasets: OrderedDict
Dict with variables contained in file_path/h5path
"""
if isinstance(h5file, str):
_h5file = h5py.File(h5file, mode='r')
else:
_h5file = h5file
_datasets = get_datasets(_h5file, h5path)
datasets = OrderedDict()
try:
for ds in _datasets:
datasets[ds.name.split('/')[-1]] = ds[:]
    except Exception as exc:
        raise RuntimeError('Error reading datasets in {}/{}.'.format(_h5file.filename, h5path)) from exc
finally:
if isinstance(h5file, str):
_h5file.close()
return datasets |
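# Illustrative usage sketch (not part of the original source): reads back the datasets written
# in the save_variables_to_hdf5 example above; the file path is the same hypothetical one.
datasets = extract_datasets('/tmp/derivatives.h5', h5path='/subjects')
print(sorted(datasets.keys()))   # ['data', 'labels'] |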
def _get_node_names(h5file, h5path='/', node_type=h5py.Dataset):
"""Return the node of type node_type names within h5path of h5file.
Parameters
----------
h5file: h5py.File
HDF5 file object
h5path: str
HDF5 group path to get the group names from
node_type: h5py object type
HDF5 object type
Returns
-------
names: list of str
List of names
"""
if isinstance(h5file, str):
_h5file = get_h5file(h5file, mode='r')
else:
_h5file = h5file
if not h5path.startswith('/'):
h5path = '/' + h5path
names = []
try:
h5group = _h5file.require_group(h5path)
for node in _hdf5_walk(h5group, node_type=node_type):
names.append(node.name)
    except Exception as exc:
        raise RuntimeError('Error getting node names from {}/{}.'.format(_h5file.filename, h5path)) from exc
finally:
if isinstance(h5file, str):
_h5file.close()
return names |
def mask(self, image):
""" self.mask setter
Parameters
----------
image: str or img-like object.
See NeuroImage constructor docstring.
"""
    if image is None:
        self._mask = None
        return
try:
mask = load_mask(image)
except Exception as exc:
raise Exception('Could not load mask image {}.'.format(image)) from exc
else:
self._mask = mask |
def check_compatibility(self, one_img, another_img=None):
"""
Parameters
----------
one_img: str or img-like object.
See NeuroImage constructor docstring.
    another_img: str or img-like object.
See NeuroImage constructor docstring.
If None will use the first image of self.images, if there is any.
Raises
------
NiftiFilesNotCompatible
If one_img and another_img aren't compatible.
ValueError
If another_img is None and there are no other images in this set.
"""
if another_img is None:
if len(self.items) > 0:
another_img = self.items[0]
else:
raise ValueError('self.items is empty, need an image to compare '
'with {}'.format(repr_imgs(one_img)))
try:
if self.all_compatible:
check_img_compatibility(one_img, another_img)
if self.mask is not None:
check_img_compatibility(one_img, self.mask, only_check_3d=True)
except:
raise |
def set_labels(self, labels):
"""
Parameters
----------
labels: list of int or str
        This list will be checked to have the same length as the number of subjects (`self.n_subjs`) in this set.
Raises
------
ValueError
if len(labels) != self.n_subjs
"""
if not isinstance(labels, string_types) and len(labels) != self.n_subjs:
raise ValueError('The number of given labels ({}) is not the same '
'as the number of subjects ({}).'.format(len(labels), self.n_subjs))
self.labels = labels |
def _load_images_and_labels(self, images, labels=None):
"""Read the images, load them into self.items and set the labels."""
if not isinstance(images, (list, tuple)):
raise ValueError('Expected an iterable (list or tuple) of strings or img-like objects. '
'Got a {}.'.format(type(images)))
if not len(images) > 0:
raise ValueError('Expected an iterable (list or tuple) of strings or img-like objects '
'of size higher than 0. Got {} items.'.format(len(images)))
if labels is not None and len(labels) != len(images):
raise ValueError('Expected the same length for image set ({}) and '
'labels list ({}).'.format(len(images), len(labels)))
first_file = images[0]
if first_file:
first_img = NeuroImage(first_file)
else:
        raise ValueError('Error reading image {}.'.format(repr_imgs(first_file)))
for idx, image in enumerate(images):
try:
img = NeuroImage(image)
self.check_compatibility(img, first_img)
except:
log.exception('Error reading image {}.'.format(repr_imgs(image)))
raise
else:
self.items.append(img)
self.set_labels(labels) |
def to_matrix(self, smooth_fwhm=0, outdtype=None):
"""Return numpy.ndarray with the masked or flatten image data and
the relevant information (mask indices and volume shape).
Parameters
----------
    smooth_fwhm: int
Integer indicating the size of the FWHM Gaussian smoothing kernel
to smooth the subject volumes before creating the data matrix
outdtype: dtype
Type of the elements of the array, if None will obtain the dtype from
the first nifti file.
Returns
-------
    outmat, mask_indices, mask_shape
    outmat: Numpy array with shape N x prod(vol.shape)
            containing the N files as flat vectors.
    mask_indices: matrix with indices of the voxels in the mask
    mask_shape: Tuple with the shape of the mask (or of the volumes, if no mask is set), for reshaping.
"""
if not self.all_compatible:
raise ValueError("`self.all_compatible` must be True in order to use this function.")
if not outdtype:
outdtype = self.items[0].dtype
# extract some info from the mask
n_voxels = None
mask_indices = None
mask_shape = self.items[0].shape[:3]
if self.has_mask:
mask_arr = self.mask.get_data()
mask_indices = np.nonzero(mask_arr)
mask_shape = self.mask.shape
n_voxels = np.count_nonzero(mask_arr)
# if the mask is empty will use the whole image
if n_voxels is None:
log.debug('Non-zero voxels have not been found in mask {}'.format(self.mask))
n_voxels = np.prod(mask_shape)
mask_indices = None
# get the shape of the flattened subject data
ndims = self.items[0].ndim
if ndims == 3:
subj_flat_shape = (n_voxels, )
elif ndims == 4:
subj_flat_shape = (n_voxels, self.items[0].shape[3])
else:
raise NotImplementedError('The subject images have {} dimensions. '
                                      'to_matrix is not yet implemented for this shape.'.format(ndims))
# create and fill the big matrix
outmat = np.zeros((self.n_subjs, ) + subj_flat_shape, dtype=outdtype)
try:
for i, image in enumerate(self.items):
if smooth_fwhm > 0:
image.fwhm = smooth_fwhm
if self.has_mask:
image.set_mask(self.mask)
outmat[i, :], _, _ = image.mask_and_flatten()
image.clear_data()
except Exception as exc:
raise Exception('Error flattening file {0}'.format(image)) from exc
else:
return outmat, mask_indices, mask_shape |
def to_file(self, output_file, smooth_fwhm=0, outdtype=None):
"""Save the Numpy array created from to_matrix function to the output_file.
    Will save into the file: data, labels, mask_indices, mask_shape and self.others (put here whatever you want).
    data: Numpy array with shape N x prod(vol.shape)
          containing the N files as flat vectors.
    mask_indices: matrix with indices of the voxels in the mask
    mask_shape: Tuple with the shape of the mask (or of the volumes, if no mask is set), for reshaping.
Parameters
----------
output_file: str
Path to the output file. The extension of the file will be taken into account for the file format.
Choices of extensions: '.pyshelf' or '.shelf' (Python shelve)
'.mat' (Matlab archive),
'.hdf5' or '.h5' (HDF5 file)
smooth_fwhm: int
Integer indicating the size of the FWHM Gaussian smoothing kernel
to smooth the subject volumes before creating the data matrix
outdtype: dtype
Type of the elements of the array, if None will obtain the dtype from
the first nifti file.
"""
outmat, mask_indices, mask_shape = self.to_matrix(smooth_fwhm, outdtype)
exporter = ExportData()
content = {'data': outmat,
'labels': self.labels,
'mask_indices': mask_indices,
'mask_shape': mask_shape, }
if self.others:
content.update(self.others)
log.debug('Creating content in file {}.'.format(output_file))
try:
exporter.save_variables(output_file, content)
except Exception as exc:
raise Exception('Error saving variables to file {}.'.format(output_file)) from exc |
def _init_subj_data(self, subj_files):
"""
Parameters
----------
subj_files: list or dict of str
file_path -> int/str
"""
try:
if isinstance(subj_files, list):
self.from_list(subj_files)
elif isinstance(subj_files, dict):
self.from_dict(subj_files)
else:
raise ValueError('Could not recognize subj_files argument variable type.')
except Exception as exc:
raise Exception('Cannot read subj_files input argument.') from exc |