Code | Summary
---|---
Please provide a description of the function:def auto_model(layout, scan_length=None, one_vs_rest=False):
    '''Create a simple default model for each of the tasks in a BIDSLayout.
    Contrasts each trial type against all other trial types at the run
    level, then uses t-tests at each higher level present to aggregate
    these results.
    Args:
        layout (BIDSLayout): A BIDSLayout instance.
        scan_length (int): Scan length to use when loading event variables
            in cases where the scan length cannot be read from the nifti.
            Primarily for testing.
        one_vs_rest (bool): Set to True to autogenerate contrasts of each
            trial type against every other trial type.
    Returns:
        models (list): A list of model dictionaries, one per task.
'''
base_name = split(layout.root)[-1]
tasks = layout.entities['task'].unique()
task_models = []
for task_name in tasks:
# Populate model meta-data
model = OrderedDict()
model["Name"] = "_".join([base_name, task_name])
model["Description"] = ("Autogenerated model for the %s task from %s" %
(task_name, base_name))
model["Input"] = {"Task": task_name}
steps = []
# Make run level block
transformations = OrderedDict(Name='Factor', Input=['trial_type'])
run = OrderedDict(Level='Run', Name='Run',
Transformations=[transformations])
# Get trial types
run_nodes = load_variables(layout, task=task_name, levels=['run'],
scan_length=scan_length)
evs = []
for n in run_nodes.nodes:
evs.extend(n.variables['trial_type'].values.values)
trial_types = np.unique(evs)
trial_type_factors = ["trial_type." + tt for tt in trial_types]
# Add HRF
run['Transformations'].append(
OrderedDict(Name='Convolve', Input=trial_type_factors))
run_model = OrderedDict(X=trial_type_factors)
run["Model"] = run_model
if one_vs_rest:
# if there are multiple trial types, build contrasts
contrasts = []
for i, tt in enumerate(trial_types):
cdict = OrderedDict()
if len(trial_types) > 1:
cdict["Name"] = "run_" + tt + "_vs_others"
else:
cdict["Name"] = "run_" + tt
cdict["ConditionList"] = trial_type_factors
# Calculate weights for contrast
weights = np.ones(len(trial_types))
try:
weights[trial_types != tt] = -1.0 / (len(trial_types) - 1)
except ZeroDivisionError:
pass
cdict["Weights"] = list(weights)
cdict["Type"] = "t"
contrasts.append(cdict)
run["Contrasts"] = contrasts
steps.append(run)
if one_vs_rest:
# if there are multiple sessions, t-test run level contrasts at
# session level
sessions = layout.get_sessions()
if len(sessions) > 1:
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in steps[-1]["Contrasts"]]
steps.append(_make_passthrough_contrast("Session",
contrast_names))
subjects = layout.get_subjects()
if len(subjects) > 1:
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in steps[-1]["Contrasts"]]
steps.append(_make_passthrough_contrast("Subject",
contrast_names))
# get contrasts names from previous block
contrast_names = [cc["Name"] for cc in steps[-1]["Contrasts"]]
steps.append(_make_passthrough_contrast("Dataset",
contrast_names))
model["Steps"] = steps
task_models.append(model)
return task_models
|
[] |
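A minimal usage sketch for auto_model. The import path for auto_model and the dataset path are assumptions, not taken from the source; BIDSLayout is imported as shown in the docstrings elsewhere in this document.
import bids
from bids.layout import BIDSLayout
from bids.analysis import auto_model  # import path is an assumption

# Hypothetical dataset root; any valid BIDS dataset would do.
layout = BIDSLayout('/data/ds000117')
models = auto_model(layout, one_vs_rest=True)
print(models[0]['Name'], len(models[0]['Steps']))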
Please provide a description of the function:def merge_variables(variables, name=None, **kwargs):
'''Merge/concatenate a list of variables along the row axis.
Parameters
----------
variables : :obj:`list`
A list of Variables to merge.
name : :obj:`str`
Optional name to assign to the output Variable. By default, uses the
same name as the input variables.
kwargs
Optional keyword arguments to pass onto the class-specific merge() call.
Possible args:
- sampling_rate (int, str): The sampling rate to use if resampling
of DenseRunVariables is necessary for harmonization. If 'auto',
the highest sampling rate found will be used. This argument is
only used when passing DenseRunVariables in the variables list.
Returns
-------
A single BIDSVariable of the same class as the input variables.
Notes
-----
    - Currently, this function only supports homogeneously-typed lists. In
      the future, it may be extended to support implicit conversion.
    - Variables in the list must all share the same name (i.e., it is not
      possible to merge two different variables into a single variable).
'''
classes = set([v.__class__ for v in variables])
if len(classes) > 1:
raise ValueError("Variables of different classes cannot be merged. "
"Variables passed are of classes: %s" % classes)
sources = set([v.source for v in variables])
if len(sources) > 1:
raise ValueError("Variables extracted from different types of files "
"cannot be merged. Sources found: %s" % sources)
return list(classes)[0].merge(variables, **kwargs)
|
[] |
Please provide a description of the function:def split(self, grouper):
''' Split the current SparseRunVariable into multiple columns.
Args:
        grouper (iterable): List to group by, where each unique value will
            be taken as the name of the resulting column.
Returns:
A list of SparseRunVariables, one per unique value in the
grouper.
'''
data = self.to_df(condition=True, entities=True)
data = data.drop('condition', axis=1)
subsets = []
for i, (name, g) in enumerate(data.groupby(grouper)):
name = '%s.%s' % (self.name, name)
col = self.__class__(name=name, data=g, source=self.source,
run_info=getattr(self, 'run_info', None))
subsets.append(col)
return subsets
|
[] |
Please provide a description of the function:def select_rows(self, rows):
''' Truncate internal arrays to keep only the specified rows.
Args:
rows (array): An integer or boolean array identifying the indices
of rows to keep.
'''
self.values = self.values.iloc[rows]
self.index = self.index.iloc[rows, :]
for prop in self._property_columns:
vals = getattr(self, prop)[rows]
setattr(self, prop, vals)
|
[] |
Please provide a description of the function:def to_dense(self, sampling_rate):
        ''' Convert the current sparse column to a dense representation.
        Args:
            sampling_rate (int, str): Sampling rate (in Hz) to use when
                constructing the DenseRunVariable.
        Returns:
            A DenseRunVariable.
        '''
duration = int(math.ceil(sampling_rate * self.get_duration()))
ts = np.zeros(duration, dtype=self.values.dtype)
onsets = np.round(self.onset * sampling_rate).astype(int)
durations = np.round(self.duration * sampling_rate).astype(int)
run_i, start, last_ind = 0, 0, 0
for i, val in enumerate(self.values.values):
if onsets[i] < last_ind:
start += self.run_info[run_i].duration * sampling_rate
run_i += 1
_onset = int(start + onsets[i])
_offset = int(_onset + durations[i])
if _onset >= duration:
warnings.warn("The onset time of a variable seems to exceed the runs"
"duration, hence runs are incremented by one internally.")
ts[_onset:_offset] = val
last_ind = onsets[i]
run_info = list(self.run_info)
return DenseRunVariable(
name=self.name,
values=ts,
run_info=run_info,
source=self.source,
sampling_rate=sampling_rate)
|
[] |
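The onset/duration bookkeeping in to_dense can be illustrated with a standalone sketch (plain numpy, independent of the Variable classes) that rasterizes sparse events onto a dense grid at a fixed sampling rate; all timing values below are made up for illustration.
import numpy as np

sampling_rate = 10  # Hz
onsets = np.array([0.0, 4.0, 9.5])      # event onsets, in seconds
durations = np.array([2.0, 1.0, 0.5])   # event durations, in seconds
values = np.array([1.0, 2.0, 3.0])      # event amplitudes
run_duration = 12.0                     # seconds

ts = np.zeros(int(np.ceil(run_duration * sampling_rate)))
starts = np.round(onsets * sampling_rate).astype(int)
stops = np.round((onsets + durations) * sampling_rate).astype(int)
for start, stop, val in zip(starts, stops, values):
    ts[start:stop] = val
# ts now holds the dense representation; e.g. the first 20 samples equal 1.0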
Please provide a description of the function:def split(self, grouper):
'''Split the current DenseRunVariable into multiple columns.
Parameters
----------
grouper : :obj:`pandas.DataFrame`
Binary DF specifying the design matrix to use for splitting. Number
of rows must match current ``DenseRunVariable``; a new ``DenseRunVariable``
will be generated for each column in the grouper.
Returns
-------
A list of DenseRunVariables, one per unique value in the grouper.
'''
values = grouper.values * self.values.values
df = pd.DataFrame(values, columns=grouper.columns)
return [DenseRunVariable(name='%s.%s' % (self.name, name),
values=df[name].values,
run_info=self.run_info,
source=self.source,
sampling_rate=self.sampling_rate)
for i, name in enumerate(df.columns)]
|
[] |
Please provide a description of the function:def _build_entity_index(self, run_info, sampling_rate):
''' Build the entity index from run information. '''
index = []
interval = int(round(1000. / sampling_rate))
_timestamps = []
for run in run_info:
reps = int(math.ceil(run.duration * sampling_rate))
ent_vals = list(run.entities.values())
df = pd.DataFrame([ent_vals] * reps, columns=list(run.entities.keys()))
ts = pd.date_range(0, periods=len(df), freq='%sms' % interval)
_timestamps.append(ts.to_series())
index.append(df)
self.timestamps = pd.concat(_timestamps, axis=0, sort=True)
return pd.concat(index, axis=0, sort=True).reset_index(drop=True)
|
[] |
Please provide a description of the function:def resample(self, sampling_rate, inplace=False, kind='linear'):
'''Resample the Variable to the specified sampling rate.
Parameters
----------
sampling_rate : :obj:`int`, :obj:`float`
Target sampling rate (in Hz).
inplace : :obj:`bool`, optional
If True, performs resampling in-place. If False, returns a resampled
copy of the current Variable. Default is False.
kind : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
Argument to pass to :obj:`scipy.interpolate.interp1d`; indicates
the kind of interpolation approach to use. See interp1d docs for
valid values. Default is 'linear'.
'''
if not inplace:
var = self.clone()
var.resample(sampling_rate, True, kind)
return var
if sampling_rate == self.sampling_rate:
return
old_sr = self.sampling_rate
n = len(self.index)
self.index = self._build_entity_index(self.run_info, sampling_rate)
x = np.arange(n)
num = len(self.index)
from scipy.interpolate import interp1d
f = interp1d(x, self.values.values.ravel(), kind=kind)
x_new = np.linspace(0, n - 1, num=num)
self.values = pd.DataFrame(f(x_new))
assert len(self.values) == len(self.index)
self.sampling_rate = sampling_rate
|
[] |
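The resampling step above follows a standard scipy interp1d pattern; a self-contained sketch of the same logic (old grid indexed 0..n-1, new grid with the target number of points), with made-up rates and signal:
import numpy as np
from scipy.interpolate import interp1d

old_sr, new_sr = 10, 25
signal = np.sin(np.linspace(0, 4 * np.pi, 10 * old_sr))  # 10 s of signal at 10 Hz

n = len(signal)
num = int(n * new_sr / old_sr)
f = interp1d(np.arange(n), signal, kind='linear')
resampled = f(np.linspace(0, n - 1, num=num))
print(len(signal), len(resampled))  # 100 250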
Please provide a description of the function:def to_df(self, condition=True, entities=True, timing=True, sampling_rate=None):
'''Convert to a DataFrame, with columns for name and entities.
Parameters
----------
condition : :obj:`bool`
If True, adds a column for condition name, and names the amplitude
column 'amplitude'. If False, returns just onset, duration, and
amplitude, and gives the amplitude column the current column name.
entities : :obj:`bool`
If True, adds extra columns for all entities.
timing : :obj:`bool`
If True, includes onset and duration columns (even though events are
sampled uniformly). If False, omits them.
'''
if sampling_rate not in (None, self.sampling_rate):
return self.resample(sampling_rate).to_df(condition, entities)
df = super(DenseRunVariable, self).to_df(condition, entities)
if timing:
df['onset'] = self.timestamps.values.astype(float) / 1e+9
df['duration'] = 1. / self.sampling_rate
return df
|
[] |
Please provide a description of the function:def get_collections(self, unit, names=None, merge=False,
sampling_rate=None, **entities):
''' Retrieve variable data for a specified level in the Dataset.
Args:
unit (str): The unit of analysis to return variables for. Must be
one of 'run', 'session', 'subject', or 'dataset'.
names (list): Optional list of variables names to return. If
None, all available variables are returned.
            merge (bool): If True, variables are merged across all observations
                of the current unit. E.g., if unit='subject', variables from
                all subjects will be merged into a single collection. If
                False, each observation is handled separately, and the result
                is returned as a list.
sampling_rate (int, str): If unit='run', the sampling rate to
pass onto the returned BIDSRunVariableCollection.
entities: Optional constraints used to limit what gets returned.
        Returns:
            A list of BIDSVariableCollection or BIDSRunVariableCollection
            objects (one per observation), or a single merged collection
            (or None) if merge=True.
'''
nodes = self.get_nodes(unit, entities)
var_sets = []
for n in nodes:
var_set = list(n.variables.values())
var_set = [v for v in var_set if v.matches_entities(entities)]
if names is not None:
var_set = [v for v in var_set if v.name in names]
# Additional filtering on Variables past run level, because their
# contents are extracted from TSV files containing rows from
# multiple observations
if unit != 'run':
var_set = [v.filter(entities) for v in var_set]
var_sets.append(var_set)
if merge:
var_sets = [list(chain(*var_sets))]
results = []
for vs in var_sets:
if not vs:
continue
if unit == 'run':
vs = clc.BIDSRunVariableCollection(vs, sampling_rate)
else:
vs = clc.BIDSVariableCollection(vs)
results.append(vs)
if merge:
return results[0] if results else None
return results
|
[] |
Please provide a description of the function:def get_or_create_node(self, level, entities, *args, **kwargs):
''' Retrieves a child Node based on the specified criteria, creating a
new Node if necessary.
        Args:
            level (str): The level of analysis of the Node to retrieve or
                create ('run', 'session', 'subject', or 'dataset').
            entities (dict): Dictionary of entities specifying which Node to
                return.
args, kwargs: Optional positional or named arguments to pass onto
class-specific initializers. These arguments are only used if
a Node that matches the passed entities doesn't already exist,
and a new one must be created.
Returns:
A Node instance.
'''
result = self.get_nodes(level, entities)
if result:
if len(result) > 1:
raise ValueError("More than one matching Node found! If you're"
" expecting more than one Node, use "
"get_nodes() instead of get_or_create_node()."
)
return result[0]
# Create Node
if level == 'run':
node = RunNode(entities, *args, **kwargs)
else:
node = Node(level, entities)
entities = dict(entities, node_index=len(self.nodes), level=level)
self.nodes.append(node)
node_row = pd.Series(entities)
self.index = self.index.append(node_row, ignore_index=True)
return node
|
[] |
Please provide a description of the function:def merge_collections(collections, force_dense=False, sampling_rate='auto'):
''' Merge two or more collections at the same level of analysis.
Args:
collections (list): List of Collections to merge.
sampling_rate (int, str): Sampling rate to use if it becomes necessary
to resample DenseRunVariables. Either an integer or 'auto' (see
merge_variables docstring for further explanation).
Returns:
A BIDSVariableCollection or BIDSRunVariableCollection, depending
on the type of the input collections.
'''
if len(listify(collections)) == 1:
return collections
levels = set([c.level for c in collections])
if len(levels) > 1:
raise ValueError("At the moment, it's only possible to merge "
"Collections at the same level of analysis. You "
"passed collections at levels: %s." % levels)
variables = list(chain(*[c.variables.values() for c in collections]))
cls = collections[0].__class__
variables = cls.merge_variables(variables, sampling_rate=sampling_rate)
if isinstance(collections[0], BIDSRunVariableCollection):
if sampling_rate == 'auto':
rates = [var.sampling_rate for var in variables
if isinstance(var, DenseRunVariable)]
sampling_rate = rates[0] if rates else None
return cls(variables, sampling_rate)
return cls(variables)
|
[] |
Please provide a description of the function:def merge_variables(variables, **kwargs):
''' Concatenates Variables along row axis.
Args:
variables (list): List of Variables to merge. Variables can have
different names (and all Variables that share a name will be
concatenated together).
Returns:
A list of Variables.
'''
var_dict = OrderedDict()
for v in variables:
if v.name not in var_dict:
var_dict[v.name] = []
var_dict[v.name].append(v)
return [merge_variables(vars_, **kwargs)
for vars_ in list(var_dict.values())]
|
[] |
Please provide a description of the function:def to_df(self, variables=None, format='wide', fillna=np.nan, **kwargs):
''' Merge variables into a single pandas DataFrame.
Args:
variables (list): Optional list of column names to retain; if None,
all variables are returned.
format (str): Whether to return a DataFrame in 'wide' or 'long'
format. In 'wide' format, each row is defined by a unique
onset/duration, and each variable is in a separate column. In
'long' format, each row is a unique combination of onset,
duration, and variable name, and a single 'amplitude' column
provides the value.
fillna: Replace missing values with the specified value.
kwargs: Optional keyword arguments to pass onto each Variable's
to_df() call (e.g., condition, entities, and timing).
Returns: A pandas DataFrame.
'''
if variables is None:
variables = list(self.variables.keys())
# Can receive already-selected Variables from sub-classes
if not isinstance(variables[0], BIDSVariable):
variables = [v for v in self.variables.values()
if v.name in variables]
dfs = [v.to_df(**kwargs) for v in variables]
df = pd.concat(dfs, axis=0, sort=True)
if format == 'long':
return df.reset_index(drop=True).fillna(fillna)
ind_cols = list(set(df.columns) - {'condition', 'amplitude'})
df['amplitude'] = df['amplitude'].fillna('n/a')
df = df.pivot_table(index=ind_cols, columns='condition',
values='amplitude', aggfunc='first')
df = df.reset_index().replace('n/a', fillna)
df.columns.name = None
return df
|
[] |
Please provide a description of the function:def from_df(cls, data, entities=None, source='contrast'):
''' Create a Collection from a pandas DataFrame.
Args:
            data (DataFrame): The DataFrame to convert to a Collection. Each
                column will be converted to a SimpleVariable.
entities (DataFrame): An optional second DataFrame containing
entity information.
source (str): The value to set as the source for all Variables.
Returns:
A BIDSVariableCollection.
'''
variables = []
for col in data.columns:
_data = pd.DataFrame(data[col].values, columns=['amplitude'])
if entities is not None:
_data = pd.concat([_data, entities], axis=1, sort=True)
variables.append(SimpleVariable(name=col, data=_data, source=source))
return BIDSVariableCollection(variables)
|
[] |
Please provide a description of the function:def clone(self):
''' Returns a shallow copy of the current instance, except that all
variables are deep-cloned.
'''
clone = copy(self)
clone.variables = {k: v.clone() for (k, v) in self.variables.items()}
return clone
|
[] |
Please provide a description of the function:def _index_entities(self):
''' Sets current instance's entities based on the existing index.
Note: Only entity key/value pairs common to all rows in all contained
Variables are returned. E.g., if a Collection contains Variables
extracted from runs 1, 2 and 3 from subject '01', the returned dict
will be {'subject': '01'}; the runs will be excluded as they vary
across the Collection contents.
'''
all_ents = pd.DataFrame.from_records(
[v.entities for v in self.variables.values()])
constant = all_ents.apply(lambda x: x.nunique() == 1)
if constant.empty:
self.entities = {}
else:
keep = all_ents.columns[constant]
ents = {k: all_ents[k].dropna().iloc[0] for k in keep}
self.entities = {k: v for k, v in ents.items() if pd.notnull(v)}
|
[] |
Please provide a description of the function:def match_variables(self, pattern, return_type='name'):
''' Return columns whose names match the provided regex pattern.
Args:
pattern (str): A regex pattern to match all variable names against.
return_type (str): What to return. Must be one of:
'name': Returns a list of names of matching variables.
'variable': Returns a list of Variable objects whose names
match.
'''
pattern = re.compile(pattern)
vars_ = [v for v in self.variables.values() if pattern.search(v.name)]
return vars_ if return_type.startswith('var') \
else [v.name for v in vars_]
|
[] |
Please provide a description of the function:def resample(self, sampling_rate=None, variables=None, force_dense=False,
in_place=False, kind='linear'):
''' Resample all dense variables (and optionally, sparse ones) to the
specified sampling rate.
Args:
sampling_rate (int, float): Target sampling rate (in Hz). If None,
uses the instance sampling rate.
variables (list): Optional list of Variables to resample. If None,
all variables are resampled.
force_dense (bool): if True, all sparse variables will be forced to
dense.
in_place (bool): When True, all variables are overwritten in-place.
When False, returns resampled versions of all variables.
kind (str): Argument to pass to scipy's interp1d; indicates the
kind of interpolation approach to use. See interp1d docs for
valid values.
'''
# Store old sampling rate-based variables
sampling_rate = sampling_rate or self.sampling_rate
_variables = {}
for name, var in self.variables.items():
if variables is not None and name not in variables:
continue
if isinstance(var, SparseRunVariable):
if force_dense and is_numeric_dtype(var.values):
_variables[name] = var.to_dense(sampling_rate)
else:
# None if in_place; no update needed
_var = var.resample(sampling_rate,
inplace=in_place,
kind=kind)
if not in_place:
_variables[name] = _var
if in_place:
for k, v in _variables.items():
self.variables[k] = v
self.sampling_rate = sampling_rate
else:
return _variables
|
[] |
Please provide a description of the function:def to_df(self, variables=None, format='wide', sparse=True,
sampling_rate=None, include_sparse=True, include_dense=True,
**kwargs):
''' Merge columns into a single pandas DataFrame.
Args:
variables (list): Optional list of variable names to retain;
if None, all variables are written out.
format (str): Whether to return a DataFrame in 'wide' or 'long'
format. In 'wide' format, each row is defined by a unique
onset/duration, and each variable is in a separate column. In
'long' format, each row is a unique combination of onset,
duration, and variable name, and a single 'amplitude' column
provides the value.
sparse (bool): If True, variables will be kept in a sparse
format provided they are all internally represented as such.
If False, a dense matrix (i.e., uniform sampling rate for all
events) will be exported. Will be ignored if at least one
variable is dense.
sampling_rate (float): If a dense matrix is written out, the
sampling rate (in Hz) to use for downsampling. Defaults to the
value currently set in the instance.
kwargs: Optional keyword arguments to pass onto each Variable's
to_df() call (e.g., condition, entities, and timing).
include_sparse (bool): Whether or not to include sparse Variables.
include_dense (bool): Whether or not to include dense Variables.
Returns: A pandas DataFrame.
'''
if not include_sparse and not include_dense:
raise ValueError("You can't exclude both dense and sparse "
"variables! That leaves nothing!")
if variables is None:
variables = list(self.variables.keys())
if not include_sparse:
variables = [v for v in variables if
isinstance(self.variables[v], DenseRunVariable)]
if not include_dense:
variables = [v for v in variables if not
isinstance(self.variables[v], DenseRunVariable)]
if not variables:
return None
_vars = [self.variables[v] for v in variables]
if sparse and all(isinstance(v, SimpleVariable) for v in _vars):
variables = _vars
else:
sampling_rate = sampling_rate or self.sampling_rate
# Make sure all variables have the same sampling rate
variables = list(self.resample(sampling_rate, variables,
force_dense=True,
in_place=False).values())
return super(BIDSRunVariableCollection, self).to_df(variables, format,
**kwargs)
|
[] |
Please provide a description of the function:def _transform(self, var):
''' Rename happens automatically in the base class, so all we need to
do is unset the original variable in the collection. '''
self.collection.variables.pop(var.name)
return var.values
|
[] |
Please provide a description of the function:def replace_entities(entities, pattern):
ents = re.findall(r'\{(.*?)\}', pattern)
new_path = pattern
for ent in ents:
match = re.search(r'([^|<]+)(<.*?>)?(\|.*)?', ent)
if match is None:
return None
name, valid, default = match.groups()
default = default[1:] if default is not None else default
if name in entities and valid is not None:
ent_val = str(entities[name])
if not re.match(valid[1:-1], ent_val):
if default is None:
return None
entities[name] = default
ent_val = entities.get(name, default)
if ent_val is None:
return None
new_path = new_path.replace('{%s}' % ent, str(ent_val))
return new_path
|
[
"\n Replaces all entity names in a given pattern with the corresponding\n values provided by entities.\n\n Args:\n entities (dict): A dictionary mapping entity names to entity values.\n pattern (str): A path pattern that contains entity names denoted\n by curly braces. Optional portions denoted by square braces.\n For example: 'sub-{subject}/[var-{name}/]{id}.csv'\n Accepted entity values, using regex matching, denoted within angle\n brackets.\n For example: 'sub-{subject<01|02>}/{task}.csv'\n\n Returns:\n A new string with the entity values inserted where entity names\n were denoted in the provided pattern.\n "
] |
Please provide a description of the function:def build_path(entities, path_patterns, strict=False):
path_patterns = listify(path_patterns)
    # Loop over available patterns; return the first one that matches all
for pattern in path_patterns:
# If strict, all entities must be contained in the pattern
if strict:
defined = re.findall(r'\{(.*?)(?:<[^>]+>)?\}', pattern)
if set(entities.keys()) - set(defined):
continue
# Iterate through the provided path patterns
new_path = pattern
optional_patterns = re.findall(r'\[(.*?)\]', pattern)
# First build from optional patterns if possible
for optional_pattern in optional_patterns:
optional_chunk = replace_entities(entities, optional_pattern) or ''
new_path = new_path.replace('[%s]' % optional_pattern,
optional_chunk)
# Replace remaining entities
new_path = replace_entities(entities, new_path)
if new_path:
return new_path
return None
|
[
"\n Constructs a path given a set of entities and a list of potential\n filename patterns to use.\n\n Args:\n entities (dict): A dictionary mapping entity names to entity values.\n path_patterns (str, list): One or more filename patterns to write\n the file to. Entities should be represented by the name\n surrounded by curly braces. Optional portions of the patterns\n should be denoted by square brackets. Entities that require a\n specific value for the pattern to match can pass them inside\n carets. Default values can be assigned by specifying a string after\n the pipe operator. E.g., (e.g., {type<image>|bold} would only match\n the pattern if the entity 'type' was passed and its value is\n \"image\", otherwise the default value \"bold\" will be used).\n Example 1: 'sub-{subject}/[var-{name}/]{id}.csv'\n Result 2: 'sub-01/var-SES/1045.csv'\n strict (bool): If True, all passed entities must be matched inside a\n pattern in order to be a valid match. If False, extra entities will\n be ignored so long as all mandatory entities are found.\n\n Returns:\n A constructed path for this file based on the provided patterns.\n "
] |
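A usage sketch based on the pattern shown in the docstring above, assuming the replace_entities and build_path functions defined above are in scope; the entity values mirror the docstring example.
entities = {'subject': '01', 'name': 'SES', 'id': 1045}
pattern = 'sub-{subject}/[var-{name}/]{id}.csv'
print(build_path(entities, pattern))                      # sub-01/var-SES/1045.csv
print(build_path({'subject': '02', 'id': 7}, pattern))    # sub-02/7.csv (optional chunk dropped)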
Please provide a description of the function:def write_contents_to_file(path, contents=None, link_to=None,
content_mode='text', root=None, conflicts='fail'):
if root is None and not isabs(path):
root = os.getcwd()
if root:
path = join(root, path)
if exists(path) or islink(path):
if conflicts == 'fail':
msg = 'A file at path {} already exists.'
raise ValueError(msg.format(path))
elif conflicts == 'skip':
msg = 'A file at path {} already exists, skipping writing file.'
logging.warn(msg.format(path))
return
elif conflicts == 'overwrite':
if isdir(path):
logging.warn('New path is a directory, not going to '
'overwrite it, skipping instead.')
return
os.remove(path)
elif conflicts == 'append':
i = 1
while i < sys.maxsize:
path_splits = splitext(path)
path_splits[0] = path_splits[0] + '_%d' % i
appended_filename = os.extsep.join(path_splits)
if not exists(appended_filename) and \
not islink(appended_filename):
path = appended_filename
break
i += 1
else:
raise ValueError('Did not provide a valid conflicts parameter')
if not exists(dirname(path)):
os.makedirs(dirname(path))
if link_to:
os.symlink(link_to, path)
elif contents:
mode = 'wb' if content_mode == 'binary' else 'w'
with open(path, mode) as f:
f.write(contents)
else:
raise ValueError('One of contents or link_to must be provided.')
|
[
"\n Uses provided filename patterns to write contents to a new path, given\n a corresponding entity map.\n\n Args:\n path (str): Destination path of the desired contents.\n contents (str): Raw text or binary encoded string of contents to write\n to the new path.\n link_to (str): Optional path with which to create a symbolic link to.\n Used as an alternative to and takes priority over the contents\n argument.\n content_mode (str): Either 'text' or 'binary' to indicate the writing\n mode for the new file. Only relevant if contents is provided.\n root (str): Optional root directory that all patterns are relative\n to. Defaults to current working directory.\n conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'\n that defines the desired action when the output path already\n exists. 'fail' raises an exception; 'skip' does nothing;\n 'overwrite' overwrites the existing file; 'append' adds a suffix\n to each file copy, starting with 1. Default is 'fail'.\n "
] |
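A brief usage sketch, assuming write_contents_to_file (defined above) is in scope; the root and relative path are hypothetical.
# Write a small text file, overwriting any existing copy at that path.
write_contents_to_file('derivatives/notes/readme.txt',
                       contents='Generated by the pipeline.',
                       root='/tmp/bids_out',
                       conflicts='overwrite')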
Please provide a description of the function:def generate(self, **kwargs):
descriptions = []
subjs = self.layout.get_subjects(**kwargs)
kwargs = {k: v for k, v in kwargs.items() if k != 'subject'}
for sid in subjs:
descriptions.append(self._report_subject(subject=sid, **kwargs))
counter = Counter(descriptions)
print('Number of patterns detected: {0}'.format(len(counter.keys())))
print(utils.reminder())
return counter
|
[
"Generate the methods section.\n\n Parameters\n ----------\n task_converter : :obj:`dict`, optional\n A dictionary with information for converting task names from BIDS\n filename format to human-readable strings.\n\n Returns\n -------\n counter : :obj:`collections.Counter`\n A dictionary of unique descriptions across subjects in the dataset,\n along with the number of times each pattern occurred. In cases\n where all subjects underwent the same protocol, the most common\n pattern is most likely the most complete. In cases where the\n dataset contains multiple protocols, each pattern will need to be\n inspected manually.\n\n Examples\n --------\n >>> from os.path import join\n >>> from bids.layout import BIDSLayout\n >>> from bids.reports import BIDSReport\n >>> from bids.tests import get_test_data_path\n >>> layout = BIDSLayout(join(get_test_data_path(), 'synthetic'))\n >>> report = BIDSReport(layout)\n >>> counter = report.generate(session='01')\n >>> counter.most_common()[0][0]\n "
] |
Please provide a description of the function:def _report_subject(self, subject, **kwargs):
description_list = []
# Remove sess from kwargs if provided, else set sess as all available
sessions = kwargs.pop('session',
self.layout.get_sessions(subject=subject,
**kwargs))
if not sessions:
sessions = [None]
elif not isinstance(sessions, list):
sessions = [sessions]
for ses in sessions:
niftis = self.layout.get(subject=subject, extensions='nii.gz',
**kwargs)
if niftis:
description_list.append('For session {0}:'.format(ses))
description_list += parsing.parse_niftis(self.layout, niftis,
subject, self.config,
session=ses)
metadata = self.layout.get_metadata(niftis[0].path)
else:
raise Exception('No niftis for subject {0}'.format(subject))
# Assume all data were converted the same way and use the last nifti
# file's json for conversion information.
if 'metadata' not in vars():
raise Exception('No valid jsons found. Cannot generate final '
'paragraph.')
description = '\n\t'.join(description_list)
description = description.replace('\tFor session', '\nFor session')
description += '\n\n{0}'.format(parsing.final_paragraph(metadata))
return description
|
[
"Write a report for a single subject.\n\n Parameters\n ----------\n subject : :obj:`str`\n Subject ID.\n\n Attributes\n ----------\n layout : :obj:`bids.layout.BIDSLayout`\n Layout object for a BIDS dataset.\n config : :obj:`dict`\n Configuration info for methods generation.\n\n Returns\n -------\n description : :obj:`str`\n A publication-ready report of the dataset's data acquisition\n information. Each scan type is given its own paragraph.\n "
] |
Please provide a description of the function:def _gamma_difference_hrf(tr, oversampling=50, time_length=32., onset=0.,
delay=6, undershoot=16., dispersion=1.,
u_dispersion=1., ratio=0.167):
from scipy.stats import gamma
dt = tr / oversampling
    time_stamps = np.linspace(0, time_length, np.rint(float(time_length) / dt).astype(int))
time_stamps -= onset
hrf = gamma.pdf(time_stamps, delay / dispersion, dt / dispersion) -\
ratio * gamma.pdf(
time_stamps, undershoot / u_dispersion, dt / u_dispersion)
hrf /= hrf.sum()
return hrf
|
[
" Compute an hrf as the difference of two gamma functions\n\n Parameters\n ----------\n\n tr : float\n scan repeat time, in seconds\n\n oversampling : int, optional (default=16)\n temporal oversampling factor\n\n time_length : float, optional (default=32)\n hrf kernel length, in seconds\n\n onset: float\n onset time of the hrf\n\n delay: float, optional\n delay parameter of the hrf (in s.)\n\n undershoot: float, optional\n undershoot parameter of the hrf (in s.)\n\n dispersion : float, optional\n dispersion parameter for the first gamma function\n\n u_dispersion : float, optional\n dispersion parameter for the second gamma function\n\n ratio : float, optional\n ratio of the two gamma components\n\n Returns\n -------\n hrf : array of shape(length / tr * oversampling, dtype=float)\n hrf sampling on the oversampled time grid\n "
] |
Please provide a description of the function:def spm_hrf(tr, oversampling=50, time_length=32., onset=0.):
return _gamma_difference_hrf(tr, oversampling, time_length, onset)
|
[
" Implementation of the SPM hrf model\n\n Parameters\n ----------\n tr : float\n scan repeat time, in seconds\n\n oversampling : int, optional\n temporal oversampling factor\n\n time_length : float, optional\n hrf kernel length, in seconds\n\n onset : float, optional\n hrf onset time, in seconds\n\n Returns\n -------\n hrf: array of shape(length / tr * oversampling, dtype=float)\n hrf sampling on the oversampled time grid\n "
] |
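Since the HRF helpers above depend only on numpy and scipy, they can be sampled directly; a small sketch (the printed values are approximate and assume the functions above are in scope):
import numpy as np

tr = 2.0
hrf = spm_hrf(tr, oversampling=50)           # sampled every tr/50 = 0.04 s
print(hrf.shape, round(float(hrf.sum()), 6)) # (800,) 1.0 -- kernel is normalized to unit sum
print(np.argmax(hrf) * tr / 50)              # peak roughly 5 s after onset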
Please provide a description of the function:def glover_hrf(tr, oversampling=50, time_length=32., onset=0.):
return _gamma_difference_hrf(tr, oversampling, time_length, onset,
delay=6, undershoot=12., dispersion=.9,
u_dispersion=.9, ratio=.35)
|
[
" Implementation of the Glover hrf model\n\n Parameters\n ----------\n tr : float\n scan repeat time, in seconds\n\n oversampling : int, optional\n temporal oversampling factor\n\n time_length : float, optional\n hrf kernel length, in seconds\n\n onset : float, optional\n onset of the response\n\n Returns\n -------\n hrf: array of shape(length / tr * oversampling, dtype=float)\n hrf sampling on the oversampled time grid\n "
] |
Please provide a description of the function:def spm_time_derivative(tr, oversampling=50, time_length=32., onset=0.):
do = .1
dhrf = 1. / do * (spm_hrf(tr, oversampling, time_length, onset) -
spm_hrf(tr, oversampling, time_length, onset + do))
return dhrf
|
[
"Implementation of the SPM time derivative hrf (dhrf) model\n\n Parameters\n ----------\n tr: float\n scan repeat time, in seconds\n\n oversampling: int, optional\n temporal oversampling factor, optional\n\n time_length: float, optional\n hrf kernel length, in seconds\n\n onset: float, optional\n onset of the response in seconds\n\n Returns\n -------\n dhrf: array of shape(length / tr, dtype=float)\n dhrf sampling on the provided grid\n "
] |
Please provide a description of the function:def glover_time_derivative(tr, oversampling=50, time_length=32., onset=0.):
do = .1
dhrf = 1. / do * (glover_hrf(tr, oversampling, time_length, onset) -
glover_hrf(tr, oversampling, time_length, onset + do))
return dhrf
|
[
"Implementation of the Glover time derivative hrf (dhrf) model\n\n Parameters\n ----------\n tr: float\n scan repeat time, in seconds\n oversampling: int,\n temporal oversampling factor, optional\n time_length: float,\n hrf kernel length, in seconds\n onset: float,\n onset of the response\n\n Returns\n -------\n dhrf: array of shape(length / tr), dtype=float\n dhrf sampling on the provided grid\n "
] |
Please provide a description of the function:def spm_dispersion_derivative(tr, oversampling=50, time_length=32., onset=0.):
dd = .01
dhrf = 1. / dd * (
- _gamma_difference_hrf(tr, oversampling, time_length,
onset, dispersion=1. + dd)
+ _gamma_difference_hrf(tr, oversampling, time_length, onset))
return dhrf
|
[
"Implementation of the SPM dispersion derivative hrf model\n\n Parameters\n ----------\n tr: float\n scan repeat time, in seconds\n\n oversampling: int, optional\n temporal oversampling factor in seconds\n\n time_length: float, optional\n hrf kernel length, in seconds\n\n onset : float, optional\n onset of the response in seconds\n\n Returns\n -------\n dhrf: array of shape(length / tr * oversampling), dtype=float\n dhrf sampling on the oversampled time grid\n "
] |
Please provide a description of the function:def _sample_condition(exp_condition, frame_times, oversampling=50,
min_onset=-24):
# Find the high-resolution frame_times
n = frame_times.size
min_onset = float(min_onset)
n_hr = ((n - 1) * 1. / (frame_times.max() - frame_times.min()) *
(frame_times.max() * (1 + 1. / (n - 1)) - frame_times.min() -
min_onset) * oversampling) + 1
hr_frame_times = np.linspace(frame_times.min() + min_onset,
frame_times.max() * (1 + 1. / (n - 1)),
                                 np.rint(n_hr).astype(int))
# Get the condition information
onsets, durations, values = tuple(map(np.asanyarray, exp_condition))
if (onsets < frame_times[0] + min_onset).any():
warnings.warn(('Some stimulus onsets are earlier than %s in the'
' experiment and are thus not considered in the model'
% (frame_times[0] + min_onset)), UserWarning)
# Set up the regressor timecourse
tmax = len(hr_frame_times)
    regressor = np.zeros_like(hr_frame_times).astype(float)
t_onset = np.minimum(np.searchsorted(hr_frame_times, onsets), tmax - 1)
regressor[t_onset] += values
t_offset = np.minimum(
np.searchsorted(hr_frame_times, onsets + durations),
tmax - 1)
# Handle the case where duration is 0 by offsetting at t + 1
for i, t in enumerate(t_offset):
if t < (tmax - 1) and t == t_onset[i]:
t_offset[i] += 1
regressor[t_offset] -= values
regressor = np.cumsum(regressor)
return regressor, hr_frame_times
|
[
"Make a possibly oversampled event regressor from condition information.\n\n Parameters\n ----------\n exp_condition : arraylike of shape (3, n_events)\n yields description of events for this condition as a\n (onsets, durations, amplitudes) triplet\n\n frame_times : array of shape(n_scans)\n sample time points\n\n over_sampling : int, optional\n factor for oversampling event regressor\n\n min_onset : float, optional\n minimal onset relative to frame_times[0] (in seconds)\n events that start before frame_times[0] + min_onset are not considered\n\n Returns\n -------\n regressor: array of shape(over_sampling * n_scans)\n possibly oversampled event regressor\n hr_frame_times : array of shape(over_sampling * n_scans)\n time points used for regressor sampling\n "
] |
Please provide a description of the function:def _resample_regressor(hr_regressor, hr_frame_times, frame_times):
from scipy.interpolate import interp1d
f = interp1d(hr_frame_times, hr_regressor)
return f(frame_times).T
|
[
" this function sub-samples the regressors at frame times\n\n Parameters\n ----------\n hr_regressor : array of shape(n_samples),\n the regressor time course sampled at high temporal resolution\n\n hr_frame_times : array of shape(n_samples),\n the corresponding time stamps\n\n frame_times: array of shape(n_scans),\n the desired time stamps\n\n Returns\n -------\n regressor: array of shape(n_scans)\n the resampled regressor\n "
] |
Please provide a description of the function:def _orthogonalize(X):
if X.size == X.shape[0]:
return X
from scipy.linalg import pinv, norm
for i in range(1, X.shape[1]):
X[:, i] -= np.dot(np.dot(X[:, i], X[:, :i]), pinv(X[:, :i]))
# X[:, i] /= norm(X[:, i])
return X
|
[
" Orthogonalize every column of design `X` w.r.t preceding columns\n\n Parameters\n ----------\n X: array of shape(n, p)\n the data to be orthogonalized\n\n Returns\n -------\n X: array of shape(n, p)\n the data after orthogonalization\n\n Notes\n -----\n X is changed in place. The columns are not normalized\n "
] |
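A quick numerical check of the orthogonalization above on a toy design matrix: each later column loses its projection onto the earlier ones (the data below are random and purely illustrative).
import numpy as np

rng = np.random.RandomState(0)
x1 = rng.randn(100)
X = np.c_[x1, x1 + 0.1 * rng.randn(100)]     # two highly correlated columns
X = _orthogonalize(X.copy())
print(round(float(np.dot(X[:, 0], X[:, 1])), 10))  # ~0: columns are now orthogonal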
Please provide a description of the function:def _regressor_names(con_name, hrf_model, fir_delays=None):
if hrf_model in ['glover', 'spm', None]:
return [con_name]
elif hrf_model in ["glover + derivative", 'spm + derivative']:
return [con_name, con_name + "_derivative"]
elif hrf_model in ['spm + derivative + dispersion',
'glover + derivative + dispersion']:
return [con_name, con_name + "_derivative", con_name + "_dispersion"]
elif hrf_model == 'fir':
return [con_name + "_delay_%d" % i for i in fir_delays]
|
[
" Returns a list of regressor names, computed from con-name and hrf type\n\n Parameters\n ----------\n con_name: string\n identifier of the condition\n\n hrf_model: string or None,\n hrf model chosen\n\n fir_delays: 1D array_like, optional,\n Delays used in case of an FIR model\n\n Returns\n -------\n names: list of strings,\n regressor names\n "
] |
Please provide a description of the function:def _hrf_kernel(hrf_model, tr, oversampling=50, fir_delays=None):
acceptable_hrfs = [
'spm', 'spm + derivative', 'spm + derivative + dispersion', 'fir',
'glover', 'glover + derivative', 'glover + derivative + dispersion',
None]
if hrf_model == 'spm':
hkernel = [spm_hrf(tr, oversampling)]
elif hrf_model == 'spm + derivative':
hkernel = [spm_hrf(tr, oversampling),
spm_time_derivative(tr, oversampling)]
elif hrf_model == 'spm + derivative + dispersion':
hkernel = [spm_hrf(tr, oversampling),
spm_time_derivative(tr, oversampling),
spm_dispersion_derivative(tr, oversampling)]
elif hrf_model == 'glover':
hkernel = [glover_hrf(tr, oversampling)]
elif hrf_model == 'glover + derivative':
hkernel = [glover_hrf(tr, oversampling),
glover_time_derivative(tr, oversampling)]
elif hrf_model == 'glover + derivative + dispersion':
hkernel = [glover_hrf(tr, oversampling),
glover_time_derivative(tr, oversampling),
glover_dispersion_derivative(tr, oversampling)]
elif hrf_model == 'fir':
hkernel = [np.hstack((np.zeros(f * oversampling),
np.ones(oversampling)))
for f in fir_delays]
elif hrf_model is None:
hkernel = [np.hstack((1, np.zeros(oversampling - 1)))]
else:
raise ValueError('"{0}" is not a known hrf model. Use one of {1}'.
format(hrf_model, acceptable_hrfs))
return hkernel
|
[
" Given the specification of the hemodynamic model and time parameters,\n return the list of matching kernels\n\n Parameters\n ----------\n hrf_model : string or None,\n identifier of the hrf model\n\n tr : float\n the repetition time in seconds\n\n oversampling : int, optional\n temporal oversampling factor to have a smooth hrf\n\n fir_delays : list of floats,\n list of delays for finite impulse response models\n\n Returns\n -------\n hkernel : list of arrays\n samples of the hrf (the number depends on the hrf_model used)\n "
] |
Please provide a description of the function:def compute_regressor(exp_condition, hrf_model, frame_times, con_id='cond',
oversampling=50, fir_delays=None, min_onset=-24):
# this is the average tr in this session, not necessarily the true tr
tr = float(frame_times.max()) / (np.size(frame_times) - 1)
# 1. create the high temporal resolution regressor
hr_regressor, hr_frame_times = _sample_condition(
exp_condition, frame_times, oversampling, min_onset)
# 2. create the hrf model(s)
hkernel = _hrf_kernel(hrf_model, tr, oversampling, fir_delays)
# 3. convolve the regressor and hrf, and downsample the regressor
conv_reg = np.array([np.convolve(hr_regressor, h)[:hr_regressor.size]
for h in hkernel])
# 4. temporally resample the regressors
computed_regressors = _resample_regressor(
conv_reg, hr_frame_times, frame_times)
    # 5. orthogonalize the regressors
if hrf_model != 'fir':
computed_regressors = _orthogonalize(computed_regressors)
    # 6. generate regressor names
reg_names = _regressor_names(con_id, hrf_model, fir_delays=fir_delays)
return computed_regressors, reg_names
|
[
" This is the main function to convolve regressors with hrf model\n\n Parameters\n ----------\n exp_condition : array-like of shape (3, n_events)\n yields description of events for this condition as a\n (onsets, durations, amplitudes) triplet\n\n hrf_model : {'spm', 'spm + derivative', 'spm + derivative + dispersion',\n 'glover', 'glover + derivative', 'fir', None}\n Name of the hrf model to be used\n\n frame_times : array of shape (n_scans)\n the desired sampling times\n\n con_id : string\n optional identifier of the condition\n\n oversampling : int, optional\n oversampling factor to perform the convolution\n\n fir_delays : 1D-array-like, optional\n delays (in seconds) used in case of a finite impulse reponse model\n\n min_onset : float, optional\n minimal onset relative to frame_times[0] (in seconds)\n events that start before frame_times[0] + min_onset are not considered\n\n Returns\n -------\n computed_regressors: array of shape(n_scans, n_reg)\n computed regressors sampled at frame times\n\n reg_names: list of strings\n corresponding regressor names\n\n Notes\n -----\n The different hemodynamic models can be understood as follows:\n 'spm': this is the hrf model used in SPM\n 'spm + derivative': SPM model plus its time derivative (2 regressors)\n 'spm + time + dispersion': idem, plus dispersion derivative (3 regressors)\n 'glover': this one corresponds to the Glover hrf\n 'glover + derivative': the Glover hrf + time derivative (2 regressors)\n 'glover + derivative + dispersion': idem + dispersion derivative\n (3 regressors)\n 'fir': finite impulse response basis, a set of delayed dirac models\n with arbitrary length. This one currently assumes regularly spaced\n frame times (i.e. fixed time of repetition).\n It is expected that spm standard and Glover model would not yield\n large differences in most cases.\n\n In case of glover and spm models, the derived regressors are\n orthogonalized wrt the main one.\n "
] |
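A short end-to-end sketch of compute_regressor using the helpers defined above; the condition timing values are made up for illustration.
import numpy as np

tr, n_scans = 2.0, 100
frame_times = np.arange(n_scans) * tr
onsets = np.array([10., 30., 70.])
durations = np.array([1., 1., 1.])
amplitudes = np.array([1., 1., 1.])
exp_condition = (onsets, durations, amplitudes)

reg, names = compute_regressor(exp_condition, 'glover', frame_times,
                               con_id='stim')
print(names)       # ['stim']
print(reg.shape)   # (100, 1): one convolved regressor sampled at frame_times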
Please provide a description of the function:def matches_entities(obj, entities, strict=False):
''' Checks whether an object's entities match the input. '''
if strict and set(obj.entities.keys()) != set(entities.keys()):
return False
comm_ents = list(set(obj.entities.keys()) & set(entities.keys()))
for k in comm_ents:
current = obj.entities[k]
target = entities[k]
if isinstance(target, (list, tuple)):
if current not in target:
return False
elif current != target:
return False
return True
|
[] |
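A tiny self-contained example of matches_entities; _Obj is a hypothetical stand-in for any object exposing an .entities dict.
class _Obj:
    def __init__(self, entities):
        self.entities = entities

f = _Obj({'subject': '01', 'run': 1, 'task': 'rest'})
print(matches_entities(f, {'subject': '01'}))               # True
print(matches_entities(f, {'run': [1, 2]}))                 # True (list means any-of)
print(matches_entities(f, {'subject': '01'}, strict=True))  # False (key sets differ)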
Please provide a description of the function:def natural_sort(l, field=None):
'''
based on snippet found at http://stackoverflow.com/a/4836734/2445984
'''
convert = lambda text: int(text) if text.isdigit() else text.lower()
def alphanum_key(key):
if field is not None:
key = getattr(key, field)
if not isinstance(key, str):
key = str(key)
return [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(l, key=alphanum_key)
|
[] |
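Example of the numeric-aware ordering produced by natural_sort (assuming the function above is in scope):
print(natural_sort(['run-10', 'run-2', 'run-1']))
# ['run-1', 'run-2', 'run-10']  -- plain sorted() would place 'run-10' before 'run-2'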
Please provide a description of the function:def convert_JSON(j):
def camel_to_snake(s):
a = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')
return a.sub(r'_\1', s).lower()
def convertArray(a):
newArr = []
for i in a:
if isinstance(i,list):
newArr.append(convertArray(i))
elif isinstance(i, dict):
newArr.append(convert_JSON(i))
else:
newArr.append(i)
return newArr
out = {}
for k, value in j.items():
newK = camel_to_snake(k)
if isinstance(value, dict):
out[newK] = convert_JSON(value)
elif isinstance(value, list):
out[newK] = convertArray(value)
else:
out[newK] = value
return out
|
[
" Recursively convert CamelCase keys to snake_case.\n From: https://stackoverflow.com/questions/17156078/converting-identifier-naming-between-camelcase-and-underscores-during-json-seria\n "
] |
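Example of the key conversion performed by convert_JSON on a typical BIDS metadata dict (values are illustrative):
meta = {'RepetitionTime': 2.0,
        'SliceTiming': [0.0, 0.5, 1.0],
        'PhaseEncodingDirection': 'j-'}
print(convert_JSON(meta))
# {'repetition_time': 2.0, 'slice_timing': [0.0, 0.5, 1.0],
#  'phase_encoding_direction': 'j-'}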
Please provide a description of the function:def splitext(path):
li = []
path_without_extensions = os.path.join(os.path.dirname(path),
os.path.basename(path).split(os.extsep)[0])
extensions = os.path.basename(path).split(os.extsep)[1:]
li.append(path_without_extensions)
# li.append(extensions) if you want extensions in another list inside the list that is returned.
li.extend(extensions)
return li
|
[
"splitext for paths with directories that may contain dots.\n From https://stackoverflow.com/questions/5930036/separating-file-extensions-using-python-os-path-module"
] |
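Example showing how this splitext handles multi-part extensions (the path is hypothetical):
print(splitext('/data/sub-01/func/sub-01_task-rest_bold.nii.gz'))
# ['/data/sub-01/func/sub-01_task-rest_bold', 'nii', 'gz']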
Please provide a description of the function:def check_path_matches_patterns(path, patterns):
''' Check if the path matches at least one of the provided patterns. '''
path = os.path.abspath(path)
for patt in patterns:
if isinstance(patt, six.string_types):
if path == patt:
return True
elif patt.search(path):
return True
return False
|
[] |
Please provide a description of the function:def count(self, files=False):
return len(self.files) if files else len(self.unique())
|
[
" Returns a count of unique values or files.\n\n Args:\n files (bool): When True, counts all files mapped to the Entity.\n When False, counts all unique values.\n Returns: an int.\n "
] |
Please provide a description of the function:def _matches(self, entities=None, extensions=None, regex_search=False):
if extensions is not None:
extensions = map(re.escape, listify(extensions))
extensions = '(' + '|'.join(extensions) + ')$'
if re.search(extensions, self.filename) is None:
return False
if entities is None:
return True
for name, val in entities.items():
if (name not in self.entities) ^ (val is None):
return False
if val is None:
continue
def make_patt(x):
patt = str(x)
if not regex_search:
patt = re.escape(patt)
if isinstance(x, (int, float)):
# allow for leading zeros if a number was specified
# regardless of regex_search
patt = '0*' + patt
if not regex_search:
patt = '^{}$'.format(patt)
return patt
ent_patts = [make_patt(x) for x in listify(val)]
patt = '|'.join(ent_patts)
if re.search(patt, str(self.entities[name])) is None:
return False
return True
|
[
"\n Checks whether the file matches all of the passed entities and\n extensions.\n\n Args:\n entities (dict): A dictionary of entity names -> regex patterns.\n extensions (str, list): One or more file extensions to allow.\n regex_search (bool): Whether to require exact match (False) or\n regex search (True) when comparing the query string to each\n entity.\n Returns:\n True if _all_ entities and extensions match; False otherwise.\n "
] |
Please provide a description of the function:def _get_child_class(self, path):
if self._child_entity is None:
return BIDSNode
for i, child_ent in enumerate(listify(self._child_entity)):
template = self.available_entities[child_ent].directory
if template is None:
return BIDSNode
template = self.root_path + template
# Construct regex search pattern from target directory template
to_rep = re.findall(r'\{(.*?)\}', template)
for ent in to_rep:
patt = self.available_entities[ent].pattern
template = template.replace('{%s}' % ent, patt)
template += r'[^\%s]*$' % os.path.sep
if re.match(template, path):
return listify(self._child_class)[i]
return BIDSNode
|
[
" Return the appropriate child class given a subdirectory path.\n \n Args:\n path (str): The path to the subdirectory.\n \n Returns: An uninstantiated BIDSNode or one of its subclasses.\n "
] |
Please provide a description of the function:def index(self):
config_list = self.config
layout = self.layout
for (dirpath, dirnames, filenames) in os.walk(self.path):
# If layout configuration file exists, delete it
layout_file = self.layout.config_filename
if layout_file in filenames:
filenames.remove(layout_file)
for f in filenames:
abs_fn = os.path.join(self.path, f)
# Skip files that fail validation, unless forcibly indexing
if not self.force_index and not layout._validate_file(abs_fn):
continue
bf = BIDSFile(abs_fn, self)
# Extract entity values
match_vals = {}
for e in self.available_entities.values():
m = e.match_file(bf)
if m is None and e.mandatory:
break
if m is not None:
match_vals[e.name] = (e, m)
# Create Entity <=> BIDSFile mappings
if match_vals:
for name, (ent, val) in match_vals.items():
bf.entities[name] = val
ent.add_file(bf.path, val)
self.files.append(bf)
# Also add to the Layout's master list
self.layout.files[bf.path] = bf
root_node = self if self.root is None else self.root
for d in dirnames:
d = os.path.join(dirpath, d)
                # Derivative directories must always be added separately and
                # passed as their own root, so skip them here.
if d.startswith(os.path.join(self.layout.root, 'derivatives')):
continue
# Skip directories that fail validation, unless force_index
# is defined, in which case we have to keep scanning, in the
# event that a file somewhere below the current level matches.
# Unfortunately we probably can't do much better than this
# without a lot of additional work, because the elements of
# .force_index can be SRE_Patterns that match files below in
# unpredictable ways.
if check_path_matches_patterns(d, self.layout.force_index):
self.force_index = True
else:
valid_dir = layout._validate_dir(d)
# Note the difference between self.force_index and
# self.layout.force_index.
if not valid_dir and not self.layout.force_index:
continue
child_class = self._get_child_class(d)
# TODO: filter the config files based on include/exclude rules
child = child_class(d, config_list, root_node, self,
force_index=self.force_index)
if self.force_index or valid_dir:
self.children.append(child)
# prevent subdirectory traversal
break
|
[
" Index all files/directories below the current BIDSNode. "
] |
Please provide a description of the function:def general_acquisition_info(metadata):
out_str = ('MR data were acquired using a {tesla}-Tesla {manu} {model} '
'MRI scanner.')
out_str = out_str.format(tesla=metadata.get('MagneticFieldStrength',
'UNKNOWN'),
manu=metadata.get('Manufacturer', 'MANUFACTURER'),
model=metadata.get('ManufacturersModelName',
'MODEL'))
return out_str
|
[
"\n General sentence on data acquisition. Should be first sentence in MRI data\n acquisition section.\n\n Parameters\n ----------\n metadata : :obj:`dict`\n The metadata for the dataset.\n\n Returns\n -------\n out_str : :obj:`str`\n Output string with scanner information.\n "
] |
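Example of the sentence produced by general_acquisition_info for a typical metadata dictionary (values are illustrative):
meta = {'MagneticFieldStrength': 3,
        'Manufacturer': 'Siemens',
        'ManufacturersModelName': 'Prisma'}
print(general_acquisition_info(meta))
# MR data were acquired using a 3-Tesla Siemens Prisma MRI scanner.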
Please provide a description of the function:def func_info(task, n_runs, metadata, img, config):
if metadata.get('MultibandAccelerationFactor', 1) > 1:
mb_str = '; MB factor={}'.format(metadata['MultibandAccelerationFactor'])
else:
mb_str = ''
if metadata.get('ParallelReductionFactorInPlane', 1) > 1:
pr_str = ('; in-plane acceleration factor='
'{}'.format(metadata['ParallelReductionFactorInPlane']))
else:
pr_str = ''
if 'SliceTiming' in metadata.keys():
so_str = ' in {0} order'.format(get_slice_info(metadata['SliceTiming']))
else:
so_str = ''
if 'EchoTime' in metadata.keys():
if isinstance(metadata['EchoTime'], list):
te = [num_to_str(t*1000) for t in metadata['EchoTime']]
te_temp = ', '.join(te[:-1])
te_temp += ', and {}'.format(te[-1])
te = te_temp
me_str = 'multi-echo '
else:
te = num_to_str(metadata['EchoTime']*1000)
me_str = 'single-echo '
else:
te = 'UNKNOWN'
me_str = 'UNKNOWN-echo'
task_name = metadata.get('TaskName', task+' task')
seqs, variants = get_seqstr(config, metadata)
n_slices, vs_str, ms_str, fov_str = get_sizestr(img)
tr = metadata['RepetitionTime']
n_tps = img.shape[3]
run_secs = math.ceil(n_tps * tr)
mins, secs = divmod(run_secs, 60)
length = '{0}:{1:02.0f}'.format(int(mins), int(secs))
if n_runs == 1:
run_str = '{0} run'.format(num2words(n_runs).title())
else:
run_str = '{0} runs'.format(num2words(n_runs).title())
desc = '''
{run_str} of {task} {variants} {seqs} {me_str} fMRI data were
collected ({n_slices} slices{so_str}; repetition time, TR={tr}ms;
echo time, TE={te}ms; flip angle, FA={fa}<deg>;
field of view, FOV={fov}mm; matrix size={ms};
voxel size={vs}mm{mb_str}{pr_str}).
Each run was {length} minutes in length, during which
{n_vols} functional volumes were acquired.
'''.format(run_str=run_str,
task=task_name,
variants=variants,
seqs=seqs,
me_str=me_str,
n_slices=n_slices,
so_str=so_str,
tr=num_to_str(tr*1000),
te=te,
fa=metadata.get('FlipAngle', 'UNKNOWN'),
vs=vs_str,
fov=fov_str,
ms=ms_str,
length=length,
n_vols=n_tps,
mb_str=mb_str,
pr_str=pr_str
)
desc = desc.replace('\n', ' ').lstrip()
    while '  ' in desc:
        desc = desc.replace('  ', ' ')
return desc
|
[
"\n Generate a paragraph describing T2*-weighted functional scans.\n\n Parameters\n ----------\n task : :obj:`str`\n The name of the task.\n n_runs : :obj:`int`\n The number of runs acquired for this task.\n metadata : :obj:`dict`\n The metadata for the scan from the json associated with the scan.\n img : :obj:`nibabel.Nifti1Image`\n Image corresponding to one of the runs.\n config : :obj:`dict`\n A dictionary with relevant information regarding sequences, sequence\n variants, phase encoding directions, and task names.\n\n Returns\n -------\n desc : :obj:`str`\n A description of the scan's acquisition information.\n "
] |
Please provide a description of the function:def anat_info(suffix, metadata, img, config):
n_slices, vs_str, ms_str, fov_str = get_sizestr(img)
seqs, variants = get_seqstr(config, metadata)
if 'EchoTime' in metadata.keys():
te = num_to_str(metadata['EchoTime']*1000)
else:
te = 'UNKNOWN'
desc = '''
{suffix} {variants} {seqs} structural MRI data were collected
({n_slices} slices; repetition time, TR={tr}ms;
echo time, TE={te}ms; flip angle, FA={fa}<deg>;
field of view, FOV={fov}mm; matrix size={ms}; voxel size={vs}mm).
'''.format(suffix=suffix,
variants=variants,
seqs=seqs,
n_slices=n_slices,
tr=num_to_str(metadata['RepetitionTime']*1000),
te=te,
fa=metadata.get('FlipAngle', 'UNKNOWN'),
vs=vs_str,
fov=fov_str,
ms=ms_str,
)
desc = desc.replace('\n', ' ').lstrip()
    while '  ' in desc:
        desc = desc.replace('  ', ' ')
return desc
|
[
"\n Generate a paragraph describing T1- and T2-weighted structural scans.\n\n Parameters\n ----------\n suffix : :obj:`str`\n T1 or T2.\n metadata : :obj:`dict`\n Data from the json file associated with the scan, in dictionary\n form.\n img : :obj:`nibabel.Nifti1Image`\n The nifti image of the scan.\n config : :obj:`dict`\n A dictionary with relevant information regarding sequences, sequence\n variants, phase encoding directions, and task names.\n\n Returns\n -------\n desc : :obj:`str`\n A description of the scan's acquisition information.\n "
] |
Please provide a description of the function:def dwi_info(bval_file, metadata, img, config):
# Parse bval file
with open(bval_file, 'r') as file_object:
d = file_object.read().splitlines()
bvals = [item for sublist in [l.split(' ') for l in d] for item in sublist]
bvals = sorted([int(v) for v in set(bvals)])
bvals = [str(v) for v in bvals]
if len(bvals) == 1:
bval_str = bvals[0]
elif len(bvals) == 2:
bval_str = ' and '.join(bvals)
else:
bval_str = ', '.join(bvals[:-1])
bval_str += ', and {0}'.format(bvals[-1])
if metadata.get('MultibandAccelerationFactor', 1) > 1:
mb_str = '; MB factor={0}'.format(metadata['MultibandAccelerationFactor'])
else:
mb_str = ''
if 'SliceTiming' in metadata.keys():
so_str = ' in {0} order'.format(get_slice_info(metadata['SliceTiming']))
else:
so_str = ''
if 'EchoTime' in metadata.keys():
te = num_to_str(metadata['EchoTime']*1000)
else:
te = 'UNKNOWN'
n_slices, vs_str, ms_str, fov_str = get_sizestr(img)
n_vecs = img.shape[3]
seqs, variants = get_seqstr(config, metadata)
desc = '''
One run of {variants} {seqs} diffusion-weighted (dMRI) data were collected
({n_slices} slices{so_str}; repetition time, TR={tr}ms;
echo time, TE={te}ms; flip angle, FA={fa}<deg>;
field of view, FOV={fov}mm; matrix size={ms}; voxel size={vs}mm;
b-values of {bval_str} acquired;
{n_vecs} diffusion directions{mb_str}).
'''.format(variants=variants,
seqs=seqs,
n_slices=n_slices,
so_str=so_str,
tr=num_to_str(metadata['RepetitionTime']*1000),
te=te,
fa=metadata.get('FlipAngle', 'UNKNOWN'),
vs=vs_str,
fov=fov_str,
ms=ms_str,
bval_str=bval_str,
n_vecs=n_vecs,
mb_str=mb_str
)
desc = desc.replace('\n', ' ').lstrip()
    while '  ' in desc:
        desc = desc.replace('  ', ' ')
return desc
|
[
"\n Generate a paragraph describing DWI scan acquisition information.\n\n Parameters\n ----------\n bval_file : :obj:`str`\n File containing b-vals associated with DWI scan.\n metadata : :obj:`dict`\n Data from the json file associated with the DWI scan, in dictionary\n form.\n img : :obj:`nibabel.Nifti1Image`\n The nifti image of the DWI scan.\n config : :obj:`dict`\n A dictionary with relevant information regarding sequences, sequence\n variants, phase encoding directions, and task names.\n\n Returns\n -------\n desc : :obj:`str`\n A description of the DWI scan's acquisition information.\n "
] |
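Editor's note: the b-value phrase in dwi_info depends on how many unique values the .bval file contains. A small sketch with a hypothetical file content of "0 1000 1000 2000" (an assumption, not from the source) shows the three-branch join:

    lines = ['0 1000 1000 2000']                           # hypothetical .bval content, split into lines
    bvals = [item for sublist in [l.split(' ') for l in lines] for item in sublist]
    bvals = sorted([int(v) for v in set(bvals)])           # [0, 1000, 2000]
    bvals = [str(v) for v in bvals]
    if len(bvals) == 1:
        bval_str = bvals[0]
    elif len(bvals) == 2:
        bval_str = ' and '.join(bvals)
    else:
        bval_str = ', '.join(bvals[:-1]) + ', and {0}'.format(bvals[-1])
    print(bval_str)                                        # prints '0, 1000, and 2000'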
Please provide a description of the function:def fmap_info(metadata, img, config, layout):
dir_ = config['dir'][metadata['PhaseEncodingDirection']]
n_slices, vs_str, ms_str, fov_str = get_sizestr(img)
seqs, variants = get_seqstr(config, metadata)
if 'EchoTime' in metadata.keys():
te = num_to_str(metadata['EchoTime']*1000)
else:
te = 'UNKNOWN'
if 'IntendedFor' in metadata.keys():
scans = metadata['IntendedFor']
run_dict = {}
for scan in scans:
fn = basename(scan)
iff_file = [f for f in layout.get(extensions='nii.gz') if fn in f.path][0]
run_num = int(iff_file.run)
ty = iff_file.entities['suffix'].upper()
if ty == 'BOLD':
iff_meta = layout.get_metadata(iff_file.path)
task = iff_meta.get('TaskName', iff_file.entities['task'])
ty_str = '{0} {1} scan'.format(task, ty)
else:
ty_str = '{0} scan'.format(ty)
if ty_str not in run_dict.keys():
run_dict[ty_str] = []
run_dict[ty_str].append(run_num)
for scan in run_dict.keys():
run_dict[scan] = [num2words(r, ordinal=True) for r in sorted(run_dict[scan])]
out_list = []
for scan in run_dict.keys():
if len(run_dict[scan]) > 1:
s = 's'
else:
s = ''
run_str = list_to_str(run_dict[scan])
string = '{rs} run{s} of the {sc}'.format(rs=run_str,
s=s,
sc=scan)
out_list.append(string)
for_str = ' for the {0}'.format(list_to_str(out_list))
else:
for_str = ''
desc = '''
A {variants} {seqs} field map (phase encoding:
{dir_}; {n_slices} slices; repetition time, TR={tr}ms;
echo time, TE={te}ms; flip angle, FA={fa}<deg>;
field of view, FOV={fov}mm; matrix size={ms};
voxel size={vs}mm) was acquired{for_str}.
'''.format(variants=variants,
seqs=seqs,
dir_=dir_,
for_str=for_str,
n_slices=n_slices,
tr=num_to_str(metadata['RepetitionTime']*1000),
te=te,
fa=metadata.get('FlipAngle', 'UNKNOWN'),
vs=vs_str,
fov=fov_str,
ms=ms_str)
desc = desc.replace('\n', ' ').lstrip()
    while '  ' in desc:
        desc = desc.replace('  ', ' ')
return desc
|
[
"\n Generate a paragraph describing field map acquisition information.\n\n Parameters\n ----------\n metadata : :obj:`dict`\n Data from the json file associated with the field map, in dictionary\n form.\n img : :obj:`nibabel.Nifti1Image`\n The nifti image of the field map.\n config : :obj:`dict`\n A dictionary with relevant information regarding sequences, sequence\n variants, phase encoding directions, and task names.\n\n Returns\n -------\n desc : :obj:`str`\n A description of the field map's acquisition information.\n "
] |
Please provide a description of the function:def final_paragraph(metadata):
if 'ConversionSoftware' in metadata.keys():
soft = metadata['ConversionSoftware']
vers = metadata['ConversionSoftwareVersion']
software_str = ' using {soft} ({conv_vers})'.format(soft=soft, conv_vers=vers)
else:
software_str = ''
desc = '''
Dicoms were converted to NIfTI-1 format{software_str}.
This section was (in part) generated
automatically using pybids ({meth_vers}).
'''.format(software_str=software_str,
meth_vers=__version__)
desc = desc.replace('\n', ' ').lstrip()
    while '  ' in desc:
        desc = desc.replace('  ', ' ')
return desc
|
[
"\n Describes dicom-to-nifti conversion process and methods generation.\n\n Parameters\n ----------\n metadata : :obj:`dict`\n The metadata for the scan.\n\n Returns\n -------\n desc : :obj:`str`\n Output string with scanner information.\n "
] |
Please provide a description of the function:def parse_niftis(layout, niftis, subj, config, **kwargs):
kwargs = {k: v for k, v in kwargs.items() if v is not None}
description_list = []
skip_task = {} # Only report each task once
for nifti_struct in niftis:
nii_file = nifti_struct.path
metadata = layout.get_metadata(nii_file)
if not metadata:
LOGGER.warning('No json file found for %s', nii_file)
else:
import nibabel as nib
img = nib.load(nii_file)
# Assume all data were acquired the same way.
if not description_list:
description_list.append(general_acquisition_info(metadata))
if nifti_struct.entities['datatype'] == 'func':
if not skip_task.get(nifti_struct.entities['task'], False):
echos = layout.get_echoes(subject=subj, extensions='nii.gz',
task=nifti_struct.entities['task'],
**kwargs)
n_echos = len(echos)
if n_echos > 0:
metadata['EchoTime'] = []
for echo in sorted(echos):
echo_struct = layout.get(subject=subj, echo=echo,
extensions='nii.gz',
task=nifti_struct.entities['task'],
**kwargs)[0]
echo_file = echo_struct.path
echo_meta = layout.get_metadata(echo_file)
metadata['EchoTime'].append(echo_meta['EchoTime'])
n_runs = len(layout.get_runs(subject=subj,
task=nifti_struct.entities['task'],
**kwargs))
description_list.append(func_info(nifti_struct.entities['task'],
n_runs, metadata, img,
config))
skip_task[nifti_struct.entities['task']] = True
elif nifti_struct.entities['datatype'] == 'anat':
suffix = nifti_struct.entities['suffix']
if suffix.endswith('w'):
suffix = suffix[:-1] + '-weighted'
description_list.append(anat_info(suffix, metadata, img,
config))
elif nifti_struct.entities['datatype'] == 'dwi':
bval_file = nii_file.replace('.nii.gz', '.bval')
description_list.append(dwi_info(bval_file, metadata, img,
config))
elif nifti_struct.entities['datatype'] == 'fmap':
description_list.append(fmap_info(metadata, img, config,
layout))
return description_list
|
[
"\n Loop through niftis in a BIDSLayout and generate the appropriate description\n type for each scan. Compile all of the descriptions into a list.\n\n Parameters\n ----------\n layout : :obj:`bids.layout.BIDSLayout`\n Layout object for a BIDS dataset.\n niftis : :obj:`list` or :obj:`grabbit.core.File`\n List of nifti files in layout corresponding to subject/session combo.\n subj : :obj:`str`\n Subject ID.\n config : :obj:`dict`\n Configuration info for methods generation.\n "
] |
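Editor's note: a hedged usage sketch for parse_niftis, assuming a local BIDS dataset at the placeholder path 'bids_dataset' and a config dictionary loaded elsewhere; both names are assumptions for illustration, not part of the source:

    from bids.layout import BIDSLayout

    layout = BIDSLayout('bids_dataset')                    # placeholder dataset path
    subj = layout.get_subjects()[0]
    niftis = layout.get(subject=subj, extensions='nii.gz')
    # 'config' is assumed to be the sequence/variant lookup dictionary that the
    # report module normally loads from its bundled JSON; shown here by name only.
    description_list = parse_niftis(layout, niftis, subj, config)
    print(' '.join(description_list))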
Please provide a description of the function:def track_pageview(self, name, url, duration=0, properties=None, measurements=None):
data = channel.contracts.PageViewData()
data.name = name or NULL_CONSTANT_STRING
data.url = url
data.duration = duration
if properties:
data.properties = properties
if measurements:
data.measurements = measurements
self.track(data, self._context)
|
[
"Send information about the page viewed in the application (a web page for instance).\n\n Args:\n name (str). the name of the page that was viewed.\\n\n url (str). the URL of the page that was viewed.\\n\n duration (int). the duration of the page view in milliseconds. (defaults to: 0)\\n\n properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\\n\n measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)\n "
] |
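Editor's note: a minimal usage sketch, assuming a TelemetryClient constructed with a placeholder instrumentation key:

    from applicationinsights import TelemetryClient

    tc = TelemetryClient('<instrumentation key>')          # placeholder key
    tc.track_pageview('Home', 'http://www.example.com/home', duration=120)
    tc.flush()                                             # push queued telemetry to the service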
Please provide a description of the function:def track_exception(self, type=None, value=None, tb=None, properties=None, measurements=None):
if not type or not value or not tb:
type, value, tb = sys.exc_info()
if not type or not value or not tb:
try:
raise Exception(NULL_CONSTANT_STRING)
except:
type, value, tb = sys.exc_info()
details = channel.contracts.ExceptionDetails()
details.id = 1
details.outer_id = 0
details.type_name = type.__name__
details.message = str(value)
details.has_full_stack = True
counter = 0
for tb_frame_file, tb_frame_line, tb_frame_function, tb_frame_text in traceback.extract_tb(tb):
frame = channel.contracts.StackFrame()
frame.assembly = 'Unknown'
frame.file_name = tb_frame_file
frame.level = counter
frame.line = tb_frame_line
frame.method = tb_frame_function
details.parsed_stack.append(frame)
counter += 1
details.parsed_stack.reverse()
data = channel.contracts.ExceptionData()
data.handled_at = 'UserCode'
data.exceptions.append(details)
if properties:
data.properties = properties
if measurements:
data.measurements = measurements
self.track(data, self._context)
|
[
" Send information about a single exception that occurred in the application.\n\n Args:\n type (Type). the type of the exception that was thrown.\\n\n value (:class:`Exception`). the exception that the client wants to send.\\n\n tb (:class:`Traceback`). the traceback information as returned by :func:`sys.exc_info`.\\n\n properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\\n\n measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)\n "
] |
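Editor's note: a minimal sketch of capturing a handled exception. With no arguments the method falls back to sys.exc_info(), so calling it inside an except block records the active exception (reusing the tc client with a placeholder key from the sketch above):

    try:
        1 / 0
    except ZeroDivisionError:
        tc.track_exception()                               # picks up the active exception via sys.exc_info()
    tc.flush()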
Please provide a description of the function:def track_event(self, name, properties=None, measurements=None):
data = channel.contracts.EventData()
data.name = name or NULL_CONSTANT_STRING
if properties:
data.properties = properties
if measurements:
data.measurements = measurements
self.track(data, self._context)
|
[
" Send information about a single event that has occurred in the context of the application.\n\n Args:\n name (str). the data to associate to this event.\\n\n properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\\n\n measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)\n "
] |
Please provide a description of the function:def track_metric(self, name, value, type=None, count=None, min=None, max=None, std_dev=None, properties=None):
dataPoint = channel.contracts.DataPoint()
dataPoint.name = name or NULL_CONSTANT_STRING
dataPoint.value = value or 0
dataPoint.kind = type or channel.contracts.DataPointType.aggregation
dataPoint.count = count
dataPoint.min = min
dataPoint.max = max
dataPoint.std_dev = std_dev
data = channel.contracts.MetricData()
data.metrics.append(dataPoint)
if properties:
data.properties = properties
self.track(data, self._context)
|
[
"Send information about a single metric data point that was captured for the application.\n\n Args:\n name (str). the name of the metric that was captured.\\n\n value (float). the value of the metric that was captured.\\n\n type (:class:`channel.contracts.DataPointType`). the type of the metric. (defaults to: :func:`channel.contracts.DataPointType.aggregation`)\\n\n count (int). the number of metrics that were aggregated into this data point. (defaults to: None)\\n\n min (float). the minimum of all metrics collected that were aggregated into this data point. (defaults to: None)\\n\n max (float). the maximum of all metrics collected that were aggregated into this data point. (defaults to: None)\\n\n std_dev (float). the standard deviation of all metrics collected that were aggregated into this data point. (defaults to: None)\\n\n properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n "
] |
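Editor's note: a minimal sketch of sending metrics, reusing the tc client from the sketches above; only name and value are required, the aggregation fields are optional:

    tc.track_metric('queue_depth', 42.0)                   # single measurement
    tc.track_metric('response_ms', 120.5,                  # pre-aggregated measurement (hypothetical values)
                    count=10, min=80.0, max=200.0, std_dev=30.2)
    tc.flush()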
Please provide a description of the function:def track_trace(self, name, properties=None, severity=None):
data = channel.contracts.MessageData()
data.message = name or NULL_CONSTANT_STRING
if properties:
data.properties = properties
if severity is not None:
data.severity_level = channel.contracts.MessageData.PYTHON_LOGGING_LEVELS.get(severity)
self.track(data, self._context)
|
[
"Sends a single trace statement.\n\n Args:\n name (str). the trace statement.\\n\n properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\\n\n severity (str). the severity level of this trace, one of DEBUG, INFO, WARNING, ERROR, CRITICAL\n "
] |
Please provide a description of the function:def track_request(self, name, url, success, start_time=None, duration=None, response_code=None, http_method=None, properties=None, measurements=None, request_id=None):
data = channel.contracts.RequestData()
data.id = request_id or str(uuid.uuid4())
data.name = name
data.url = url
data.success = success
data.start_time = start_time or datetime.datetime.utcnow().isoformat() + 'Z'
data.duration = self.__ms_to_duration(duration)
data.response_code = str(response_code) or '200'
data.http_method = http_method or 'GET'
if properties:
data.properties = properties
if measurements:
data.measurements = measurements
self.track(data, self._context)
|
[
"Sends a single request that was captured for the application.\n\n Args:\n name (str). the name for this request. All requests with the same name will be grouped together.\\n\n url (str). the actual URL for this request (to show in individual request instances).\\n\n success (bool). true if the request ended in success, false otherwise.\\n\n start_time (str). the start time of the request. The value should look the same as the one returned by :func:`datetime.isoformat()` (defaults to: None)\\n\n duration (int). the number of milliseconds that this request lasted. (defaults to: None)\\n\n response_code (str). the response code that this request returned. (defaults to: None)\\n\n http_method (str). the HTTP method that triggered this request. (defaults to: None)\\n\n properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\\n\n measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)\\n\n request_id (str). the id for this request. If None, a new uuid will be generated. (defaults to: None)\n "
] |
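Editor's note: a minimal sketch of logging a completed server request, reusing the tc client from the sketches above; the URL and timing values are hypothetical:

    tc.track_request('GET /home', 'http://www.example.com/home', True,
                     duration=27, response_code='200', http_method='GET')
    tc.flush()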
Please provide a description of the function:def track_dependency(self, name, data, type=None, target=None, duration=None, success=None, result_code=None, properties=None, measurements=None, dependency_id=None):
dependency_data = channel.contracts.RemoteDependencyData()
dependency_data.id = dependency_id or str(uuid.uuid4())
dependency_data.name = name
dependency_data.data = data
dependency_data.type = type
dependency_data.target = target
dependency_data.duration = self.__ms_to_duration(duration)
dependency_data.success = success
dependency_data.result_code = str(result_code) or '200'
if properties:
dependency_data.properties = properties
if measurements:
dependency_data.measurements = measurements
self.track(dependency_data, self._context)
|
[
"Sends a single dependency telemetry that was captured for the application.\n\n Args:\n name (str). the name of the command initiated with this dependency call. Low cardinality value. Examples are stored procedure name and URL path template.\\n\n data (str). the command initiated by this dependency call. Examples are SQL statement and HTTP URL with all query parameters.\\n\n type (str). the dependency type name. Low cardinality value for logical grouping of dependencies and interpretation of other fields like commandName and resultCode. Examples are SQL, Azure table, and HTTP. (default to: None)\\n\n target (str). the target site of a dependency call. Examples are server name, host address. (default to: None)\\n\n duration (int). the number of milliseconds that this dependency call lasted. (defaults to: None)\\n\n success (bool). true if the dependency call ended in success, false otherwise. (defaults to: None)\\n\n result_code (str). the result code of a dependency call. Examples are SQL error code and HTTP status code. (defaults to: None)\\n\n properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\\n\n measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)\\n\n id (str). the id for this dependency call. If None, a new uuid will be generated. (defaults to: None)\n "
] |
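Editor's note: a minimal sketch of recording an outbound SQL call, reusing the tc client from the sketches above; the statement, target host, and result code are hypothetical:

    tc.track_dependency('get_user', 'SELECT * FROM users WHERE id = ?',
                        type='SQL', target='users-db.internal',
                        duration=12, success=True, result_code='0')
    tc.flush()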
Please provide a description of the function:def severity_level(self, value):
if value == self._defaults['severityLevel'] and 'severityLevel' in self._values:
del self._values['severityLevel']
else:
self._values['severityLevel'] = value
|
[
"The severity_level property.\n \n Args:\n value (int). the property value.\n "
] |
Please provide a description of the function:def problem_id(self, value):
if value == self._defaults['problemId'] and 'problemId' in self._values:
del self._values['problemId']
else:
self._values['problemId'] = value
|
[
"The problem_id property.\n \n Args:\n value (string). the property value.\n "
] |
Please provide a description of the function:def properties(self):
if 'properties' in self._values:
return self._values['properties']
self._values['properties'] = copy.deepcopy(self._defaults['properties'])
return self._values['properties']
|
[
"The properties property.\n \n Returns:\n (hash). the property value. (defaults to: {})\n "
] |
Please provide a description of the function:def properties(self, value):
if value == self._defaults['properties'] and 'properties' in self._values:
del self._values['properties']
else:
self._values['properties'] = value
|
[
"The properties property.\n \n Args:\n value (hash). the property value.\n "
] |
Please provide a description of the function:def measurements(self):
if 'measurements' in self._values:
return self._values['measurements']
self._values['measurements'] = copy.deepcopy(self._defaults['measurements'])
return self._values['measurements']
|
[
"The measurements property.\n \n Returns:\n (hash). the property value. (defaults to: {})\n "
] |
Please provide a description of the function:def measurements(self, value):
if value == self._defaults['measurements'] and 'measurements' in self._values:
del self._values['measurements']
else:
self._values['measurements'] = value
|
[
"The measurements property.\n \n Args:\n value (hash). the property value.\n "
] |
Please provide a description of the function:def id(self, value):
if value == self._defaults['ai.device.id'] and 'ai.device.id' in self._values:
del self._values['ai.device.id']
else:
self._values['ai.device.id'] = value
|
[
"The id property.\n \n Args:\n value (string). the property value.\n "
] |
Please provide a description of the function:def locale(self, value):
if value == self._defaults['ai.device.locale'] and 'ai.device.locale' in self._values:
del self._values['ai.device.locale']
else:
self._values['ai.device.locale'] = value
|
[
"The locale property.\n \n Args:\n value (string). the property value.\n "
] |
Please provide a description of the function:def model(self, value):
if value == self._defaults['ai.device.model'] and 'ai.device.model' in self._values:
del self._values['ai.device.model']
else:
self._values['ai.device.model'] = value
|
[
"The model property.\n \n Args:\n value (string). the property value.\n "
] |
Please provide a description of the function:def oem_name(self, value):
if value == self._defaults['ai.device.oemName'] and 'ai.device.oemName' in self._values:
del self._values['ai.device.oemName']
else:
self._values['ai.device.oemName'] = value
|
[
"The oem_name property.\n \n Args:\n value (string). the property value.\n "
] |
Please provide a description of the function:def os_version(self, value):
if value == self._defaults['ai.device.osVersion'] and 'ai.device.osVersion' in self._values:
del self._values['ai.device.osVersion']
else:
self._values['ai.device.osVersion'] = value
|
[
"The os_version property.\n \n Args:\n value (string). the property value.\n "
] |
Please provide a description of the function:def put(self, item):
QueueBase.put(self, item)
if self.sender:
self.sender.start()
|
[
"Adds the passed in item object to the queue and notifies the :func:`sender` to start an asynchronous\n send operation by calling :func:`start`.\n\n Args:\n item (:class:`contracts.Envelope`) the telemetry envelope object to send to the service.\n "
] |
Please provide a description of the function:def flush(self):
self._flush_notification.set()
if self.sender:
self.sender.start()
|
[
"Flushes the current queue by notifying the :func:`sender` via the :func:`flush_notification` event.\n "
] |
Please provide a description of the function:def role(self, value):
if value == self._defaults['ai.cloud.role'] and 'ai.cloud.role' in self._values:
del self._values['ai.cloud.role']
else:
self._values['ai.cloud.role'] = value
|
[
"The role property.\n \n Args:\n value (string). the property value.\n "
] |
Please provide a description of the function:def role_instance(self, value):
if value == self._defaults['ai.cloud.roleInstance'] and 'ai.cloud.roleInstance' in self._values:
del self._values['ai.cloud.roleInstance']
else:
self._values['ai.cloud.roleInstance'] = value
|
[
"The role_instance property.\n \n Args:\n value (string). the property value.\n "
] |
Please provide a description of the function:def send(self, data_to_send):
request_payload = json.dumps([ a.write() for a in data_to_send ])
request = HTTPClient.Request(self._service_endpoint_uri, bytearray(request_payload, 'utf-8'), { 'Accept': 'application/json', 'Content-Type' : 'application/json; charset=utf-8' })
try:
response = HTTPClient.urlopen(request, timeout=self._timeout)
status_code = response.getcode()
if 200 <= status_code < 300:
return
except HTTPError as e:
if e.getcode() == 400:
return
except Exception as e:
pass
# Add our unsent data back on to the queue
for data in data_to_send:
self._queue.put(data)
|
[
" Immediately sends the data passed in to :func:`service_endpoint_uri`. If the service request fails, the\n passed in items are pushed back to the :func:`queue`.\n\n Args:\n data_to_send (Array): an array of :class:`contracts.Envelope` objects to send to the service.\n "
] |
Please provide a description of the function:def dummy_client(reason):
sender = applicationinsights.channel.NullSender()
queue = applicationinsights.channel.SynchronousQueue(sender)
channel = applicationinsights.channel.TelemetryChannel(None, queue)
return applicationinsights.TelemetryClient("00000000-0000-0000-0000-000000000000", channel)
|
[
"Creates a dummy channel so even if we're not logging telemetry, we can still send\n along the real object to things that depend on it to exist"
] |
Please provide a description of the function:def enable(instrumentation_key, *args, **kwargs):
if not instrumentation_key:
raise Exception('Instrumentation key was required but not provided')
global original_excepthook
global telemetry_channel
telemetry_channel = kwargs.get('telemetry_channel')
if not original_excepthook:
original_excepthook = sys.excepthook
sys.excepthook = intercept_excepthook
if instrumentation_key not in enabled_instrumentation_keys:
enabled_instrumentation_keys.append(instrumentation_key)
|
[
"Enables the automatic collection of unhandled exceptions. Captured exceptions will be sent to the Application\n Insights service before being re-thrown. Multiple calls to this function with different instrumentation keys result\n in multiple instances being submitted, one for each key.\n\n .. code:: python\n\n from applicationinsights.exceptions import enable\n\n # set up exception capture\n enable('<YOUR INSTRUMENTATION KEY GOES HERE>')\n\n # raise an exception (this will be sent to the Application Insights service as an exception telemetry object)\n raise Exception('Boom!')\n\n Args:\n instrumentation_key (str). the instrumentation key to use while sending telemetry to the service.\n "
] |
Please provide a description of the function:def init_app(self, app):
self._key = app.config.get(CONF_KEY) or getenv(CONF_KEY)
if not self._key:
return
self._endpoint_uri = app.config.get(CONF_ENDPOINT_URI)
sender = AsynchronousSender(self._endpoint_uri)
queue = AsynchronousQueue(sender)
self._channel = TelemetryChannel(None, queue)
self._init_request_logging(app)
self._init_trace_logging(app)
self._init_exception_logging(app)
|
[
"\n Initializes the extension for the provided Flask application.\n\n Args:\n app (flask.Flask). the Flask application for which to initialize the extension.\n "
] |
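Editor's note: a hedged sketch of wiring the extension into a Flask app. The AppInsights class name, its import path, and the 'APPINSIGHTS_INSTRUMENTATIONKEY' config key are assumptions about how CONF_KEY is exposed, not confirmed by this excerpt:

    from flask import Flask
    from applicationinsights.flask.ext import AppInsights       # assumed import path

    app = Flask(__name__)
    app.config['APPINSIGHTS_INSTRUMENTATIONKEY'] = '<instrumentation key>'  # assumed config key
    appinsights = AppInsights(app)                               # calls init_app(app) internally

    @app.route('/')
    def index():
        app.logger.info('request handled')                       # routed through the trace log handler
        return 'ok'

    # appinsights.flush() can be called (e.g. in a teardown hook) to push queued telemetry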
Please provide a description of the function:def _init_request_logging(self, app):
enabled = not app.config.get(CONF_DISABLE_REQUEST_LOGGING, False)
if not enabled:
return
self._requests_middleware = WSGIApplication(
self._key, app.wsgi_app, telemetry_channel=self._channel)
app.wsgi_app = self._requests_middleware
|
[
"\n Sets up request logging unless ``APPINSIGHTS_DISABLE_REQUEST_LOGGING``\n is set in the Flask config.\n\n Args:\n app (flask.Flask). the Flask application for which to initialize the extension.\n "
] |
Please provide a description of the function:def _init_trace_logging(self, app):
enabled = not app.config.get(CONF_DISABLE_TRACE_LOGGING, False)
if not enabled:
return
self._trace_log_handler = LoggingHandler(
self._key, telemetry_channel=self._channel)
app.logger.addHandler(self._trace_log_handler)
|
[
"\n Sets up trace logging unless ``APPINSIGHTS_DISABLE_TRACE_LOGGING`` is\n set in the Flask config.\n\n Args:\n app (flask.Flask). the Flask application for which to initialize the extension.\n "
] |
Please provide a description of the function:def _init_exception_logging(self, app):
enabled = not app.config.get(CONF_DISABLE_EXCEPTION_LOGGING, False)
if not enabled:
return
exception_telemetry_client = TelemetryClient(
self._key, telemetry_channel=self._channel)
@app.errorhandler(Exception)
def exception_handler(exception):
if HTTPException and isinstance(exception, HTTPException):
return exception
try:
raise exception
except Exception:
exception_telemetry_client.track_exception()
finally:
raise exception
self._exception_telemetry_client = exception_telemetry_client
|
[
"\n Sets up exception logging unless ``APPINSIGHTS_DISABLE_EXCEPTION_LOGGING``\n is set in the Flask config.\n\n Args:\n app (flask.Flask). the Flask application for which to initialize the extension.\n "
] |
Please provide a description of the function:def flush(self):
if self._requests_middleware:
self._requests_middleware.flush()
if self._trace_log_handler:
self._trace_log_handler.flush()
if self._exception_telemetry_client:
self._exception_telemetry_client.flush()
|
[
"Flushes the queued up telemetry to the service.\n "
] |
Please provide a description of the function:def run_location(self, value):
if value == self._defaults['runLocation'] and 'runLocation' in self._values:
del self._values['runLocation']
else:
self._values['runLocation'] = value
|
[
"The run_location property.\n \n Args:\n value (string). the property value.\n "
] |
Please provide a description of the function:def message(self, value):
if value == self._defaults['message'] and 'message' in self._values:
del self._values['message']
else:
self._values['message'] = value
|
[
"The message property.\n \n Args:\n value (string). the property value.\n "
] |
Please provide a description of the function:def parent_id(self, value):
if value == self._defaults['ai.operation.parentId'] and 'ai.operation.parentId' in self._values:
del self._values['ai.operation.parentId']
else:
self._values['ai.operation.parentId'] = value
|
[
"The parent_id property.\n \n Args:\n value (string). the property value.\n "
] |
Please provide a description of the function:def synthetic_source(self, value):
if value == self._defaults['ai.operation.syntheticSource'] and 'ai.operation.syntheticSource' in self._values:
del self._values['ai.operation.syntheticSource']
else:
self._values['ai.operation.syntheticSource'] = value
|
[
"The synthetic_source property.\n \n Args:\n value (string). the property value.\n "
] |
Please provide a description of the function:def correlation_vector(self, value):
if value == self._defaults['ai.operation.correlationVector'] and 'ai.operation.correlationVector' in self._values:
del self._values['ai.operation.correlationVector']
else:
self._values['ai.operation.correlationVector'] = value
|
[
"The correlation_vector property.\n \n Args:\n value (string). the property value.\n "
] |
Please provide a description of the function:def put(self, item):
if not item:
return
self._queue.put(item)
if self._queue.qsize() >= self._max_queue_length:
self.flush()
|
[
"Adds the passed in item object to the queue and calls :func:`flush` if the size of the queue is larger\n than :func:`max_queue_length`. This method does nothing if the passed in item is None.\n\n Args:\n item (:class:`contracts.Envelope`) item the telemetry envelope object to send to the service.\n "
] |
Please provide a description of the function:def get(self):
try:
item = self._queue.get_nowait()
except (Empty, PersistEmpty):
return None
if self._persistence_path:
self._queue.task_done()
return item
|
[
"Gets a single item from the queue and returns it. If the queue is empty, this method will return None.\n\n Returns:\n :class:`contracts.Envelope`. a telemetry envelope object or None if the queue is empty.\n "
] |
Please provide a description of the function:def enable(instrumentation_key, *args, **kwargs):
if not instrumentation_key:
raise Exception('Instrumentation key was required but not provided')
if instrumentation_key in enabled_instrumentation_keys:
logging.getLogger().removeHandler(enabled_instrumentation_keys[instrumentation_key])
async_ = kwargs.pop('async_', False)
endpoint = kwargs.pop('endpoint', None)
telemetry_channel = kwargs.get('telemetry_channel')
if telemetry_channel and async_:
raise Exception('Incompatible arguments async_ and telemetry_channel')
if telemetry_channel and endpoint:
raise Exception('Incompatible arguments endpoint and telemetry_channel')
if not telemetry_channel:
if async_:
sender, queue = AsynchronousSender, AsynchronousQueue
else:
sender, queue = SynchronousSender, SynchronousQueue
kwargs['telemetry_channel'] = TelemetryChannel(queue=queue(sender(endpoint)))
log_level = kwargs.pop('level', logging.INFO)
handler = LoggingHandler(instrumentation_key, *args, **kwargs)
handler.setLevel(log_level)
enabled_instrumentation_keys[instrumentation_key] = handler
logging.getLogger().addHandler(handler)
return handler
|
[
"Enables the Application Insights logging handler for the root logger for the supplied instrumentation key.\n Multiple calls to this function with different instrumentation keys result in multiple handler instances.\n\n .. code:: python\n\n import logging\n from applicationinsights.logging import enable\n\n # set up logging\n enable('<YOUR INSTRUMENTATION KEY GOES HERE>')\n\n # log something (this will be sent to the Application Insights service as a trace)\n logging.info('This is a message')\n\n # logging shutdown will cause a flush of all un-sent telemetry items\n # alternatively set up an async channel via enable('<YOUR INSTRUMENTATION KEY GOES HERE>', async_=True)\n\n Args:\n instrumentation_key (str). the instrumentation key to use while sending telemetry to the service.\n\n Keyword Args:\n async_ (bool): Whether to use an async channel for the telemetry. Defaults to False.\n endpoint (str): The custom endpoint to which to send the telemetry. Defaults to None.\n level (Union[int, str]): The level to set for the logger. Defaults to INFO.\n\n Returns:\n :class:`ApplicationInsightsHandler`. the newly created or existing handler.\n "
] |
Please provide a description of the function:def emit(self, record):
# the set of properties that will ride with the record
properties = {
'process': record.processName,
'module': record.module,
'fileName': record.filename,
'lineNumber': record.lineno,
'level': record.levelname,
}
# if we have exec_info, we will use it as an exception
if record.exc_info:
self.client.track_exception(*record.exc_info, properties=properties)
return
# if we don't simply format the message and send the trace
formatted_message = self.format(record)
self.client.track_trace(formatted_message, properties=properties, severity=record.levelname)
|
[
"Emit a record.\n\n If a formatter is specified, it is used to format the record. If exception information is present, an Exception\n telemetry object is sent instead of a Trace telemetry object.\n\n Args:\n record (:class:`logging.LogRecord`). the record to format and send.\n "
] |
Please provide a description of the function:def flush(self):
local_sender = self.sender
if not local_sender:
return
while True:
# get at most send_buffer_size items and send them
data = []
while len(data) < local_sender.send_buffer_size:
item = self.get()
if not item:
break
data.append(item)
if len(data) == 0:
break
local_sender.send(data)
|
[
"Flushes the current queue by by calling :func:`sender`'s :func:`send` method.\n "
] |
Please provide a description of the function:def write(self, data, context=None):
local_context = context or self._context
if not local_context:
raise Exception('Context was required but not provided')
if not data:
raise Exception('Data was required but not provided')
envelope = contracts.Envelope()
envelope.name = data.ENVELOPE_TYPE_NAME
envelope.time = datetime.datetime.utcnow().isoformat() + 'Z'
envelope.ikey = local_context.instrumentation_key
tags = envelope.tags
for prop_context in [self._context, context]:
if not prop_context:
continue
for key, value in self._write_tags(prop_context):
tags[key] = value
envelope.data = contracts.Data()
envelope.data.base_type = data.DATA_TYPE_NAME
for prop_context in [context, self._context]:
if not prop_context:
continue
if hasattr(data, 'properties') and prop_context.properties:
properties = data.properties
for key in prop_context.properties:
if key not in properties:
properties[key] = prop_context.properties[key]
envelope.data.base_data = data
self._queue.put(envelope)
|
[
"Enqueues the passed in data to the :func:`queue`. If the caller specifies a context as well, it will\r\n take precedence over the instance in :func:`context`.\r\n\r\n Args:\r\n data (object). data the telemetry data to send. This will be wrapped in an :class:`contracts.Envelope`\r\n before being enqueued to the :func:`queue`.\r\n context (:class:`TelemetryContext`). context the override context to use when constructing the\r\n :class:`contracts.Envelope`.\r\n "
] |
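Editor's note: a minimal sketch of assembling the queue/sender/channel pipeline by hand and writing through a client; the synchronous classes mirror the asynchronous ones used elsewhere in this module, and the key is a placeholder:

    from applicationinsights import TelemetryClient
    from applicationinsights.channel import SynchronousSender, SynchronousQueue, TelemetryChannel

    sender = SynchronousSender()                           # default service endpoint
    queue = SynchronousQueue(sender)
    channel = TelemetryChannel(None, queue)                # context is supplied per write() call
    tc = TelemetryClient('<instrumentation key>', channel)
    tc.track_event('channel_demo')                         # arrives at write() as an EventData payload
    tc.flush()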
Please provide a description of the function:def base_type(self, value):
if value == self._defaults['baseType'] and 'baseType' in self._values:
del self._values['baseType']
else:
self._values['baseType'] = value
|
[
"The base_type property.\n \n Args:\n value (string). the property value.\n "
] |
Please provide a description of the function:def ip(self, value):
if value == self._defaults['ai.location.ip'] and 'ai.location.ip' in self._values:
del self._values['ai.location.ip']
else:
self._values['ai.location.ip'] = value
|
[
"The ip property.\n \n Args:\n value (string). the property value.\n "
] |
Please provide a description of the function:def ver(self, value):
if value == self._defaults['ver'] and 'ver' in self._values:
del self._values['ver']
else:
self._values['ver'] = value
|
[
"The ver property.\n \n Args:\n value (int). the property value.\n "
] |
Please provide a description of the function:def sample_rate(self, value):
if value == self._defaults['sampleRate'] and 'sampleRate' in self._values:
del self._values['sampleRate']
else:
self._values['sampleRate'] = value
|
[
"The sample_rate property.\n \n Args:\n value (float). the property value.\n "
] |
Please provide a description of the function:def seq(self, value):
if value == self._defaults['seq'] and 'seq' in self._values:
del self._values['seq']
else:
self._values['seq'] = value
|
[
"The seq property.\n \n Args:\n value (string). the property value.\n "
] |