def beat(ref, est, **kwargs):
    r'''Beat tracking evaluation

    Parameters
    ----------
    ref : jams.Annotation
        Reference annotation object
    est : jams.Annotation
        Estimated annotation object
    kwargs
        Additional keyword arguments

    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.

    See Also
    --------
    mir_eval.beat.evaluate

    Examples
    --------
    >>> # Load in the JAMS objects
    >>> ref_jam = jams.load('reference.jams')
    >>> est_jam = jams.load('estimated.jams')
    >>> # Select the first relevant annotations
    >>> ref_ann = ref_jam.search(namespace='beat')[0]
    >>> est_ann = est_jam.search(namespace='beat')[0]
    >>> scores = jams.eval.beat(ref_ann, est_ann)
    '''
    namespace = 'beat'
    ref = coerce_annotation(ref, namespace)
    est = coerce_annotation(est, namespace)

    ref_times, _ = ref.to_event_values()
    est_times, _ = est.to_event_values()

    return mir_eval.beat.evaluate(ref_times, est_times, **kwargs)

def chord(ref, est, **kwargs):
    r'''Chord evaluation

    Parameters
    ----------
    ref : jams.Annotation
        Reference annotation object
    est : jams.Annotation
        Estimated annotation object
    kwargs
        Additional keyword arguments

    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.

    See Also
    --------
    mir_eval.chord.evaluate

    Examples
    --------
    >>> # Load in the JAMS objects
    >>> ref_jam = jams.load('reference.jams')
    >>> est_jam = jams.load('estimated.jams')
    >>> # Select the first relevant annotations
    >>> ref_ann = ref_jam.search(namespace='chord')[0]
    >>> est_ann = est_jam.search(namespace='chord')[0]
    >>> scores = jams.eval.chord(ref_ann, est_ann)
    '''
    namespace = 'chord'
    ref = coerce_annotation(ref, namespace)
    est = coerce_annotation(est, namespace)

    ref_interval, ref_value = ref.to_interval_values()
    est_interval, est_value = est.to_interval_values()

    return mir_eval.chord.evaluate(ref_interval, ref_value,
                                   est_interval, est_value, **kwargs)

def hierarchy_flatten(annotation):
    '''Flatten a multi_segment annotation into mir_eval style.

    Parameters
    ----------
    annotation : jams.Annotation
        An annotation in the `multi_segment` namespace

    Returns
    -------
    hier_intervals : list
        A list of lists of intervals, ordered by increasing specificity.
    hier_labels : list
        A list of lists of labels, ordered by increasing specificity.
    '''
    intervals, values = annotation.to_interval_values()

    ordering = dict()

    for interval, value in zip(intervals, values):
        level = value['level']
        if level not in ordering:
            ordering[level] = dict(intervals=list(), labels=list())

        ordering[level]['intervals'].append(interval)
        ordering[level]['labels'].append(value['label'])

    levels = sorted(list(ordering.keys()))
    hier_intervals = [ordering[level]['intervals'] for level in levels]
    hier_labels = [ordering[level]['labels'] for level in levels]

    return hier_intervals, hier_labels

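# Illustrative usage sketch for hierarchy_flatten (not part of the original
# source). It assumes a `multi_segment` annotation whose values are dicts
# with 'label' and 'level' fields, which is all the function above reads:
#
#     ann = jams.Annotation(namespace='multi_segment')
#     ann.append(time=0, duration=10, value=dict(label='A', level=0))
#     ann.append(time=0, duration=5, value=dict(label='a', level=1))
#     ann.append(time=5, duration=5, value=dict(label='b', level=1))
#     hier_intervals, hier_labels = hierarchy_flatten(ann)
#     # hier_labels -> [['A'], ['a', 'b']]  (one list per level)
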
def hierarchy(ref, est, **kwargs):
    r'''Multi-level segmentation evaluation

    Parameters
    ----------
    ref : jams.Annotation
        Reference annotation object
    est : jams.Annotation
        Estimated annotation object
    kwargs
        Additional keyword arguments

    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.

    See Also
    --------
    mir_eval.hierarchy.evaluate

    Examples
    --------
    >>> # Load in the JAMS objects
    >>> ref_jam = jams.load('reference.jams')
    >>> est_jam = jams.load('estimated.jams')
    >>> # Select the first relevant annotations
    >>> ref_ann = ref_jam.search(namespace='multi_segment')[0]
    >>> est_ann = est_jam.search(namespace='multi_segment')[0]
    >>> scores = jams.eval.hierarchy(ref_ann, est_ann)
    '''
    namespace = 'multi_segment'
    ref = coerce_annotation(ref, namespace)
    est = coerce_annotation(est, namespace)

    ref_hier, ref_hier_lab = hierarchy_flatten(ref)
    est_hier, est_hier_lab = hierarchy_flatten(est)

    return mir_eval.hierarchy.evaluate(ref_hier, ref_hier_lab,
                                       est_hier, est_hier_lab,
                                       **kwargs)

def tempo(ref, est, **kwargs):
    r'''Tempo evaluation

    Parameters
    ----------
    ref : jams.Annotation
        Reference annotation object
    est : jams.Annotation
        Estimated annotation object
    kwargs
        Additional keyword arguments

    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.

    See Also
    --------
    mir_eval.tempo.evaluate

    Examples
    --------
    >>> # Load in the JAMS objects
    >>> ref_jam = jams.load('reference.jams')
    >>> est_jam = jams.load('estimated.jams')
    >>> # Select the first relevant annotations
    >>> ref_ann = ref_jam.search(namespace='tempo')[0]
    >>> est_ann = est_jam.search(namespace='tempo')[0]
    >>> scores = jams.eval.tempo(ref_ann, est_ann)
    '''
    ref = coerce_annotation(ref, 'tempo')
    est = coerce_annotation(est, 'tempo')

    ref_tempi = np.asarray([o.value for o in ref])
    ref_weight = ref.data[0].confidence
    est_tempi = np.asarray([o.value for o in est])

    return mir_eval.tempo.evaluate(ref_tempi, ref_weight, est_tempi, **kwargs)

def melody(ref, est, **kwargs):
    r'''Melody extraction evaluation

    Parameters
    ----------
    ref : jams.Annotation
        Reference annotation object
    est : jams.Annotation
        Estimated annotation object
    kwargs
        Additional keyword arguments

    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.

    See Also
    --------
    mir_eval.melody.evaluate

    Examples
    --------
    >>> # Load in the JAMS objects
    >>> ref_jam = jams.load('reference.jams')
    >>> est_jam = jams.load('estimated.jams')
    >>> # Select the first relevant annotations
    >>> ref_ann = ref_jam.search(namespace='pitch_contour')[0]
    >>> est_ann = est_jam.search(namespace='pitch_contour')[0]
    >>> scores = jams.eval.melody(ref_ann, est_ann)
    '''
    namespace = 'pitch_contour'
    ref = coerce_annotation(ref, namespace)
    est = coerce_annotation(est, namespace)

    ref_times, ref_p = ref.to_event_values()
    est_times, est_p = est.to_event_values()

    ref_freq = np.asarray([p['frequency'] * (-1)**(~p['voiced']) for p in ref_p])
    est_freq = np.asarray([p['frequency'] * (-1)**(~p['voiced']) for p in est_p])

    return mir_eval.melody.evaluate(ref_times, ref_freq,
                                    est_times, est_freq,
                                    **kwargs)

def pattern_to_mireval(ann):
    '''Convert a pattern_jku annotation object to mir_eval format.

    Parameters
    ----------
    ann : jams.Annotation
        Must have `namespace='pattern_jku'`

    Returns
    -------
    patterns : list of list of tuples
        - `patterns[x]` is a list containing all occurrences of pattern x

        - `patterns[x][y]` is a list containing all notes for
          occurrence y of pattern x

        - `patterns[x][y][z]` contains a time-note tuple
          `(time, midi note)`
    '''
    # It's easier to work with dictionaries, since we can't assume
    # sequential pattern or occurrence identifiers
    patterns = defaultdict(lambda: defaultdict(list))

    # Iterate over the data in interval-value format
    for time, observation in zip(*ann.to_event_values()):

        pattern_id = observation['pattern_id']
        occurrence_id = observation['occurrence_id']
        obs = (time, observation['midi_pitch'])

        # Push this note observation into the correct pattern/occurrence
        patterns[pattern_id][occurrence_id].append(obs)

    # Convert to list-list-tuple format for mir_eval
    return [list(_.values()) for _ in six.itervalues(patterns)]

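# Illustrative usage sketch for pattern_to_mireval (not part of the original
# source). Only 'midi_pitch', 'pattern_id', and 'occurrence_id' are read by
# the function above; any other fields in the pattern_jku value dict are
# assumptions about the full schema:
#
#     ann = jams.Annotation(namespace='pattern_jku')
#     note = dict(midi_pitch=60, morph_pitch=60, staff=0,
#                 pattern_id=1, occurrence_id=1)
#     ann.append(time=0.0, duration=1.0, value=note)
#     patterns = pattern_to_mireval(ann)
#     # patterns -> [[[(0.0, 60)]]]: one pattern, one occurrence, one note
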
def pattern(ref, est, **kwargs):
    r'''Pattern detection evaluation

    Parameters
    ----------
    ref : jams.Annotation
        Reference annotation object
    est : jams.Annotation
        Estimated annotation object
    kwargs
        Additional keyword arguments

    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.

    See Also
    --------
    mir_eval.pattern.evaluate

    Examples
    --------
    >>> # Load in the JAMS objects
    >>> ref_jam = jams.load('reference.jams')
    >>> est_jam = jams.load('estimated.jams')
    >>> # Select the first relevant annotations
    >>> ref_ann = ref_jam.search(namespace='pattern_jku')[0]
    >>> est_ann = est_jam.search(namespace='pattern_jku')[0]
    >>> scores = jams.eval.pattern(ref_ann, est_ann)
    '''
    namespace = 'pattern_jku'
    ref = coerce_annotation(ref, namespace)
    est = coerce_annotation(est, namespace)

    ref_patterns = pattern_to_mireval(ref)
    est_patterns = pattern_to_mireval(est)

    return mir_eval.pattern.evaluate(ref_patterns, est_patterns, **kwargs)

def transcription(ref, est, **kwargs):
    r'''Note transcription evaluation

    Parameters
    ----------
    ref : jams.Annotation
        Reference annotation object
    est : jams.Annotation
        Estimated annotation object
    kwargs
        Additional keyword arguments

    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.

    See Also
    --------
    mir_eval.transcription.evaluate

    Examples
    --------
    >>> # Load in the JAMS objects
    >>> ref_jam = jams.load('reference.jams')
    >>> est_jam = jams.load('estimated.jams')
    >>> # Select the first relevant annotations. You can use any annotation
    >>> # type that can be converted to pitch_contour (such as pitch_midi)
    >>> ref_ann = ref_jam.search(namespace='pitch_contour')[0]
    >>> est_ann = est_jam.search(namespace='note_hz')[0]
    >>> scores = jams.eval.transcription(ref_ann, est_ann)
    '''
    namespace = 'pitch_contour'
    ref = coerce_annotation(ref, namespace)
    est = coerce_annotation(est, namespace)

    ref_intervals, ref_p = ref.to_interval_values()
    est_intervals, est_p = est.to_interval_values()

    ref_pitches = np.asarray([p['frequency'] * (-1)**(~p['voiced']) for p in ref_p])
    est_pitches = np.asarray([p['frequency'] * (-1)**(~p['voiced']) for p in est_p])

    return mir_eval.transcription.evaluate(
        ref_intervals, ref_pitches, est_intervals, est_pitches, **kwargs)

def add_namespace(filename):
    '''Add a namespace definition to our working set.

    Namespace files consist of partial JSON schemas defining the behavior
    of the `value` and `confidence` fields of an Annotation.

    Parameters
    ----------
    filename : str
        Path to json file defining the namespace object
    '''
    with open(filename, mode='r') as fileobj:
        __NAMESPACE__.update(json.load(fileobj))

def namespace(ns_key):
    '''Construct a validation schema for a given namespace.

    Parameters
    ----------
    ns_key : str
        Namespace key identifier (eg, 'beat' or 'segment_tut')

    Returns
    -------
    schema : dict
        JSON schema of `namespace`
    '''
    if ns_key not in __NAMESPACE__:
        raise NamespaceError('Unknown namespace: {:s}'.format(ns_key))

    sch = copy.deepcopy(JAMS_SCHEMA['definitions']['SparseObservation'])

    for key in ['value', 'confidence']:
        try:
            sch['properties'][key] = __NAMESPACE__[ns_key][key]
        except KeyError:
            pass

    return sch

def namespace_array(ns_key):
    '''Construct a validation schema for arrays of a given namespace.

    Parameters
    ----------
    ns_key : str
        Namespace key identifier

    Returns
    -------
    schema : dict
        JSON schema of `namespace` observation arrays
    '''
    obs_sch = namespace(ns_key)
    obs_sch['title'] = 'Observation'

    sch = copy.deepcopy(JAMS_SCHEMA['definitions']['SparseObservationList'])
    sch['items'] = obs_sch

    return sch

def values(ns_key):
    '''Return the allowed values for an enumerated namespace.

    Parameters
    ----------
    ns_key : str
        Namespace key identifier

    Returns
    -------
    values : list

    Raises
    ------
    NamespaceError
        If `ns_key` is not found, or does not have enumerated values

    Examples
    --------
    >>> jams.schema.values('tag_gtzan')
    ['blues', 'classical', 'country', 'disco', 'hip-hop', 'jazz',
     'metal', 'pop', 'reggae', 'rock']
    '''
    if ns_key not in __NAMESPACE__:
        raise NamespaceError('Unknown namespace: {:s}'.format(ns_key))

    if 'enum' not in __NAMESPACE__[ns_key]['value']:
        raise NamespaceError('Namespace {:s} is not enumerated'.format(ns_key))

    return copy.copy(__NAMESPACE__[ns_key]['value']['enum'])

def get_dtypes(ns_key):
    '''Get the dtypes associated with the value and confidence fields
    for a given namespace.

    Parameters
    ----------
    ns_key : str
        The namespace key in question

    Returns
    -------
    value_dtype, confidence_dtype : numpy.dtype
        Type identifiers for value and confidence fields.
    '''
    # First, get the schema
    if ns_key not in __NAMESPACE__:
        raise NamespaceError('Unknown namespace: {:s}'.format(ns_key))

    value_dtype = __get_dtype(__NAMESPACE__[ns_key].get('value', {}))
    confidence_dtype = __get_dtype(__NAMESPACE__[ns_key].get('confidence', {}))

    return value_dtype, confidence_dtype

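# Usage sketch (illustrative): the exact dtypes returned depend on the
# 'type' fields in the registered namespace definition, via __get_dtype
# and __TYPE_MAP__ below:
#
#     value_dtype, confidence_dtype = get_dtypes('beat')
#     # e.g. a numeric dtype for 'beat' values; np.object_ for enum values
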
def list_namespaces():
    '''Print out a listing of available namespaces'''
    print('{:30s}\t{:40s}'.format('NAME', 'DESCRIPTION'))
    print('-' * 78)

    for sch in sorted(__NAMESPACE__):
        desc = __NAMESPACE__[sch]['description']
        desc = (desc[:44] + '..') if len(desc) > 46 else desc
        print('{:30s}\t{:40s}'.format(sch, desc))

def __get_dtype(typespec):
    '''Get the dtype associated with a jsonschema type definition

    Parameters
    ----------
    typespec : dict
        The schema definition

    Returns
    -------
    dtype : numpy.dtype
        The associated dtype
    '''
    if 'type' in typespec:
        return __TYPE_MAP__.get(typespec['type'], np.object_)

    elif 'enum' in typespec:
        # Enums map to objects
        return np.object_

    elif 'oneOf' in typespec:
        # Recurse
        types = [__get_dtype(v) for v in typespec['oneOf']]

        # If they're all equal, return that type; otherwise fall through
        if all([t == types[0] for t in types]):
            return types[0]

    return np.object_

def __load_jams_schema():
    '''Load the schema file from the package.'''
    schema_file = os.path.join(SCHEMA_DIR, 'jams_schema.json')

    jams_schema = None
    with open(resource_filename(__name__, schema_file), mode='r') as fdesc:
        jams_schema = json.load(fdesc)

    if jams_schema is None:
        raise JamsError('Unable to load JAMS schema')

    return jams_schema

def import_lab(namespace, filename, infer_duration=True, **parse_options):
    r'''Load a .lab file as an Annotation object.

    .lab files are assumed to have the following format:

        ``TIME_START\tTIME_END\tANNOTATION``

    By default, .lab files are assumed to have columns separated by one
    or more white-space characters, and have no header or index column
    information.

    If the .lab file contains only two columns, then an empty duration
    field is inferred.

    If the .lab file contains more than three columns, each row's
    annotation value is assigned the contents of the last non-empty column.

    Parameters
    ----------
    namespace : str
        The namespace for the new annotation
    filename : str
        Path to the .lab file
    infer_duration : bool
        If `True`, interval durations are inferred from `(start, end)` columns,
        or from the difference between successive times.

        If `False`, interval durations are assumed to be explicitly coded as
        `(start, duration)` columns.  If only one time column is given, then
        durations are set to 0.

        For instantaneous event annotations (e.g., beats or onsets), this
        should be set to `False`.
    parse_options : additional keyword arguments
        Passed to ``pandas.read_csv``

    Returns
    -------
    annotation : Annotation
        The newly constructed annotation object

    See Also
    --------
    pandas.read_csv
    '''
    # Create a new annotation object
    annotation = core.Annotation(namespace)

    parse_options.setdefault('sep', r'\s+')
    parse_options.setdefault('engine', 'python')
    parse_options.setdefault('header', None)
    parse_options.setdefault('index_col', False)

    # This is a hack to handle potentially ragged .lab data
    parse_options.setdefault('names', range(20))

    data = pd.read_csv(filename, **parse_options)

    # Drop all-nan columns
    data = data.dropna(how='all', axis=1)

    # Do we need to add a duration column?
    # This only applies to event annotations
    if len(data.columns) == 2:
        # Insert a column of zeros after the timing
        data.insert(1, 'duration', 0)
        if infer_duration:
            # Fill durations with the gap between successive event times
            data.loc[data.index[:-1], 'duration'] = data.loc[:, 0].diff()[1:].values
    else:
        # Convert from time to duration
        if infer_duration:
            data.loc[:, 1] -= data[0]

    for row in data.itertuples():
        time, duration = row[1:3]
        value = [x for x in row[3:] if x is not None][-1]

        annotation.append(time=time,
                          duration=duration,
                          confidence=1.0,
                          value=value)

    return annotation

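# Usage sketch (illustrative; the file names and contents are hypothetical):
# load a three-column chord .lab file, and a one-column onset file where
# durations should not be inferred from successive event times:
#
#     chords = import_lab('chord', 'chords.lab')
#     onsets = import_lab('onset', 'onsets.lab', infer_duration=False)
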
def expand_filepaths(base_dir, rel_paths):
    """Expand a list of relative paths to a given base directory.

    Parameters
    ----------
    base_dir : str
        The target base directory
    rel_paths : list (or list-like)
        Collection of relative path strings

    Returns
    -------
    expanded_paths : list
        `rel_paths` rooted at `base_dir`

    Examples
    --------
    >>> jams.util.expand_filepaths('/data', ['audio', 'beat', 'seglab'])
    ['/data/audio', '/data/beat', '/data/seglab']
    """
    return [os.path.join(base_dir, os.path.normpath(rp)) for rp in rel_paths]

def smkdirs(dpath, mode=0o777):
    """Safely make a full directory path if it doesn't exist.

    Parameters
    ----------
    dpath : str
        Path of directory/directories to create
    mode : int [default=0o777]
        Permissions for the new directories

    See also
    --------
    os.makedirs
    """
    if not os.path.exists(dpath):
        os.makedirs(dpath, mode=mode)

def find_with_extension(in_dir, ext, depth=3, sort=True):
    """Naive depth-search into a directory for files with a given extension.

    Parameters
    ----------
    in_dir : str
        Path to search.
    ext : str
        File extension to match.
    depth : int
        Depth of directories to search.
    sort : bool
        Sort the list alphabetically

    Returns
    -------
    matched : list
        Collection of matching file paths.

    Examples
    --------
    >>> jams.util.find_with_extension('Audio', 'wav')
    ['Audio/LizNelson_Rainfall/LizNelson_Rainfall_MIX.wav',
     'Audio/LizNelson_Rainfall/LizNelson_Rainfall_RAW/LizNelson_Rainfall_RAW_01_01.wav',
     'Audio/LizNelson_Rainfall/LizNelson_Rainfall_RAW/LizNelson_Rainfall_RAW_02_01.wav',
     ...
     'Audio/Phoenix_ScotchMorris/Phoenix_ScotchMorris_STEMS/Phoenix_ScotchMorris_STEM_02.wav',
     'Audio/Phoenix_ScotchMorris/Phoenix_ScotchMorris_STEMS/Phoenix_ScotchMorris_STEM_03.wav',
     'Audio/Phoenix_ScotchMorris/Phoenix_ScotchMorris_STEMS/Phoenix_ScotchMorris_STEM_04.wav']
    """
    assert depth >= 1
    ext = ext.strip(os.extsep)

    match = list()
    for n in range(1, depth+1):
        wildcard = os.path.sep.join(["*"]*n)
        search_path = os.path.join(in_dir, os.extsep.join([wildcard, ext]))
        match += glob.glob(search_path)

    if sort:
        match.sort()

    return match

def get_comments(jam, ann):
    '''Get the metadata from a jam and an annotation, combined as a string.

    Parameters
    ----------
    jam : JAMS
        The jams object
    ann : Annotation
        An annotation object

    Returns
    -------
    comments : str
        The jam.file_metadata and ann.annotation_metadata, combined and serialized
    '''
    jam_comments = jam.file_metadata.__json__
    ann_comments = ann.annotation_metadata.__json__

    return json.dumps({'metadata': jam_comments,
                       'annotation metadata': ann_comments},
                      indent=2)

def lab_dump(ann, comment, filename, sep, comment_char):
    '''Save an annotation as a lab/csv.

    Parameters
    ----------
    ann : Annotation
        The annotation object
    comment : str
        The comment string header
    filename : str
        The output filename
    sep : str
        The separator string for output
    comment_char : str
        The character used to denote comments
    '''
    intervals, values = ann.to_interval_values()

    frame = pd.DataFrame(columns=['Time', 'End Time', 'Label'],
                         data={'Time': intervals[:, 0],
                               'End Time': intervals[:, 1],
                               'Label': values})

    with open(filename, 'w') as fdesc:
        for line in comment.split('\n'):
            fdesc.write('{:s} {:s}\n'.format(comment_char, line))

        frame.to_csv(path_or_buf=fdesc, index=False, sep=sep)

def convert_jams(jams_file, output_prefix, csv=False, comment_char='#', namespaces=None):
    '''Convert jams to labs.

    Parameters
    ----------
    jams_file : str
        The path on disk to the jams file in question
    output_prefix : str
        The file path prefix of the outputs
    csv : bool
        Whether to output in csv (True) or lab (False) format
    comment_char : str
        The character used to denote comments
    namespaces : list-like
        The set of namespace patterns to match for output
    '''
    if namespaces is None:
        raise ValueError('No namespaces provided. Try ".*" for all namespaces.')

    jam = jams.load(jams_file)

    # Get all the annotations
    # Filter down to the unique ones
    # For each annotation
    #   generate the comment string
    #   generate the output filename
    #   dump to csv

    # Make a counter object for each namespace type
    counter = collections.Counter()

    annotations = []

    for query in namespaces:
        annotations.extend(jam.search(namespace=query))

    if csv:
        suffix = 'csv'
        sep = ','
    else:
        suffix = 'lab'
        sep = '\t'

    for ann in annotations:
        index = counter[ann.namespace]
        counter[ann.namespace] += 1

        filename = os.path.extsep.join([get_output_name(output_prefix,
                                                        ann.namespace,
                                                        index),
                                        suffix])

        comment = get_comments(jam, ann)

        # Dump to disk
        lab_dump(ann, comment, filename, sep, comment_char)

def parse_arguments(args):
    '''Parse arguments from the command line'''
    parser = argparse.ArgumentParser(description='Convert JAMS to .lab files')

    parser.add_argument('-c',
                        '--comma-separated',
                        dest='csv',
                        action='store_true',
                        default=False,
                        help='Output in .csv instead of .lab')

    parser.add_argument('--comment', dest='comment_char', type=str, default='#',
                        help='Comment character')

    parser.add_argument('-n',
                        '--namespace',
                        dest='namespaces',
                        nargs='+',
                        default=['.*'],
                        help='One or more namespaces to output. Default is all.')

    parser.add_argument('jams_file',
                        help='Path to the input jams file')

    parser.add_argument('output_prefix', help='Prefix for output files')

    return vars(parser.parse_args(args))

def _conversion(target, source):
    '''A decorator to register namespace conversions.

    Usage
    -----
    >>> @_conversion('tag_open', 'tag_.*')
    ... def tag_to_open(annotation):
    ...     annotation.namespace = 'tag_open'
    ...     return annotation
    '''
    def register(func):
        '''This decorator registers func as mapping source to target'''
        __CONVERSION__[target][source] = func
        return func

    return register

def convert(annotation, target_namespace):
    '''Convert a given annotation to the target namespace.

    Parameters
    ----------
    annotation : jams.Annotation
        An annotation object
    target_namespace : str
        The target namespace

    Returns
    -------
    mapped_annotation : jams.Annotation
        if `annotation` already belongs to `target_namespace`, then
        it is returned directly.

        otherwise, `annotation` is copied and automatically converted
        to the target namespace.

    Raises
    ------
    SchemaError
        if the input annotation fails to validate

    NamespaceError
        if no conversion is possible

    Examples
    --------
    Convert frequency measurements in Hz to MIDI

    >>> ann_midi = jams.convert(ann_hz, 'note_midi')

    And back to Hz

    >>> ann_hz2 = jams.convert(ann_midi, 'note_hz')
    '''
    # First, validate the input. If this fails, we can't auto-convert.
    annotation.validate(strict=True)

    # If we're already in the target namespace, do nothing
    if annotation.namespace == target_namespace:
        return annotation

    if target_namespace in __CONVERSION__:
        # Otherwise, make a copy to mangle
        annotation = deepcopy(annotation)

        # Look for a way to map this namespace to the target
        for source in __CONVERSION__[target_namespace]:
            if annotation.search(namespace=source):
                return __CONVERSION__[target_namespace][source](annotation)

    # No conversion possible
    raise NamespaceError('Unable to convert annotation from namespace='
                         '"{0}" to "{1}"'.format(annotation.namespace,
                                                 target_namespace))

def can_convert(annotation, target_namespace):
    '''Test if an annotation can be mapped to a target namespace.

    Parameters
    ----------
    annotation : jams.Annotation
        An annotation object
    target_namespace : str
        The target namespace

    Returns
    -------
    True
        if `annotation` can be automatically converted to
        `target_namespace`

    False
        otherwise
    '''
    # If we're already in the target namespace, do nothing
    if annotation.namespace == target_namespace:
        return True

    if target_namespace in __CONVERSION__:
        # Look for a way to map this namespace to the target
        for source in __CONVERSION__[target_namespace]:
            if annotation.search(namespace=source):
                return True

    return False

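# Usage sketch (illustrative): guard a conversion with can_convert, so that
# convert() is only attempted when a registered mapping exists:
#
#     if can_convert(ann, 'note_midi'):
#         ann_midi = convert(ann, 'note_midi')
#     else:
#         pass  # handle the unconvertible annotation
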
def pitch_hz_to_contour(annotation):
    '''Convert a pitch_hz annotation to a contour'''
    annotation.namespace = 'pitch_contour'
    data = annotation.pop_data()

    for obs in data:
        annotation.append(time=obs.time, duration=obs.duration,
                          confidence=obs.confidence,
                          value=dict(index=0,
                                     frequency=np.abs(obs.value),
                                     voiced=obs.value > 0))

    return annotation

def note_hz_to_midi(annotation):
    '''Convert a note_hz annotation to note_midi'''
    annotation.namespace = 'note_midi'

    data = annotation.pop_data()

    for obs in data:
        annotation.append(time=obs.time, duration=obs.duration,
                          confidence=obs.confidence,
                          value=12 * (np.log2(obs.value) - np.log2(440.0)) + 69)

    return annotation

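# Quick numeric check (illustrative): A4 = 440 Hz should map to MIDI note 69
# under the formula above, since 12 * (log2(440) - log2(440)) + 69 == 69:
#
#     ann = jams.Annotation(namespace='note_hz')
#     ann.append(time=0, duration=1, value=440.0)
#     ann_midi = note_hz_to_midi(ann)
#     # ann_midi.data[0].value -> 69.0
#
# Note that this converter mutates the annotation it is given; convert()
# above deep-copies first, so callers normally go through jams.convert.
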
def scaper_to_tag(annotation):
    '''Convert scaper annotations to tag_open'''
    annotation.namespace = 'tag_open'

    data = annotation.pop_data()

    for obs in data:
        annotation.append(time=obs.time, duration=obs.duration,
                          confidence=obs.confidence, value=obs.value['label'])

    return annotation

def deprecated(version, version_removed):
    '''This is a decorator which can be used to mark functions
    as deprecated.

    It will result in a warning being emitted when the function is used.'''

    def __wrapper(func, *args, **kwargs):
        '''Warn the user, and then proceed.'''
        code = six.get_function_code(func)
        warnings.warn_explicit(
            "{:s}.{:s}\n\tDeprecated as of JAMS version {:s}."
            "\n\tIt will be removed in JAMS version {:s}."
            .format(func.__module__, func.__name__,
                    version, version_removed),
            category=DeprecationWarning,
            filename=code.co_filename,
            lineno=code.co_firstlineno + 1
        )
        return func(*args, **kwargs)

    return decorator(__wrapper)

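# Usage sketch (illustrative version strings): mark a function as deprecated
# so that calling it emits a DeprecationWarning:
#
#     @deprecated('0.3.0', '0.4.0')
#     def old_api():
#         pass
#
#     old_api()  # warns: "...Deprecated as of JAMS version 0.3.0..."
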
@contextlib.contextmanager
def _open(name_or_fdesc, mode='r', fmt='auto'):
    '''An intelligent wrapper for ``open``.

    Parameters
    ----------
    name_or_fdesc : string-type or open file descriptor
        If a string type, refers to the path to a file on disk.

        If an open file descriptor, it is returned as-is.
    mode : string
        The mode with which to open the file.
        See ``open`` for details.
    fmt : string ['auto', 'jams', 'json', 'jamz']
        The encoding for the input/output stream.

        If `auto`, the format is inferred from the filename extension.

        Otherwise, use the specified coding.

    See Also
    --------
    open
    gzip.open
    '''
    open_map = {'jams': open,
                'json': open,
                'jamz': gzip.open,
                'gz': gzip.open}

    # If we've been given an open descriptor, do the right thing
    if hasattr(name_or_fdesc, 'read') or hasattr(name_or_fdesc, 'write'):
        yield name_or_fdesc

    elif isinstance(name_or_fdesc, six.string_types):
        # Infer the opener from the extension
        if fmt == 'auto':
            _, ext = os.path.splitext(name_or_fdesc)

            # Pull off the extension separator
            ext = ext[1:]
        else:
            ext = fmt

        try:
            ext = ext.lower()

            # Force text mode if we're using gzip
            if ext in ['jamz', 'gz'] and 't' not in mode:
                mode = '{:s}t'.format(mode)

            with open_map[ext](name_or_fdesc, mode=mode) as fdesc:
                yield fdesc

        except KeyError:
            raise ParameterError('Unknown JAMS extension '
                                 'format: "{:s}"'.format(ext))

    else:
        # Don't know how to handle this. Raise a parameter error
        raise ParameterError('Invalid filename or '
                             'descriptor: {}'.format(name_or_fdesc))

def load(path_or_file, validate=True, strict=True, fmt='auto'):
    r"""Load a JAMS Annotation from a file.

    Parameters
    ----------
    path_or_file : str or file-like
        Path to the JAMS file to load
        OR
        An open file handle to load from.
    validate : bool
        Attempt to validate the JAMS object
    strict : bool
        if `validate == True`, enforce strict schema validation
    fmt : str ['auto', 'jams', 'jamz']
        The encoding format of the input

        If `auto`, encoding is inferred from the file name.

        If the input is an open file handle, `jams` encoding
        is used.

    Returns
    -------
    jam : JAMS
        The loaded JAMS object

    Raises
    ------
    SchemaError
        if `validate == True`, `strict==True`, and validation fails

    See also
    --------
    JAMS.validate
    JAMS.save

    Examples
    --------
    >>> # Load a jams object from a file name
    >>> J = jams.load('data.jams')
    >>> # Or from an open file descriptor
    >>> with open('data.jams', 'r') as fdesc:
    ...     J = jams.load(fdesc)
    >>> # Non-strict validation
    >>> J = jams.load('data.jams', strict=False)
    >>> # No validation at all
    >>> J = jams.load('data.jams', validate=False)
    """
    with _open(path_or_file, mode='r', fmt=fmt) as fdesc:
        jam = JAMS(**json.load(fdesc))

    if validate:
        jam.validate(strict=strict)

    return jam

def query_pop(query, prefix, sep='.'):
    '''Pop a prefix from a query string.

    Parameters
    ----------
    query : str
        The query string
    prefix : str
        The prefix string to pop, if it exists
    sep : str
        The string to separate fields

    Returns
    -------
    popped : str
        `query` with a `prefix` removed from the front (if found)
        or `query` if the prefix was not found

    Examples
    --------
    >>> query_pop('Annotation.namespace', 'Annotation')
    'namespace'
    >>> query_pop('namespace', 'Annotation')
    'namespace'
    '''
    terms = query.split(sep)

    if terms[0] == prefix:
        terms = terms[1:]

    return sep.join(terms)

def match_query(string, query):
    '''Test if a string matches a query.

    Parameters
    ----------
    string : str
        The string to test
    query : string, callable, or object
        Either a regular expression, callable function, or object.

    Returns
    -------
    match : bool
        `True` if:
        - `query` is a callable and `query(string) == True`
        - `query` is a regular expression and `re.match(query, string)`
        - or `string == query` for any other query

        `False` otherwise
    '''
    if six.callable(query):
        return query(string)

    elif (isinstance(query, six.string_types) and
          isinstance(string, six.string_types)):
        return re.match(query, string) is not None

    else:
        return query == string

def serialize_obj(obj):
    '''Custom serialization functionality for working with advanced data types.

    - numpy arrays are converted to lists
    - lists are recursively serialized element-wise
    '''
    if isinstance(obj, np.integer):
        return int(obj)

    elif isinstance(obj, np.floating):
        return float(obj)

    elif isinstance(obj, np.ndarray):
        return obj.tolist()

    elif isinstance(obj, list):
        return [serialize_obj(x) for x in obj]

    elif isinstance(obj, Observation):
        return {k: serialize_obj(v) for k, v in six.iteritems(obj._asdict())}

    return obj

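# Usage sketch (illustrative): serialize_obj converts numpy scalars and
# arrays into plain Python types that json.dumps can handle:
#
#     serialize_obj(np.float32(0.5))           # -> 0.5
#     serialize_obj(np.array([1.0, 2.0]))      # -> [1.0, 2.0]
#     json.dumps(serialize_obj(np.arange(3)))  # -> '[0, 1, 2]'
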
def summary(obj, indent=0):
    '''Helper function to format repr strings for JObjects and friends.

    Parameters
    ----------
    obj
        The object to repr
    indent : int >= 0
        indent each new line by `indent` spaces

    Returns
    -------
    r : str
        If `obj` has a `__summary__` method, it is used.

        If `obj` is a `SortedKeyList`, then it returns a description
        of the length of the list.

        Otherwise, `repr(obj)`.
    '''
    if hasattr(obj, '__summary__'):
        rep = obj.__summary__()
    elif isinstance(obj, SortedKeyList):
        rep = '<{:d} observations>'.format(len(obj))
    else:
        rep = repr(obj)

    return rep.replace('\n', '\n' + ' ' * indent)

def update(self, **kwargs):
    '''Update the attributes of a JObject.

    Parameters
    ----------
    kwargs
        Keyword arguments of the form `attribute=new_value`

    Examples
    --------
    >>> J = jams.JObject(foo=5)
    >>> J.dumps()
    '{"foo": 5}'
    >>> J.update(bar='baz')
    >>> J.dumps()
    '{"foo": 5, "bar": "baz"}'
    '''
    for name, value in six.iteritems(kwargs):
        setattr(self, name, value)

def search(self, **kwargs):
    '''Query this object (and its descendants).

    Parameters
    ----------
    kwargs
        Each `(key, value)` pair encodes a search field in `key`
        and a target value in `value`.

        `key` must be a string, and should correspond to a property in
        the JAMS object hierarchy, e.g., `Annotation.namespace` or `email`

        `value` must be either an object (tested for equality), a
        string describing a search pattern (regular expression), or a
        lambda function which evaluates to `True` if the candidate
        object matches the search criteria and `False` otherwise.

    Returns
    -------
    match : bool
        `True` if any of the search keys match the specified value,
        `False` otherwise, or if the search keys do not exist
        within the object.

    Examples
    --------
    >>> J = jams.JObject(foo=5, needle='quick brown fox')
    >>> J.search(needle='.*brown.*')
    True
    >>> J.search(needle='.*orange.*')
    False
    >>> J.search(badger='.*brown.*')
    False
    >>> J.search(foo=5)
    True
    >>> J.search(foo=10)
    False
    >>> J.search(foo=lambda x: x < 10)
    True
    >>> J.search(foo=lambda x: x > 10)
    False
    '''
    match = False

    r_query = {}
    myself = self.__class__.__name__

    # Pop this object name off the query
    for k, value in six.iteritems(kwargs):
        k_pop = query_pop(k, myself)

        if k_pop:
            r_query[k_pop] = value

    if not r_query:
        return False

    for key in r_query:
        if hasattr(self, key):
            match |= match_query(getattr(self, key), r_query[key])

    if not match:
        for attr in dir(self):
            obj = getattr(self, attr)

            if isinstance(obj, JObject):
                match |= obj.search(**r_query)

    return match

def validate(self, strict=True):
    '''Validate a JObject against its schema

    Parameters
    ----------
    strict : bool
        Enforce strict schema validation

    Returns
    -------
    valid : bool
        True if the jam validates
        False if not, and `strict==False`

    Raises
    ------
    SchemaError
        If `strict==True` and `jam` fails validation
    '''
    valid = True

    try:
        jsonschema.validate(self.__json__, self.__schema__)

    except jsonschema.ValidationError as invalid:
        if strict:
            raise SchemaError(str(invalid))
        else:
            warnings.warn(str(invalid))

        valid = False

    return valid

def append(self, time=None, duration=None, value=None, confidence=None):
    '''Append an observation to the data field

    Parameters
    ----------
    time : float >= 0
    duration : float >= 0
        The time and duration of the new observation, in seconds
    value
    confidence
        The value and confidence of the new observations.

        Types and values should conform to the namespace of the
        Annotation object.

    Examples
    --------
    >>> ann = jams.Annotation(namespace='chord')
    >>> ann.append(time=3, duration=2, value='E#')
    '''
    self.data.add(Observation(time=float(time),
                              duration=float(duration),
                              value=value,
                              confidence=confidence))

def append_records(self, records):
    '''Add observations from row-major storage.

    This is primarily useful for deserializing sparsely packed data.

    Parameters
    ----------
    records : iterable of dicts or Observations
        Each element of `records` corresponds to one observation.
    '''
    for obs in records:
        if isinstance(obs, Observation):
            self.append(**obs._asdict())
        else:
            self.append(**obs)

def append_columns(self, columns):
    '''Add observations from column-major storage.

    This is primarily used for deserializing densely packed data.

    Parameters
    ----------
    columns : dict of lists
        Keys must be `time, duration, value, confidence`,
        and each must be a list of equal length.
    '''
    self.append_records([dict(time=t, duration=d, value=v, confidence=c)
                         for (t, d, v, c)
                         in six.moves.zip(columns['time'],
                                          columns['duration'],
                                          columns['value'],
                                          columns['confidence'])])

def validate(self, strict=True):
    '''Validate this annotation object against the JAMS schema,
    and its data against the namespace schema.

    Parameters
    ----------
    strict : bool
        If `True`, then schema violations will cause an Exception.
        If `False`, then schema violations will issue a warning.

    Returns
    -------
    valid : bool
        `True` if the object conforms to schema.
        `False` if the object fails to conform to schema,
        but `strict == False`.

    Raises
    ------
    SchemaError
        If `strict == True` and the object fails validation

    See Also
    --------
    JObject.validate
    '''
    # Get the schema for this annotation
    ann_schema = schema.namespace_array(self.namespace)

    valid = True

    try:
        jsonschema.validate(self.__json_light__(data=False),
                            schema.JAMS_SCHEMA)

        # validate each record in the frame
        data_ser = [serialize_obj(obs) for obs in self.data]

        jsonschema.validate(data_ser, ann_schema)

    except jsonschema.ValidationError as invalid:
        if strict:
            raise SchemaError(str(invalid))
        else:
            warnings.warn(str(invalid))

        valid = False

    return valid

def trim(self, start_time, end_time, strict=False):
    '''Trim the annotation and return as a new `Annotation` object.

    Trimming will result in the new annotation only containing observations
    that occur in the intersection of the time range spanned by the
    annotation and the time range specified by the user. The new annotation
    will span the time range ``[trim_start, trim_end]`` where
    ``trim_start = max(self.time, start_time)`` and ``trim_end =
    min(self.time + self.duration, end_time)``.

    If ``strict=False`` (default) observations that start before
    ``trim_start`` and end after it will be trimmed such that they start at
    ``trim_start``, and similarly observations that start before
    ``trim_end`` and end after it will be trimmed to end at ``trim_end``.
    If ``strict=True`` such borderline observations will be discarded.

    The new duration of the annotation will be ``trim_end - trim_start``.

    Note that if the range defined by ``[start_time, end_time]``
    doesn't intersect with the original time range spanned by the
    annotation, the resulting annotation will contain no observations, will
    have the same start time as the original annotation, and will have
    duration 0.

    This function also copies over all the annotation metadata from the
    original annotation and documents the trim operation by adding a list
    to the annotation's sandbox keyed by ``Annotation.sandbox.trim``, which
    records each trim operation as a dict with keys ``start_time``,
    ``end_time``, ``trim_start``, and ``trim_end``.

    Parameters
    ----------
    start_time : float
        The desired start time for the trimmed annotation in seconds.
    end_time : float
        The desired end time for the trimmed annotation in seconds. Must be
        greater than ``start_time``.
    strict : bool
        When ``False`` (default) observations that lie at the boundaries of
        the trimming range (given by ``[trim_start, trim_end]`` as
        described above), i.e. observations that start before and end after
        either the trim start or end time, will have their time and/or
        duration adjusted such that only the part of the observation that
        lies within the trim range is kept. When ``True`` such observations
        are discarded and not included in the trimmed annotation.

    Returns
    -------
    ann_trimmed : Annotation
        The trimmed annotation, returned as a new jams.Annotation object.
        If the trim range specified by ``[start_time, end_time]`` does not
        intersect at all with the original time range of the annotation a
        warning will be issued and the returned annotation will be empty.

    Raises
    ------
    ParameterError
        If ``end_time`` is not greater than ``start_time``.

    Examples
    --------
    >>> ann = jams.Annotation(namespace='tag_open', time=2, duration=8)
    >>> ann.append(time=2, duration=2, value='one')
    >>> ann.append(time=4, duration=2, value='two')
    >>> ann.append(time=6, duration=2, value='three')
    >>> ann.append(time=7, duration=2, value='four')
    >>> ann.append(time=8, duration=2, value='five')
    >>> ann_trim = ann.trim(5, 8, strict=False)
    >>> print(ann_trim.time, ann_trim.duration)
    (5, 3)
    >>> ann_trim.to_dataframe()
       time  duration  value confidence
    0     5         1    two       None
    1     6         2  three       None
    2     7         1   four       None
    >>> ann_trim_strict = ann.trim(5, 8, strict=True)
    >>> print(ann_trim_strict.time, ann_trim_strict.duration)
    (5, 3)
    >>> ann_trim_strict.to_dataframe()
       time  duration  value confidence
    0     6         2  three       None
    '''
    # Check for basic start_time and end_time validity
    if end_time <= start_time:
        raise ParameterError(
            'end_time must be greater than start_time.')

    # If the annotation does not have a set duration value, we'll assume
    # trimming is possible (up to the user to ensure this is valid).
    if self.duration is None:
        orig_time = start_time
        orig_duration = end_time - start_time
        warnings.warn(
            "Annotation.duration is not defined, cannot check "
            "for temporal intersection, assuming the annotation "
            "is valid between start_time and end_time.")
    else:
        orig_time = self.time
        orig_duration = self.duration

    # Check whether there is intersection between the trim range and
    # annotation: if not, raise a warning and set trim_start and trim_end
    # appropriately.
    if start_time > (orig_time + orig_duration) or (end_time < orig_time):
        warnings.warn(
            'Time range defined by [start_time,end_time] does not '
            'intersect with the time range spanned by this annotation, '
            'the trimmed annotation will be empty.')
        trim_start = self.time
        trim_end = trim_start
    else:
        # Determine new range
        trim_start = max(orig_time, start_time)
        trim_end = min(orig_time + orig_duration, end_time)

    # Create new annotation with same namespace/metadata
    ann_trimmed = Annotation(
        self.namespace,
        data=None,
        annotation_metadata=self.annotation_metadata,
        sandbox=self.sandbox,
        time=trim_start,
        duration=trim_end - trim_start)

    # Selectively add observations based on their start time / duration.
    # We do this rather than copying and directly manipulating the
    # annotation's data frame (which might be faster) since this way trim is
    # independent of the internal data representation.
    for obs in self.data:
        obs_start = obs.time
        obs_end = obs_start + obs.duration

        if obs_start < trim_end and obs_end > trim_start:

            new_start = max(obs_start, trim_start)
            new_end = min(obs_end, trim_end)
            new_duration = new_end - new_start

            if ((not strict) or
                    (new_start == obs_start and new_end == obs_end)):
                ann_trimmed.append(time=new_start,
                                   duration=new_duration,
                                   value=obs.value,
                                   confidence=obs.confidence)

    if 'trim' not in ann_trimmed.sandbox.keys():
        ann_trimmed.sandbox.update(
            trim=[{'start_time': start_time, 'end_time': end_time,
                   'trim_start': trim_start, 'trim_end': trim_end}])
    else:
        ann_trimmed.sandbox.trim.append(
            {'start_time': start_time, 'end_time': end_time,
             'trim_start': trim_start, 'trim_end': trim_end})

    return ann_trimmed

def slice(self, start_time, end_time, strict=False):
    '''Slice the annotation and return as a new `Annotation` object.

    Slicing has the same effect as trimming (see `Annotation.trim`) except
    that while trimming does not modify the start time of the annotation or
    the observations it contains, slicing will set the new annotation's
    start time to ``max(0, trimmed_annotation.time - start_time)`` and the
    start time of its observations will be set with respect to this new
    reference start time.

    This function documents the slice operation by adding a list to the
    annotation's sandbox keyed by ``Annotation.sandbox.slice``, which
    records each slice operation as a dict with keys ``start_time``,
    ``end_time``, ``slice_start``, and ``slice_end``, where
    ``slice_start`` and ``slice_end`` are given by ``trim_start`` and
    ``trim_end`` (see `Annotation.trim`).

    Since slicing is implemented using trimming, the trimming operation
    will also be documented in ``Annotation.sandbox.trim`` as described in
    `Annotation.trim`.

    This function is useful for example when trimming an audio file,
    allowing the user to trim the annotation while ensuring all time
    information matches the new trimmed audio file.

    Parameters
    ----------
    start_time : float
        The desired start time for slicing in seconds.
    end_time : float
        The desired end time for slicing in seconds. Must be greater than
        ``start_time``.
    strict : bool
        When ``False`` (default) observations that lie at the boundaries of
        the slice (see `Annotation.trim` for details) will have their time
        and/or duration adjusted such that only the part of the observation
        that lies within the slice range is kept. When ``True`` such
        observations are discarded and not included in the sliced
        annotation.

    Returns
    -------
    sliced_ann : Annotation
        The sliced annotation.

    See Also
    --------
    Annotation.trim

    Examples
    --------
    >>> ann = jams.Annotation(namespace='tag_open', time=2, duration=8)
    >>> ann.append(time=2, duration=2, value='one')
    >>> ann.append(time=4, duration=2, value='two')
    >>> ann.append(time=6, duration=2, value='three')
    >>> ann.append(time=7, duration=2, value='four')
    >>> ann.append(time=8, duration=2, value='five')
    >>> ann_slice = ann.slice(5, 8, strict=False)
    >>> print(ann_slice.time, ann_slice.duration)
    (0, 3)
    >>> ann_slice.to_dataframe()
       time  duration  value confidence
    0   0.0       1.0    two       None
    1   1.0       2.0  three       None
    2   2.0       1.0   four       None
    >>> ann_slice_strict = ann.slice(5, 8, strict=True)
    >>> print(ann_slice_strict.time, ann_slice_strict.duration)
    (0, 3)
    >>> ann_slice_strict.to_dataframe()
       time  duration  value confidence
    0   1.0       2.0  three       None
    '''
    # start by trimming the annotation
    sliced_ann = self.trim(start_time, end_time, strict=strict)
    raw_data = sliced_ann.pop_data()

    # now adjust the start time of the annotation and the observations it
    # contains.
    for obs in raw_data:
        new_time = max(0, obs.time - start_time)
        # if obs.time > start_time,
        #   duration doesn't change
        # if obs.time < start_time,
        #   duration shrinks by start_time - obs.time
        sliced_ann.append(time=new_time,
                          duration=obs.duration,
                          value=obs.value,
                          confidence=obs.confidence)

    ref_time = sliced_ann.time
    slice_start = ref_time
    slice_end = ref_time + sliced_ann.duration

    if 'slice' not in sliced_ann.sandbox.keys():
        sliced_ann.sandbox.update(
            slice=[{'start_time': start_time, 'end_time': end_time,
                    'slice_start': slice_start, 'slice_end': slice_end}])
    else:
        sliced_ann.sandbox.slice.append(
            {'start_time': start_time, 'end_time': end_time,
             'slice_start': slice_start, 'slice_end': slice_end})

    # Update the timing for the sliced annotation
    sliced_ann.time = max(0, ref_time - start_time)

    return sliced_ann

def pop_data(self):
    '''Replace this annotation's data with a fresh container.

    Returns
    -------
    annotation_data : SortedKeyList
        The original annotation data container
    '''
    data = self.data
    self.data = SortedKeyList(key=self._key)

    return data

def to_interval_values(self):
    '''Extract observation data in a `mir_eval`-friendly format.

    Returns
    -------
    intervals : np.ndarray [shape=(n, 2), dtype=float]
        Start- and end-times of all valued intervals

        `intervals[i, :] = [time[i], time[i] + duration[i]]`
    labels : list
        List view of value field.
    '''
    ints, vals = [], []
    for obs in self.data:
        ints.append([obs.time, obs.time + obs.duration])
        vals.append(obs.value)

    if not ints:
        return np.empty(shape=(0, 2), dtype=float), []

    return np.array(ints), vals

def to_event_values(self):
    '''Extract observation data in a `mir_eval`-friendly format.

    Returns
    -------
    times : np.ndarray [shape=(n,), dtype=float]
        Start-time of all observations
    labels : list
        List view of value field.
    '''
    ints, vals = [], []
    for obs in self.data:
        ints.append(obs.time)
        vals.append(obs.value)

    return np.array(ints), vals

def to_samples(self, times, confidence=False):
    '''Sample the annotation at specified times.

    Parameters
    ----------
    times : np.ndarray, non-negative, ndim=1
        The times (in seconds) to sample the annotation
    confidence : bool
        If `True`, return both values and confidences.
        If `False` (default) only return values.

    Returns
    -------
    values : list
        `values[i]` is a list of observation values for intervals
        that cover `times[i]`.
    confidence : list (optional)
        `confidence` values corresponding to `values`
    '''
    times = np.asarray(times)
    if times.ndim != 1 or np.any(times < 0):
        raise ParameterError('times must be 1-dimensional and non-negative')

    idx = np.argsort(times)
    samples = times[idx]

    values = [list() for _ in samples]
    confidences = [list() for _ in samples]

    for obs in self.data:
        start = np.searchsorted(samples, obs.time)
        end = np.searchsorted(samples, obs.time + obs.duration, side='right')

        for i in range(start, end):
            values[idx[i]].append(obs.value)
            confidences[idx[i]].append(obs.confidence)

    if confidence:
        return values, confidences
    else:
        return values

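# Usage sketch (illustrative): sample an interval annotation at arbitrary
# time points; times falling outside every observation yield empty lists:
#
#     ann = jams.Annotation(namespace='tag_open')
#     ann.append(time=0, duration=2, value='speech')
#     ann.to_samples([0.5, 1.5, 3.0])
#     # -> [['speech'], ['speech'], []]
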
def to_html(self, max_rows=None):
    '''Render this annotation list in HTML

    Returns
    -------
    rendered : str
        An HTML table containing this annotation's data.
    '''
    n = len(self.data)

    div_id = _get_divid(self)

    out = r'''  <div class="panel panel-default">
                    <div class="panel-heading" role="tab" id="heading-{0}">
                        <button
                            type="button"
                            data-toggle="collapse"
                            data-parent="#accordion"
                            href="#{0}"
                            aria-expanded="false"
                            class="collapsed btn btn-info btn-block"
                            aria-controls="{0}">
                            {1:s}
                            <span class="badge pull-right">{2:d}</span>
                        </button>
                    </div>'''.format(div_id, self.namespace, n)

    out += r'''     <div id="{0}" class="panel-collapse collapse"
                         role="tabpanel" aria-labelledby="heading-{0}">
                        <div class="panel-body">'''.format(div_id)

    out += r'''<div class="pull-right">
                    {}
               </div>'''.format(self.annotation_metadata._repr_html_())
    out += r'''<div class="pull-right clearfix">
                    {}
               </div>'''.format(self.sandbox._repr_html_())

    # -- Annotation content starts here
    out += r'''<div><table border="1" class="dataframe">
                <thead>
                    <tr style="text-align: right;">
                        <th></th>
                        <th>time</th>
                        <th>duration</th>
                        <th>value</th>
                        <th>confidence</th>
                    </tr>
                </thead>'''

    out += r'''<tbody>'''

    if max_rows is None or n <= max_rows:
        out += self._fmt_rows(0, n)
    else:
        out += self._fmt_rows(0, max_rows//2)
        out += r'''<tr>
                        <th>...</th>
                        <td>...</td>
                        <td>...</td>
                        <td>...</td>
                        <td>...</td>
                    </tr>'''
        out += self._fmt_rows(n-max_rows//2, n)

    out += r'''</tbody>'''
    out += r'''</table></div>'''

    out += r'''</div></div></div>'''
    return out

def _key(cls, obs):
    '''Provides sorting index for Observation objects'''
    if not isinstance(obs, Observation):
        raise JamsError('{} must be of type jams.Observation'.format(obs))

    return obs.time

def search(self, **kwargs):
    '''Filter the annotation array down to only those Annotation
    objects matching the query.

    Parameters
    ----------
    kwargs : search parameters
        See JObject.search

    Returns
    -------
    results : AnnotationArray
        An annotation array of the objects matching the query

    See Also
    --------
    JObject.search
    '''
    results = AnnotationArray()

    for annotation in self:
        if annotation.search(**kwargs):
            results.append(annotation)

    return results

def trim(self, start_time, end_time, strict=False):
    '''Trim every annotation contained in the annotation array using
    `Annotation.trim` and return as a new `AnnotationArray`.

    See `Annotation.trim` for details about trimming. This function does
    not modify the annotations in the original annotation array.

    Parameters
    ----------
    start_time : float
        The desired start time for the trimmed annotations in seconds.
    end_time : float
        The desired end time for trimmed annotations in seconds. Must be
        greater than ``start_time``.
    strict : bool
        When ``False`` (default) observations that lie at the boundaries of
        the trimming range (see `Annotation.trim` for details) will have
        their time and/or duration adjusted such that only the part of the
        observation that lies within the trim range is kept. When ``True``
        such observations are discarded and not included in the trimmed
        annotation.

    Returns
    -------
    trimmed_array : AnnotationArray
        An annotation array where every annotation has been trimmed.
    '''
    trimmed_array = AnnotationArray()
    for ann in self:
        trimmed_array.append(ann.trim(start_time, end_time, strict=strict))

    return trimmed_array

def slice(self, start_time, end_time, strict=False):
    '''Slice every annotation contained in the annotation array using
    `Annotation.slice` and return as a new `AnnotationArray`.

    See `Annotation.slice` for details about slicing. This function does
    not modify the annotations in the original annotation array.

    Parameters
    ----------
    start_time : float
        The desired start time for slicing in seconds.
    end_time : float
        The desired end time for slicing in seconds. Must be greater than
        ``start_time``.
    strict : bool
        When ``False`` (default) observations that lie at the boundaries of
        the slicing range (see `Annotation.slice` for details) will have
        their time and/or duration adjusted such that only the part of the
        observation that lies within the slice range is kept. When ``True``
        such observations are discarded and not included in the sliced
        annotation.

    Returns
    -------
    sliced_array : AnnotationArray
        An annotation array where every annotation has been sliced.
    '''
    sliced_array = AnnotationArray()
    for ann in self:
        sliced_array.append(ann.slice(start_time, end_time, strict=strict))

    return sliced_array

def add(self, jam, on_conflict='fail'):
    """Add the contents of another jam to this object.

    Note that, by default, this method fails if file_metadata is not
    identical and raises a JamsError; either resolve this manually
    (because conflicts should almost never happen), force an 'overwrite',
    or tell the method to 'ignore' the metadata of the object being added.

    Parameters
    ----------
    jam : JAMS object
        Object to add to this jam
    on_conflict : str, default='fail'
        Strategy for resolving metadata conflicts; one of
        ['fail', 'overwrite', 'ignore'].

    Raises
    ------
    ParameterError
        if `on_conflict` is an unknown value

    JamsError
        If a conflict is detected and `on_conflict='fail'`
    """
    if on_conflict not in ['overwrite', 'fail', 'ignore']:
        raise ParameterError("on_conflict='{}' is not in ['fail', "
                             "'overwrite', 'ignore'].".format(on_conflict))

    if not self.file_metadata == jam.file_metadata:
        if on_conflict == 'overwrite':
            self.file_metadata = jam.file_metadata
        elif on_conflict == 'fail':
            raise JamsError("Metadata conflict! "
                            "Resolve manually or force-overwrite it.")

    self.annotations.extend(jam.annotations)
    self.sandbox.update(**jam.sandbox)

def save(self, path_or_file, strict=True, fmt='auto'):
    """Serialize annotation as a JSON formatted stream to file.

    Parameters
    ----------
    path_or_file : str or file-like
        Path to save the JAMS object on disk
        OR
        An open file descriptor to write into
    strict : bool
        Force strict schema validation
    fmt : str ['auto', 'jams', 'jamz']
        The output encoding format.

        If `auto`, it is inferred from the file name.

        If the input is an open file handle, `jams` encoding
        is used.

    Raises
    ------
    SchemaError
        If `strict == True` and the JAMS object fails schema
        or namespace validation.

    See also
    --------
    validate
    """
    self.validate(strict=strict)

    with _open(path_or_file, mode='w', fmt=fmt) as fdesc:
        json.dump(self.__json__, fdesc, indent=2)

def validate(self, strict=True):
    '''Validate a JAMS object against the schema.

    Parameters
    ----------
    strict : bool
        If `True`, an exception will be raised on validation failure.
        If `False`, a warning will be raised on validation failure.

    Returns
    -------
    valid : bool
        `True` if the object passes schema validation.
        `False` otherwise.

    Raises
    ------
    SchemaError
        If `strict==True` and the JAMS object does not match the schema

    See Also
    --------
    jsonschema.validate
    '''
    valid = True

    try:
        jsonschema.validate(self.__json_light__, schema.JAMS_SCHEMA)

        for ann in self.annotations:
            if isinstance(ann, Annotation):
                valid &= ann.validate(strict=strict)
            else:
                msg = '{} is not a well-formed JAMS Annotation'.format(ann)
                valid = False

                if strict:
                    raise SchemaError(msg)
                else:
                    warnings.warn(str(msg))

    except jsonschema.ValidationError as invalid:
        if strict:
            raise SchemaError(str(invalid))
        else:
            warnings.warn(str(invalid))

        valid = False

    return valid

def trim(self, start_time, end_time, strict=False):
    '''Trim all the annotations inside the jam and return as a new `JAMS`
    object.

    See `Annotation.trim` for details about how the annotations
    are trimmed.

    This operation is also documented in the jam-level sandbox
    with a list keyed by ``JAMS.sandbox.trim`` containing an entry for each
    jam-level trim, a dict with keys ``start_time`` and ``end_time``.

    This function also copies over all of the file metadata from the
    original jam.

    Note: trimming does not affect the duration of the jam, i.e. the value
    of ``JAMS.file_metadata.duration`` will be the same for the original
    and trimmed jams.

    Parameters
    ----------
    start_time : float
        The desired start time for the trimmed annotations in seconds.
    end_time : float
        The desired end time for trimmed annotations in seconds. Must be
        greater than ``start_time``.
    strict : bool
        When ``False`` (default) observations that lie at the boundaries of
        the trimming range (see `Annotation.trim` for details) will have
        their time and/or duration adjusted such that only the part of the
        observation that lies within the trim range is kept. When ``True``
        such observations are discarded and not included in the trimmed
        annotation.

    Returns
    -------
    jam_trimmed : JAMS
        The trimmed jam with trimmed annotations, returned as a new JAMS
        object.
    '''
    # Make sure duration is set in file metadata
    if self.file_metadata.duration is None:
        raise JamsError(
            'Duration must be set (jam.file_metadata.duration) before '
            'trimming can be performed.')

    # Make sure start and end times are within the file start/end times
    if not (0 <= start_time <= end_time <= float(
            self.file_metadata.duration)):
        raise ParameterError(
            'start_time and end_time must be within the original file '
            'duration ({:f}) and end_time cannot be smaller than '
            'start_time.'.format(float(self.file_metadata.duration)))

    # Create a new jams
    jam_trimmed = JAMS(annotations=None,
                       file_metadata=self.file_metadata,
                       sandbox=self.sandbox)

    # trim annotations
    jam_trimmed.annotations = self.annotations.trim(
        start_time, end_time, strict=strict)

    # Document jam-level trim in top level sandbox
    if 'trim' not in jam_trimmed.sandbox.keys():
        jam_trimmed.sandbox.update(
            trim=[{'start_time': start_time, 'end_time': end_time}])
    else:
        jam_trimmed.sandbox.trim.append(
            {'start_time': start_time, 'end_time': end_time})

    return jam_trimmed

def slice(self, start_time, end_time, strict=False):
'''
Slice all the annotations inside the jam and return as a new `JAMS`
object.
See `Annotation.slice` for details about how the annotations
are sliced.
This operation is also documented in the jam-level sandbox
with a list keyed by ``JAMS.sandbox.slice`` containing a tuple for each
jam-level slice of the form ``(start_time, end_time)``.
Since slicing is implemented using trimming, the operation will also be
documented in ``JAMS.sandbox.trim`` as described in `JAMS.trim`.
This function also copies over all of the file metadata from the
original jam.
Note: slicing will affect the duration of the jam, i.e. the new value
of ``JAMS.file_metadata.duration`` will be ``end_time - start_time``.
Parameters
----------
start_time : float
The desired start time for slicing in seconds.
end_time
The desired end time for slicing in seconds. Must be greater than
``start_time``.
strict : bool
When ``False`` (default) observations that lie at the boundaries of
the slicing range (see `Annotation.slice` for details), will have
their time and/or duration adjusted such that only the part of the
observation that lies within the slice range is kept. When ``True``
such observations are discarded and not included in the sliced
annotation.
Returns
-------
jam_sliced: JAMS
The sliced jam with sliced annotations, returned as a new
JAMS object.
'''
# Make sure duration is set in file metadata
if self.file_metadata.duration is None:
raise JamsError(
'Duration must be set (jam.file_metadata.duration) before '
'slicing can be performed.')
# Make sure start and end times are within the file start/end times
if (start_time < 0 or
start_time > float(self.file_metadata.duration) or
end_time < start_time or
end_time > float(self.file_metadata.duration)):
raise ParameterError(
'start_time and end_time must be within the original file '
'duration ({:f}) and end_time cannot be smaller than '
'start_time.'.format(float(self.file_metadata.duration)))
# Create a new jams
jam_sliced = JAMS(annotations=None,
file_metadata=self.file_metadata,
sandbox=self.sandbox)
# trim annotations
jam_sliced.annotations = self.annotations.slice(
start_time, end_time, strict=strict)
# adjust duration
jam_sliced.file_metadata.duration = end_time - start_time
# Document jam-level slice in top level sandbox
if 'slice' not in jam_sliced.sandbox.keys():
jam_sliced.sandbox.update(
slice=[{'start_time': start_time, 'end_time': end_time}])
else:
jam_sliced.sandbox.slice.append(
{'start_time': start_time, 'end_time': end_time})
return jam_sliced |
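# Usage sketch (hypothetical file 'song.jams'): unlike trim, slice
# rescales the file duration to the length of the sliced region.
#
# >>> jam = jams.load('song.jams')
# >>> jam_sliced = jam.slice(5.0, 10.0)
# >>> jam_sliced.file_metadata.duration
# 5.0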
def pprint_jobject(obj, **kwargs):
'''Pretty-print a jobject.
Parameters
----------
obj : jams.JObject
kwargs
additional parameters to `json.dumps`
Returns
-------
string
A simplified display of `obj` contents.
'''
obj_simple = {k: v for k, v in six.iteritems(obj.__json__) if v}
string = json.dumps(obj_simple, **kwargs)
# Suppress braces and quotes
string = re.sub(r'[{}"]', '', string)
# Kill trailing commas
string = re.sub(r',\n', '\n', string)
# Kill blank lines
string = re.sub(r'^\s*$', '', string)
return string |
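# Usage sketch (assumes `ann` is a jams.Annotation): empty fields are
# dropped and JSON punctuation is stripped, leaving a plain-text summary.
#
# >>> print(pprint_jobject(ann.annotation_metadata, indent=2))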
def intervals(annotation, **kwargs):
'''Plotting wrapper for labeled intervals'''
times, labels = annotation.to_interval_values()
return mir_eval.display.labeled_intervals(times, labels, **kwargs) |
def hierarchy(annotation, **kwargs):
'''Plotting wrapper for hierarchical segmentations'''
htimes, hlabels = hierarchy_flatten(annotation)
htimes = [np.asarray(_) for _ in htimes]
return mir_eval.display.hierarchy(htimes, hlabels, **kwargs) |
def pitch_contour(annotation, **kwargs):
'''Plotting wrapper for pitch contours'''
ax = kwargs.pop('ax', None)
# If the annotation is empty, we need to construct a new axes
ax = mir_eval.display.__get_axes(ax=ax)[0]
times, values = annotation.to_interval_values()
indices = np.unique([v['index'] for v in values])
for idx in indices:
rows = [i for (i, v) in enumerate(values) if v['index'] == idx]
freqs = np.asarray([values[r]['frequency'] for r in rows])
unvoiced = ~np.asarray([values[r]['voiced'] for r in rows])
freqs[unvoiced] *= -1
ax = mir_eval.display.pitch(times[rows, 0], freqs, unvoiced=True,
ax=ax,
**kwargs)
return ax |
def event(annotation, **kwargs):
'''Plotting wrapper for events'''
times, values = annotation.to_interval_values()
if any(values):
labels = values
else:
labels = None
return mir_eval.display.events(times, labels=labels, **kwargs) |
def beat_position(annotation, **kwargs):
'''Plotting wrapper for beat-position data'''
times, values = annotation.to_interval_values()
labels = [_['position'] for _ in values]
# TODO: plot time signature, measure number
return mir_eval.display.events(times, labels=labels, **kwargs) |
def piano_roll(annotation, **kwargs):
'''Plotting wrapper for piano rolls'''
times, midi = annotation.to_interval_values()
return mir_eval.display.piano_roll(times, midi=midi, **kwargs) |
def display(annotation, meta=True, **kwargs):
'''Visualize a jams annotation through mir_eval
Parameters
----------
annotation : jams.Annotation
The annotation to display
meta : bool
If `True`, include annotation metadata in the figure
kwargs
Additional keyword arguments to mir_eval.display functions
Returns
-------
ax
Axis handles for the new display
Raises
------
NamespaceError
If the annotation cannot be visualized
'''
for namespace, func in six.iteritems(VIZ_MAPPING):
try:
ann = coerce_annotation(annotation, namespace)
axes = func(ann, **kwargs)
# Title should correspond to original namespace, not the coerced version
axes.set_title(annotation.namespace)
if meta:
description = pprint_jobject(annotation.annotation_metadata, indent=2)
anchored_box = AnchoredText(description.strip('\n'),
loc=2,
frameon=True,
bbox_to_anchor=(1.02, 1.0),
bbox_transform=axes.transAxes,
borderpad=0.0)
axes.add_artist(anchored_box)
axes.figure.subplots_adjust(right=0.8)
return axes
except NamespaceError:
pass
raise NamespaceError('Unable to visualize annotation of namespace="{:s}"'
.format(annotation.namespace)) |
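# Usage sketch (hypothetical file 'song.jams'; requires matplotlib):
#
# >>> jam = jams.load('song.jams')
# >>> ax = display(jam.annotations[0], meta=True)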
def display_multi(annotations, fig_kw=None, meta=True, **kwargs):
'''Display multiple annotations with shared axes
Parameters
----------
annotations : jams.AnnotationArray
A collection of annotations to display
fig_kw : dict
Keyword arguments to `plt.figure`
meta : bool
If `True`, display annotation metadata for each annotation
kwargs
Additional keyword arguments to the `mir_eval.display` routines
Returns
-------
fig
The created figure
axs
List of subplot axes corresponding to each displayed annotation
'''
if fig_kw is None:
fig_kw = dict()
fig_kw.setdefault('sharex', True)
fig_kw.setdefault('squeeze', True)
# Filter down to coercable annotations first
display_annotations = []
for ann in annotations:
for namespace in VIZ_MAPPING:
if can_convert(ann, namespace):
display_annotations.append(ann)
break
# If there are no displayable annotations, fail here
if not len(display_annotations):
raise ParameterError('No displayable annotations found')
fig, axs = plt.subplots(nrows=len(display_annotations), ncols=1, **fig_kw)
# MPL is stupid when making singleton subplots.
# We catch this and make it always iterable.
if len(display_annotations) == 1:
axs = [axs]
for ann, ax in zip(display_annotations, axs):
kwargs['ax'] = ax
display(ann, meta=meta, **kwargs)
return fig, axs |
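# Usage sketch (assumes `jam` is a loaded JAMS object with at least one
# displayable annotation; `figsize` is a standard `plt.subplots` keyword):
#
# >>> fig, axs = display_multi(jam.annotations, fig_kw=dict(figsize=(10, 6)))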
def mkclick(freq, sr=22050, duration=0.1):
'''Generate a click sample.
This replicates functionality from mir_eval.sonify.clicks,
but exposes the target frequency and duration.
'''
times = np.arange(int(sr * duration))
click = np.sin(2 * np.pi * times * freq / float(sr))
click *= np.exp(- times / (1e-2 * sr))
return click |
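# Example: at the default rate and duration, a click is a 2205-sample
# (0.1 s * 22050 Hz) exponentially decaying sinusoid at the target frequency.
#
# >>> click = mkclick(440.0)
# >>> click.shape
# (2205,)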
def clicks(annotation, sr=22050, length=None, **kwargs):
'''Sonify events with clicks.
This uses mir_eval.sonify.clicks, and is appropriate for instantaneous
events such as beats or segment boundaries.
'''
interval, _ = annotation.to_interval_values()
return filter_kwargs(mir_eval.sonify.clicks, interval[:, 0],
fs=sr, length=length, **kwargs) |
def downbeat(annotation, sr=22050, length=None, **kwargs):
'''Sonify beats and downbeats together.
'''
beat_click = mkclick(440 * 2, sr=sr)
downbeat_click = mkclick(440 * 3, sr=sr)
intervals, values = annotation.to_interval_values()
beats, downbeats = [], []
for time, value in zip(intervals[:, 0], values):
if value['position'] == 1:
downbeats.append(time)
else:
beats.append(time)
if length is None:
length = int(sr * np.max(intervals)) + len(beat_click) + 1
y = filter_kwargs(mir_eval.sonify.clicks,
np.asarray(beats),
fs=sr, length=length, click=beat_click)
y += filter_kwargs(mir_eval.sonify.clicks,
np.asarray(downbeats),
fs=sr, length=length, click=downbeat_click)
return y |
def multi_segment(annotation, sr=22050, length=None, **kwargs):
'''Sonify multi-level segmentations'''
# Pentatonic scale, because why not
PENT = [1, 32./27, 4./3, 3./2, 16./9]
DURATION = 0.1
h_int, _ = hierarchy_flatten(annotation)
if length is None:
length = int(sr * (max(np.max(_) for _ in h_int) + 1. / DURATION) + 1)
y = 0.0
for ints, (oc, scale) in zip(h_int, product(range(3, 3 + len(h_int)),
PENT)):
click = mkclick(440.0 * scale * oc, sr=sr, duration=DURATION)
y = y + filter_kwargs(mir_eval.sonify.clicks,
np.unique(ints),
fs=sr, length=length,
click=click)
return y |
def chord(annotation, sr=22050, length=None, **kwargs):
'''Sonify chords
This uses mir_eval.sonify.chords.
'''
intervals, chords = annotation.to_interval_values()
return filter_kwargs(mir_eval.sonify.chords,
chords, intervals,
fs=sr, length=length,
**kwargs) |
def pitch_contour(annotation, sr=22050, length=None, **kwargs):
'''Sonify pitch contours.
This uses mir_eval.sonify.pitch_contour, and should only be applied
to pitch annotations using the pitch_contour namespace.
Each contour is sonified independently, and the resulting waveforms
are summed together.
'''
# Map contours to lists of observations
times = defaultdict(list)
freqs = defaultdict(list)
for obs in annotation:
times[obs.value['index']].append(obs.time)
freqs[obs.value['index']].append(obs.value['frequency'] *
(-1)**(~obs.value['voiced']))  # negative frequency marks unvoiced
y_out = 0.0
for ix in times:
y_out = y_out + filter_kwargs(mir_eval.sonify.pitch_contour,
np.asarray(times[ix]),
np.asarray(freqs[ix]),
fs=sr, length=length,
**kwargs)
# Infer the output length from the synthesized audio when no explicit
# length was given, so that all contours sum to a consistent length
if length is None:
length = len(y_out)
return y_out |
def piano_roll(annotation, sr=22050, length=None, **kwargs):
'''Sonify a piano-roll
This uses mir_eval.sonify.time_frequency, and is appropriate
for sparse transcription data, e.g., annotations in the `note_midi`
namespace.
'''
intervals, pitches = annotation.to_interval_values()
# Construct the pitchogram
pitch_map = {f: idx for idx, f in enumerate(np.unique(pitches))}
gram = np.zeros((len(pitch_map), len(intervals)))
for col, f in enumerate(pitches):
gram[pitch_map[f], col] = 1
return filter_kwargs(mir_eval.sonify.time_frequency,
gram, pitches, intervals,
sr, length=length, **kwargs) |
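# Usage sketch (assumes `ann` is a note_midi annotation): each note
# interval becomes a one-hot column in the time-frequency "gram" above.
#
# >>> y = piano_roll(ann, sr=22050)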
def sonify(annotation, sr=22050, duration=None, **kwargs):
'''Sonify a jams annotation through mir_eval
Parameters
----------
annotation : jams.Annotation
The annotation to sonify
sr : positive number
The sampling rate of the output waveform
duration : float (optional)
Optional length (in seconds) of the output waveform
kwargs
Additional keyword arguments to mir_eval.sonify functions
Returns
-------
y_sonified : np.ndarray
The waveform of the sonified annotation
Raises
------
NamespaceError
If the annotation has an un-sonifiable namespace
'''
length = None
if duration is None:
duration = annotation.duration
if duration is not None:
length = int(duration * sr)
# If the annotation can be directly sonified, try that first
if annotation.namespace in SONIFY_MAPPING:
ann = coerce_annotation(annotation, annotation.namespace)
return SONIFY_MAPPING[annotation.namespace](ann,
sr=sr,
length=length,
**kwargs)
for namespace, func in six.iteritems(SONIFY_MAPPING):
try:
ann = coerce_annotation(annotation, namespace)
return func(ann, sr=sr, length=length, **kwargs)
except NamespaceError:
pass
raise NamespaceError('Unable to sonify annotation of namespace="{:s}"'
.format(annotation.namespace)) |
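# Usage sketch (hypothetical file 'song.jams'):
#
# >>> jam = jams.load('song.jams')
# >>> y = sonify(jam.annotations[0], sr=44100)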
def validate(schema_file=None, jams_files=None):
'''Validate a jams file against a schema'''
schema = load_json(schema_file)
for jams_file in jams_files:
try:
jams = load_json(jams_file)
jsonschema.validate(jams, schema)
print('{:s} was successfully validated'.format(jams_file))
except jsonschema.ValidationError as exc:
print('{:s} was NOT successfully validated'.format(jams_file))
print(exc) |
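# Usage sketch (hypothetical file names):
#
# >>> validate(schema_file='jams_schema.json',
# ...          jams_files=['song1.jams', 'song2.jams'])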
def make_stream_features(self, stream, features):
"""Add SASL features to the <features/> element of the stream.
[receiving entity only]
:returns: the updated <features/> element."""
mechs = self.settings['sasl_mechanisms']
if mechs and not stream.authenticated:
sub = ElementTree.SubElement(features, MECHANISMS_TAG)
for mech in mechs:
if mech in sasl.SERVER_MECHANISMS:
ElementTree.SubElement(sub, MECHANISM_TAG).text = mech
return features |
def handle_stream_features(self, stream, features):
"""Process incoming <stream:features/> element.
[initiating entity only]
"""
element = features.find(MECHANISMS_TAG)
self.peer_sasl_mechanisms = []
if element is None:
return None
for sub in element:
if sub.tag != MECHANISM_TAG:
continue
self.peer_sasl_mechanisms.append(sub.text)
if stream.authenticated or not self.peer_sasl_mechanisms:
return StreamFeatureNotHandled("SASL", mandatory = True)
username = self.settings.get("username")
if not username:
# TODO: other rules for s2s
if stream.me.local:
username = stream.me.local
else:
username = None
self._sasl_authenticate(stream, username, self.settings.get("authzid"))
return StreamFeatureHandled("SASL", mandatory = True) |
def process_sasl_auth(self, stream, element):
"""Process incoming <sasl:auth/> element.
[receiving entity only]
"""
if self.authenticator:
logger.debug("Authentication already started")
return False
password_db = self.settings["password_database"]
mechanism = element.get("mechanism")
if not mechanism:
stream.send_stream_error("bad-format")
raise FatalStreamError("<sasl:auth/> with no mechanism")
stream.auth_method_used = mechanism
self.authenticator = sasl.server_authenticator_factory(mechanism,
password_db)
content = element.text.encode("us-ascii")
ret = self.authenticator.start(stream.auth_properties,
a2b_base64(content))
if isinstance(ret, sasl.Success):
element = ElementTree.Element(SUCCESS_TAG)
element.text = ret.encode()
elif isinstance(ret, sasl.Challenge):
element = ElementTree.Element(CHALLENGE_TAG)
element.text = ret.encode()
else:
element = ElementTree.Element(FAILURE_TAG)
ElementTree.SubElement(element, SASL_QNP + ret.reason)
stream.write_element(element)
if isinstance(ret, sasl.Success):
self._handle_auth_success(stream, ret)
elif isinstance(ret, sasl.Failure):
raise SASLAuthenticationFailed("SASL authentication failed: {0}"
.format(ret.reason))
return True |
def _handle_auth_success(self, stream, success):
"""Handle successful authentication.
Send <success/> and mark the stream peer authenticated.
[receiver only]
"""
if not self._check_authorization(success.properties, stream):
element = ElementTree.Element(FAILURE_TAG)
ElementTree.SubElement(element, SASL_QNP + "invalid-authzid")
stream.write_element(element)
return True
authzid = success.properties.get("authzid")
if authzid:
peer = JID(authzid)
elif "username" in success.properties:
peer = JID(success.properties["username"], stream.me.domain)
else:
# anonymous
peer = None
stream.set_peer_authenticated(peer, True) |
def _process_sasl_challenge(self, stream, element):
"""Process incoming <sasl:challenge/> element.
[initiating entity only]
"""
if not self.authenticator:
logger.debug("Unexpected SASL challenge")
return False
content = element.text.encode("us-ascii")
ret = self.authenticator.challenge(a2b_base64(content))
if isinstance(ret, sasl.Response):
element = ElementTree.Element(RESPONSE_TAG)
element.text = ret.encode()
else:
element = ElementTree.Element(ABORT_TAG)
stream.write_element(element)
if isinstance(ret, sasl.Failure):
stream.disconnect()
raise SASLAuthenticationFailed("SASL authentication failed")
return True |
def _process_sasl_response(self, stream, element):
"""Process incoming <sasl:response/> element.
[receiving entity only]
"""
if not self.authenticator:
logger.debug("Unexpected SASL response")
return False
content = element.text.encode("us-ascii")
ret = self.authenticator.response(a2b_base64(content))
if isinstance(ret, sasl.Success):
element = ElementTree.Element(SUCCESS_TAG)
element.text = ret.encode()
elif isinstance(ret, sasl.Challenge):
element = ElementTree.Element(CHALLENGE_TAG)
element.text = ret.encode()
else:
element = ElementTree.Element(FAILURE_TAG)
ElementTree.SubElement(element, SASL_QNP + ret.reason)
stream.write_element(element)
if isinstance(ret, sasl.Success):
self._handle_auth_success(stream, ret)
elif isinstance(ret, sasl.Failure):
raise SASLAuthenticationFailed("SASL authentication failed: {0!r}"
.format(ret.reason))
return True |
def _check_authorization(self, properties, stream):
"""Check authorization id and other properties returned by the
authentication mechanism.
[receiving entity only]
Allow only no authzid or authzid equal to current username@domain
FIXME: other rules in s2s
:Parameters:
- `properties`: data obtained during authentication
:Types:
- `properties`: mapping
:return: `True` if user is authorized to use a provided authzid
:returntype: `bool`
"""
authzid = properties.get("authzid")
if not authzid:
return True
try:
jid = JID(authzid)
except ValueError:
return False
if "username" not in properties:
result = False
elif jid.local != properties["username"]:
result = False
elif jid.domain != stream.me.domain:
result = False
elif jid.resource:
result = False
else:
result = True
return result |
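# Example of the rule above (hypothetical values): on a stream for
# 'example.com' with properties {'username': u'alice'}, the authzid
# u'alice@example.com' is authorized, while u'bob@example.com' (wrong
# local part) and u'alice@example.com/home' (non-empty resource) are not.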
def _process_sasl_success(self, stream, element):
"""Process incoming <sasl:success/> element.
[initiating entity only]
"""
if not self.authenticator:
logger.debug("Unexpected SASL response")
return False
content = element.text
if content:
data = a2b_base64(content.encode("us-ascii"))
else:
data = None
ret = self.authenticator.finish(data)
if isinstance(ret, sasl.Success):
logger.debug("SASL authentication succeeded")
authzid = ret.properties.get("authzid")
if authzid:
me = JID(authzid)
elif "username" in ret.properties:
# FIXME: other rules for server
me = JID(ret.properties["username"], stream.peer.domain)
else:
me = None
stream.set_authenticated(me, True)
else:
logger.debug("SASL authentication failed")
raise SASLAuthenticationFailed("Additional success data"
" procesing failed")
return True |
def _process_sasl_failure(self, stream, element):
"""Process incoming <sasl:failure/> element.
[initiating entity only]
"""
_unused = stream
if not self.authenticator:
logger.debug("Unexpected SASL response")
return False
logger.debug("SASL authentication failed: {0!r}".format(
element_to_unicode(element)))
raise SASLAuthenticationFailed("SASL authentication failed") |
def _process_sasl_abort(self, stream, element):
"""Process incoming <sasl:abort/> element.
[receiving entity only]"""
_unused, _unused = stream, element
if not self.authenticator:
logger.debug("Unexpected SASL response")
return False
self.authenticator = None
logger.debug("SASL authentication aborted")
return True |
def _sasl_authenticate(self, stream, username, authzid):
"""Start SASL authentication process.
[initiating entity only]
:Parameters:
- `username`: user name.
- `authzid`: authorization ID."""
if not stream.initiator:
raise SASLAuthenticationFailed("Only initiating entity start"
" SASL authentication")
if stream.features is None or not self.peer_sasl_mechanisms:
raise SASLNotAvailable("Peer doesn't support SASL")
props = dict(stream.auth_properties)
if not props.get("service-domain") and (
stream.peer and stream.peer.domain):
props["service-domain"] = stream.peer.domain
if username is not None:
props["username"] = username
if authzid is not None:
props["authzid"] = authzid
if "password" in self.settings:
props["password"] = self.settings["password"]
props["available_mechanisms"] = self.peer_sasl_mechanisms
enabled = sasl.filter_mechanism_list(
self.settings['sasl_mechanisms'], props,
self.settings['insecure_auth'])
if not enabled:
raise SASLNotAvailable(
"None of SASL mechanism selected can be used")
props["enabled_mechanisms"] = enabled
mechanism = None
for mech in enabled:
if mech in self.peer_sasl_mechanisms:
mechanism = mech
break
if not mechanism:
raise SASLMechanismNotAvailable("Peer doesn't support any of"
" our SASL mechanisms")
logger.debug("Our mechanism: {0!r}".format(mechanism))
stream.auth_method_used = mechanism
self.authenticator = sasl.client_authenticator_factory(mechanism)
initial_response = self.authenticator.start(props)
if not isinstance(initial_response, sasl.Response):
raise SASLAuthenticationFailed("SASL initiation failed")
element = ElementTree.Element(AUTH_TAG)
element.set("mechanism", mechanism)
if initial_response.data:
if initial_response.encode:
element.text = initial_response.encode()
else:
element.text = initial_response.data
stream.write_element(element) |
def timeout_handler(interval, recurring = None):
"""Method decorator generator for decorating event handlers.
To be used on `TimeoutHandler` subclass methods only.
:Parameters:
- `interval`: interval (in seconds) before the method will be called.
- `recurring`: When `True`, the handler will be called every `interval`
seconds; when `False`, it will be called only once. If `None`,
the handler should return the next interval, or `None` if it
should not be called again.
:Types:
- `interval`: `float`
- `recurring`: `bool`
"""
def decorator(func):
"""The decorator"""
func._pyxmpp_timeout = interval
func._pyxmpp_recurring = recurring
return func
return decorator |
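# Usage sketch (hypothetical Pinger class; `send_ping` is assumed to
# exist elsewhere):
#
# class Pinger(TimeoutHandler):
#     @timeout_handler(60.0, recurring = True)
#     def keepalive(self):
#         """Called every 60 seconds once added to the main loop."""
#         send_ping()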
def delayed_call(self, delay, function):
"""Schedule function to be called from the main loop after `delay`
seconds.
:Parameters:
- `delay`: seconds to wait
- `function`: the function to call
:Types:
- `delay`: `float`
"""
main_loop = self
handler = []
class DelayedCallHandler(TimeoutHandler):
"""Wrapper timeout handler class for the delayed call."""
# pylint: disable=R0903
@timeout_handler(delay, False)
def callback(self):
"""Wrapper timeout handler method for the delayed call."""
try:
function()
finally:
main_loop.remove_handler(handler[0])
handler.append(DelayedCallHandler())
self.add_handler(handler[0]) |
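# Usage sketch (assumes `loop` is a main-loop instance):
#
# loop.delayed_call(5.0, lambda: logger.info("5 seconds elapsed"))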
def from_xml(cls, element):
"""Make a RosterItem from an XML element.
:Parameters:
- `element`: the XML element
:Types:
- `element`: :etree:`ElementTree.Element`
:return: a freshly created roster item
:returntype: `cls`
"""
if element.tag != ITEM_TAG:
raise ValueError("{0!r} is not a roster item".format(element))
try:
jid = JID(element.get("jid"))
except ValueError:
raise BadRequestProtocolError(u"Bad item JID")
subscription = element.get("subscription")
ask = element.get("ask")
name = element.get("name")
duplicate_group = False
groups = set()
for child in element:
if child.tag != GROUP_TAG:
continue
group = child.text
if group is None:
group = u""
if group in groups:
duplicate_group = True
else:
groups.add(group)
approved = element.get("approved")
if approved == "true":
approved = True
elif approved in ("false", None):
approved = False
else:
logger.debug("RosterItem.from_xml: got unknown 'approved':"
" {0!r}, changing to False".format(approved))
approved = False
result = cls(jid, name, groups, subscription, ask, approved)
result._duplicate_group = duplicate_group
return result |
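# Usage sketch (assumes ITEM_TAG is the namespaced jabber:iq:roster
# item tag, as used in the check above):
#
# >>> xml = ElementTree.XML('<item xmlns="jabber:iq:roster"'
# ...                       ' jid="alice@example.com" subscription="both">'
# ...                       '<group>Friends</group></item>')
# >>> item = RosterItem.from_xml(xml)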
def as_xml(self, parent = None):
"""Make an XML element from self.
:Parameters:
- `parent`: Parent element
:Types:
- `parent`: :etree:`ElementTree.Element`
"""
if parent is not None:
element = ElementTree.SubElement(parent, ITEM_TAG)
else:
element = ElementTree.Element(ITEM_TAG)
element.set("jid", unicode(self.jid))
if self.name is not None:
element.set("name", self.name)
if self.subscription is not None:
element.set("subscription", self.subscription)
if self.ask:
element.set("ask", self.ask)
if self.approved:
element.set("approved", "true")
for group in self.groups:
ElementTree.SubElement(element, GROUP_TAG).text = group
return element |
def _verify(self, valid_subscriptions, fix):
"""Check if `self` is valid roster item.
Valid item must have proper `subscription` and valid value for 'ask'.
:Parameters:
- `valid_subscriptions`: sequence of valid subscription values
- `fix`: if `True`, then replace invalid 'subscription' and 'ask'
values with the defaults
:Types:
- `fix`: `bool`
:Raise: `ValueError` if the item is invalid.
"""
if self.subscription not in valid_subscriptions:
if fix:
logger.debug("RosterItem.from_xml: got unknown 'subscription':"
" {0!r}, changing to None".format(self.subscription))
self.subscription = None
else:
raise ValueError("Bad 'subscription'")
if self.ask not in (None, u"subscribe"):
if fix:
logger.debug("RosterItem.from_xml: got unknown 'ask':"
" {0!r}, changing to None".format(self.ask))
self.ask = None
else:
raise ValueError("Bad 'ask'") |
def verify_roster_result(self, fix = False):
"""Check if `self` is valid roster item.
A valid item must have a proper `subscription` value other than 'remove'
and a valid value for 'ask'.
:Parameters:
- `fix`: if `True`, then replace invalid 'subscription' and 'ask'
values with the defaults
:Types:
- `fix`: `bool`
:Raise: `ValueError` if the item is invalid.
"""
self._verify((None, u"from", u"to", u"both"), fix) |
def verify_roster_push(self, fix = False):
"""Check if `self` is valid roster push item.
A valid item must have a proper `subscription` value and a valid
value for 'ask'.
:Parameters:
- `fix`: if `True`, then replace invalid 'subscription' and 'ask'
values with the defaults
:Types:
- `fix`: `bool`
:Raise: `ValueError` if the item is invalid.
"""
self._verify((None, u"from", u"to", u"both", u"remove"), fix) |
def verify_roster_set(self, fix = False, settings = None):
"""Check if `self` is valid roster set item.
For use on server to validate incoming roster sets.
A valid item must have a proper `subscription` value and a valid
value for 'ask'. The lengths of the name and group names must fit
the configured limits.
:Parameters:
- `fix`: if `True`, then replace invalid 'subscription' and 'ask'
values with the right defaults
- `settings`: settings object providing the name limits
:Types:
- `fix`: `bool`
- `settings`: `XMPPSettings`
:Raise: `BadRequestProtocolError` if the item is invalid.
"""
# pylint: disable=R0912
try:
self._verify((None, u"remove"), fix)
except ValueError as err:
raise BadRequestProtocolError(unicode(err))
if self.ask:
if fix:
self.ask = None
else:
raise BadRequestProtocolError("'ask' in roster set")
if self.approved:
if fix:
self.approved = False
else:
raise BadRequestProtocolError("'approved' in roster set")
if settings is None:
settings = XMPPSettings()
name_length_limit = settings["roster_name_length_limit"]
if self.name and len(self.name) > name_length_limit:
raise NotAcceptableProtocolError(u"Roster item name too long")
group_length_limit = settings["roster_group_name_length_limit"]
for group in self.groups:
if not group:
raise NotAcceptableProtocolError(u"Roster group name empty")
if len(group) > group_length_limit:
raise NotAcceptableProtocolError(u"Roster group name too long")
if self._duplicate_group:
raise BadRequestProtocolError(u"Item group duplicated") |
def groups(self):
"""Set of groups defined in the roster.
:Return: the groups
:ReturnType: `set` of `unicode`
"""
groups = set()
for item in self._items:
groups |= item.groups
return groups |
def get_items_by_name(self, name, case_sensitive = True):
"""
Return a list of items with given name.
:Parameters:
- `name`: name to look-up
- `case_sensitive`: if `False` the matching will be case
insensitive.
:Types:
- `name`: `unicode`
- `case_sensitive`: `bool`
:Returntype: `list` of `RosterItem`
"""
if not case_sensitive and name:
name = name.lower()
result = []
for item in self._items:
if item.name == name:
result.append(item)
elif item.name is None:
continue
elif not case_sensitive and item.name.lower() == name:
result.append(item)
return result |