Dataset columns:
repo: string (7-55 chars)
path: string (4-127 chars)
func_name: string (1-88 chars)
original_string: string (75-19.8k chars)
language: string (1 class)
code: string (75-19.8k chars)
code_tokens: list
docstring: string (3-17.3k chars)
docstring_tokens: list
sha: string (40 chars)
url: string (87-242 chars)
partition: string (1 class)

repo: craffel/mir_eval | path: mir_eval/util.py | func_name: _fast_hit_windows | language: python

def _fast_hit_windows(ref, est, window):
    '''Fast calculation of windowed hits for time events.

    Given two lists of event times ``ref`` and ``est``, and a
    tolerance window, computes a list of pairings
    ``(i, j)`` where ``|ref[i] - est[j]| <= window``.

    This is equivalent to, but more efficient than the following:

    >>> hit_ref, hit_est = np.where(np.abs(np.subtract.outer(ref, est))
    ...                             <= window)

    Parameters
    ----------
    ref : np.ndarray, shape=(n,)
        Array of reference values
    est : np.ndarray, shape=(m,)
        Array of estimated values
    window : float >= 0
        Size of the tolerance window

    Returns
    -------
    hit_ref : np.ndarray
    hit_est : np.ndarray
        indices such that ``|hit_ref[i] - hit_est[i]| <= window``
    '''
    ref = np.asarray(ref)
    est = np.asarray(est)
    ref_idx = np.argsort(ref)
    ref_sorted = ref[ref_idx]
    left_idx = np.searchsorted(ref_sorted, est - window, side='left')
    right_idx = np.searchsorted(ref_sorted, est + window, side='right')
    hit_ref, hit_est = [], []
    for j, (start, end) in enumerate(zip(left_idx, right_idx)):
        hit_ref.extend(ref_idx[start:end])
        hit_est.extend([j] * (end - start))
    return hit_ref, hit_est
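
A minimal usage sketch with made-up event times; it assumes ``_fast_hit_windows`` is defined as above and ``numpy`` is imported as ``np``:

import numpy as np

# Made-up event times in seconds; with a 0.2 s tolerance only two pairs match.
ref = np.array([0.0, 1.0, 2.0])
est = np.array([0.1, 1.6, 2.05])

hit_ref, hit_est = _fast_hit_windows(ref, est, window=0.2)
# hit_ref pairs ref index 0 with est index 0 and ref index 2 with est index 2;
# est[1] is more than 0.2 s from every reference event, so it matches nothing.
print(hit_ref, hit_est)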
sha: f41c8dafaea04b411252a516d1965af43c7d531b | url: https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L713-L755 | partition: train

repo: craffel/mir_eval | path: mir_eval/util.py | func_name: validate_events | language: python

def validate_events(events, max_time=30000.):
    """Checks that a 1-d event location ndarray is well-formed, and raises
    errors if not.

    Parameters
    ----------
    events : np.ndarray, shape=(n,)
        Array of event times
    max_time : float
        If an event is found above this time, a ValueError will be raised.
        (Default value = 30000.)
    """
    # Make sure no event times are huge
    if (events > max_time).any():
        raise ValueError('An event at time {} was found which is greater than '
                         'the maximum allowable time of max_time = {} (did you'
                         ' supply event times in '
                         'seconds?)'.format(events.max(), max_time))
    # Make sure event locations are 1-d np ndarrays
    if events.ndim != 1:
        raise ValueError('Event times should be 1-d numpy ndarray, '
                         'but shape={}'.format(events.shape))
    # Make sure event times are increasing
    if (np.diff(events) < 0).any():
        raise ValueError('Events should be in increasing order.')
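
A short sketch of the validator's behavior on made-up event times, assuming the function above and ``numpy`` (as ``np``) are in scope:

import numpy as np

validate_events(np.array([0.5, 1.0, 2.5]))    # well-formed: passes silently
try:
    validate_events(np.array([1.0, 0.5]))     # decreasing times are rejected
except ValueError as err:
    print(err)                                # "Events should be in increasing order."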
sha: f41c8dafaea04b411252a516d1965af43c7d531b | url: https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L783-L808 | partition: train

repo: craffel/mir_eval | path: mir_eval/util.py | func_name: validate_frequencies | language: python

def validate_frequencies(frequencies, max_freq, min_freq,
                         allow_negatives=False):
    """Checks that a 1-d frequency ndarray is well-formed, and raises
    errors if not.

    Parameters
    ----------
    frequencies : np.ndarray, shape=(n,)
        Array of frequency values
    max_freq : float
        If a frequency is found above this pitch, a ValueError will be raised.
        (Default value = 5000.)
    min_freq : float
        If a frequency is found below this pitch, a ValueError will be raised.
        (Default value = 20.)
    allow_negatives : bool
        Whether or not to allow negative frequency values.
    """
    # If flag is true, map frequencies to their absolute value.
    if allow_negatives:
        frequencies = np.abs(frequencies)
    # Make sure no frequency values are huge
    if (np.abs(frequencies) > max_freq).any():
        raise ValueError('A frequency of {} was found which is greater than '
                         'the maximum allowable value of max_freq = {} (did '
                         'you supply frequency values in '
                         'Hz?)'.format(frequencies.max(), max_freq))
    # Make sure no frequency values are tiny
    if (np.abs(frequencies) < min_freq).any():
        raise ValueError('A frequency of {} was found which is less than the '
                         'minimum allowable value of min_freq = {} (did you '
                         'supply frequency values in '
                         'Hz?)'.format(frequencies.min(), min_freq))
    # Make sure frequency values are 1-d np ndarrays
    if frequencies.ndim != 1:
        raise ValueError('Frequencies should be 1-d numpy ndarray, '
                         'but shape={}'.format(frequencies.shape))
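
A short sketch with made-up frequency values, assuming the function above and ``numpy`` (as ``np``) are in scope:

import numpy as np

# In-range values pass silently; an out-of-range value triggers the "Hz?" hint.
validate_frequencies(np.array([440.0, 880.0]), max_freq=5000., min_freq=20.)
try:
    validate_frequencies(np.array([440.0, 8000.0]), max_freq=5000., min_freq=20.)
except ValueError as err:
    print(err)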
sha: f41c8dafaea04b411252a516d1965af43c7d531b | url: https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L811-L847 | partition: train

repo: craffel/mir_eval | path: mir_eval/util.py | func_name: intervals_to_durations | language: python

def intervals_to_durations(intervals):
    """Converts an array of n intervals to their n durations.

    Parameters
    ----------
    intervals : np.ndarray, shape=(n, 2)
        An array of time intervals, as returned by
        :func:`mir_eval.io.load_intervals()`.
        The ``i`` th interval spans time ``intervals[i, 0]`` to
        ``intervals[i, 1]``.

    Returns
    -------
    durations : np.ndarray, shape=(n,)
        Array of the duration of each interval.
    """
    validate_intervals(intervals)
    return np.abs(np.diff(intervals, axis=-1)).flatten()
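
A minimal sketch with a made-up interval matrix; it assumes ``numpy`` (as ``np``) and ``validate_intervals`` (another ``mir_eval.util`` helper not shown in this row) are in scope:

import numpy as np

# Made-up (n, 2) interval matrix; each row is (start, end) in seconds.
intervals = np.array([[0.0, 1.5],
                      [1.5, 3.0],
                      [3.0, 3.25]])
print(intervals_to_durations(intervals))  # durations 1.5, 1.5, 0.25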
sha: f41c8dafaea04b411252a516d1965af43c7d531b | url: https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L907-L925 | partition: train

repo: craffel/mir_eval | path: mir_eval/separation.py | func_name: validate | language: python

def validate(reference_sources, estimated_sources):
    """Checks that the input data to a metric are valid, and throws helpful
    errors if not.

    Parameters
    ----------
    reference_sources : np.ndarray, shape=(nsrc, nsampl)
        matrix containing true sources
    estimated_sources : np.ndarray, shape=(nsrc, nsampl)
        matrix containing estimated sources
    """
    if reference_sources.shape != estimated_sources.shape:
        raise ValueError('The shape of estimated sources and the true '
                         'sources should match. reference_sources.shape '
                         '= {}, estimated_sources.shape '
                         '= {}'.format(reference_sources.shape,
                                       estimated_sources.shape))
    if reference_sources.ndim > 3 or estimated_sources.ndim > 3:
        raise ValueError('The number of dimensions is too high (must be less '
                         'than 3). reference_sources.ndim = {}, '
                         'estimated_sources.ndim '
                         '= {}'.format(reference_sources.ndim,
                                       estimated_sources.ndim))
    if reference_sources.size == 0:
        warnings.warn("reference_sources is empty, should be of size "
                      "(nsrc, nsample). sdr, sir, sar, and perm will all "
                      "be empty np.ndarrays")
    elif _any_source_silent(reference_sources):
        raise ValueError('All the reference sources should be non-silent (not '
                         'all-zeros), but at least one of the reference '
                         'sources is all 0s, which introduces ambiguity to the'
                         ' evaluation. (Otherwise we can add infinitely many '
                         'all-zero sources.)')
    if estimated_sources.size == 0:
        warnings.warn("estimated_sources is empty, should be of size "
                      "(nsrc, nsample). sdr, sir, sar, and perm will all "
                      "be empty np.ndarrays")
    elif _any_source_silent(estimated_sources):
        raise ValueError('All the estimated sources should be non-silent (not '
                         'all-zeros), but at least one of the estimated '
                         'sources is all 0s. Since we require each reference '
                         'source to be non-silent, having a silent estimated '
                         'source will result in an underdetermined system.')
    if (estimated_sources.shape[0] > MAX_SOURCES or
            reference_sources.shape[0] > MAX_SOURCES):
        raise ValueError('The supplied matrices should be of shape (nsrc,'
                         ' nsampl) but reference_sources.shape[0] = {} and '
                         'estimated_sources.shape[0] = {} which is greater '
                         'than mir_eval.separation.MAX_SOURCES = {}. To '
                         'override this check, set '
                         'mir_eval.separation.MAX_SOURCES to a '
                         'larger value.'.format(reference_sources.shape[0],
                                                estimated_sources.shape[0],
                                                MAX_SOURCES))
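
A minimal sketch with made-up signals, assuming the function above plus the surrounding ``mir_eval.separation`` module-level names (``warnings``, ``MAX_SOURCES``, ``_any_source_silent``) are in scope:

import numpy as np

# Two random (hence non-silent) sources of matching shape pass; a shape
# mismatch raises the first ValueError.
rng = np.random.RandomState(0)
reference_sources = rng.randn(2, 4096)
estimated_sources = rng.randn(2, 4096)
validate(reference_sources, estimated_sources)          # passes silently
try:
    validate(reference_sources, estimated_sources[:1])  # shapes differ
except ValueError as err:
    print(err)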
sha: f41c8dafaea04b411252a516d1965af43c7d531b | url: https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/separation.py#L62-L121 | partition: train

repo: craffel/mir_eval | path: mir_eval/separation.py | func_name: _any_source_silent | language: python

def _any_source_silent(sources):
    """Returns true if the parameter sources has any silent first dimensions"""
    return np.any(np.all(np.sum(
        sources, axis=tuple(range(2, sources.ndim))) == 0, axis=1))
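
A tiny sketch, assuming the helper above and ``numpy`` (as ``np``) are in scope:

import numpy as np

sources = np.ones((2, 8))
sources[1] = 0.0                              # second source is all zeros (silent)
print(_any_source_silent(sources))            # True
print(_any_source_silent(np.ones((2, 8))))    # False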
sha: f41c8dafaea04b411252a516d1965af43c7d531b | url: https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/separation.py#L124-L127 | partition: train

repo: craffel/mir_eval | path: mir_eval/separation.py | func_name: bss_eval_sources | language: python

def bss_eval_sources(reference_sources, estimated_sources,
                     compute_permutation=True):
    """
    Ordering and measurement of the separation quality for estimated source
    signals in terms of filtered true source, interference and artifacts.

    The decomposition allows a time-invariant filter distortion of length
    512, as described in Section III.B of [#vincent2006performance]_.

    Passing ``False`` for ``compute_permutation`` will improve the computation
    performance of the evaluation; however, it is not always appropriate and
    is not the way that the BSS_EVAL Matlab toolbox computes bss_eval_sources.

    Examples
    --------
    >>> # reference_sources[n] should be an ndarray of samples of the
    >>> # n'th reference source
    >>> # estimated_sources[n] should be the same for the n'th estimated
    >>> # source
    >>> (sdr, sir, sar,
    ...  perm) = mir_eval.separation.bss_eval_sources(reference_sources,
    ...                                               estimated_sources)

    Parameters
    ----------
    reference_sources : np.ndarray, shape=(nsrc, nsampl)
        matrix containing true sources (must have same shape as
        estimated_sources)
    estimated_sources : np.ndarray, shape=(nsrc, nsampl)
        matrix containing estimated sources (must have same shape as
        reference_sources)
    compute_permutation : bool, optional
        compute permutation of estimate/source combinations (True by default)

    Returns
    -------
    sdr : np.ndarray, shape=(nsrc,)
        vector of Signal to Distortion Ratios (SDR)
    sir : np.ndarray, shape=(nsrc,)
        vector of Source to Interference Ratios (SIR)
    sar : np.ndarray, shape=(nsrc,)
        vector of Sources to Artifacts Ratios (SAR)
    perm : np.ndarray, shape=(nsrc,)
        vector containing the best ordering of estimated sources in
        the mean SIR sense (estimated source number ``perm[j]`` corresponds to
        true source number ``j``). Note: ``perm`` will be ``[0, 1, ...,
        nsrc-1]`` if ``compute_permutation`` is ``False``.

    References
    ----------
    .. [#] Emmanuel Vincent, Shoko Araki, Fabian J. Theis, Guido Nolte, Pau
        Bofill, Hiroshi Sawada, Alexey Ozerov, B. Vikrham Gowreesunker, Dominik
        Lutter and Ngoc Q.K. Duong, "The Signal Separation Evaluation Campaign
        (2007-2010): Achievements and remaining challenges", Signal Processing,
        92, pp. 1928-1936, 2012.
    """
    # make sure the input is of shape (nsrc, nsampl)
    if estimated_sources.ndim == 1:
        estimated_sources = estimated_sources[np.newaxis, :]
    if reference_sources.ndim == 1:
        reference_sources = reference_sources[np.newaxis, :]
    validate(reference_sources, estimated_sources)
    # If empty matrices were supplied, return empty lists (special case)
    if reference_sources.size == 0 or estimated_sources.size == 0:
        return np.array([]), np.array([]), np.array([]), np.array([])
    nsrc = estimated_sources.shape[0]
    # does user desire permutations?
    if compute_permutation:
        # compute criteria for all possible pair matches
        sdr = np.empty((nsrc, nsrc))
        sir = np.empty((nsrc, nsrc))
        sar = np.empty((nsrc, nsrc))
        for jest in range(nsrc):
            for jtrue in range(nsrc):
                s_true, e_spat, e_interf, e_artif = \
                    _bss_decomp_mtifilt(reference_sources,
                                        estimated_sources[jest],
                                        jtrue, 512)
                sdr[jest, jtrue], sir[jest, jtrue], sar[jest, jtrue] = \
                    _bss_source_crit(s_true, e_spat, e_interf, e_artif)
        # select the best ordering
        perms = list(itertools.permutations(list(range(nsrc))))
        mean_sir = np.empty(len(perms))
        dum = np.arange(nsrc)
        for (i, perm) in enumerate(perms):
            mean_sir[i] = np.mean(sir[perm, dum])
        popt = perms[np.argmax(mean_sir)]
        idx = (popt, dum)
        return (sdr[idx], sir[idx], sar[idx], np.asarray(popt))
    else:
        # compute criteria for only the simple correspondence
        # (estimate 1 is estimate corresponding to reference source 1, etc.)
        sdr = np.empty(nsrc)
        sir = np.empty(nsrc)
        sar = np.empty(nsrc)
        for j in range(nsrc):
            s_true, e_spat, e_interf, e_artif = \
                _bss_decomp_mtifilt(reference_sources,
                                    estimated_sources[j],
                                    j, 512)
            sdr[j], sir[j], sar[j] = \
                _bss_source_crit(s_true, e_spat, e_interf, e_artif)
        # return the default permutation for compatibility
        popt = np.arange(nsrc)
        return (sdr, sir, sar, popt)
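
A minimal sketch with made-up signals, assuming a ``mir_eval`` version that ships this function is installed; the estimates are the references swapped plus a little noise, so the returned permutation should recover the swap:

import numpy as np
import mir_eval

# Two made-up reference sources; estimate 0 approximates reference 1 and vice versa.
rng = np.random.RandomState(0)
reference_sources = rng.randn(2, 8000)
estimated_sources = reference_sources[::-1] + 0.01 * rng.randn(2, 8000)

sdr, sir, sar, perm = mir_eval.separation.bss_eval_sources(reference_sources,
                                                           estimated_sources)
print(perm)  # expected [1 0]: estimated source perm[j] corresponds to true source j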
sha: f41c8dafaea04b411252a516d1965af43c7d531b | url: https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/separation.py#L130-L241 | partition: train

repo: craffel/mir_eval | path: mir_eval/separation.py | func_name: bss_eval_sources_framewise | language: python

def bss_eval_sources_framewise(reference_sources, estimated_sources,
                               window=30*44100, hop=15*44100,
                               compute_permutation=False):
    """Framewise computation of bss_eval_sources

    Please be aware that this function does not compute permutations (by
    default) on the possible relations between reference_sources and
    estimated_sources due to the dangers of a changing permutation. Therefore
    (by default), it assumes that ``reference_sources[i]`` corresponds to
    ``estimated_sources[i]``. To enable computing permutations please set
    ``compute_permutation`` to be ``True`` and check that the returned ``perm``
    is identical for all windows.

    NOTE: if ``reference_sources`` and ``estimated_sources`` would be evaluated
    using only a single window or are shorter than the window length, the
    result of :func:`mir_eval.separation.bss_eval_sources` called on
    ``reference_sources`` and ``estimated_sources`` (with the
    ``compute_permutation`` parameter passed to
    :func:`mir_eval.separation.bss_eval_sources`) is returned.

    Examples
    --------
    >>> # reference_sources[n] should be an ndarray of samples of the
    >>> # n'th reference source
    >>> # estimated_sources[n] should be the same for the n'th estimated
    >>> # source
    >>> (sdr, sir, sar,
    ...  perm) = mir_eval.separation.bss_eval_sources_framewise(
    ...     reference_sources,
    ...     estimated_sources)

    Parameters
    ----------
    reference_sources : np.ndarray, shape=(nsrc, nsampl)
        matrix containing true sources (must have the same shape as
        ``estimated_sources``)
    estimated_sources : np.ndarray, shape=(nsrc, nsampl)
        matrix containing estimated sources (must have the same shape as
        ``reference_sources``)
    window : int, optional
        Window length for framewise evaluation (default value is 30s at a
        sample rate of 44.1kHz)
    hop : int, optional
        Hop size for framewise evaluation (default value is 15s at a
        sample rate of 44.1kHz)
    compute_permutation : bool, optional
        compute permutation of estimate/source combinations for all windows
        (False by default)

    Returns
    -------
    sdr : np.ndarray, shape=(nsrc, nframes)
        vector of Signal to Distortion Ratios (SDR)
    sir : np.ndarray, shape=(nsrc, nframes)
        vector of Source to Interference Ratios (SIR)
    sar : np.ndarray, shape=(nsrc, nframes)
        vector of Sources to Artifacts Ratios (SAR)
    perm : np.ndarray, shape=(nsrc, nframes)
        vector containing the best ordering of estimated sources in
        the mean SIR sense (estimated source number ``perm[j]`` corresponds to
        true source number ``j``). Note: ``perm`` will be ``range(nsrc)`` for
        all windows if ``compute_permutation`` is ``False``
    """
    # make sure the input is of shape (nsrc, nsampl)
    if estimated_sources.ndim == 1:
        estimated_sources = estimated_sources[np.newaxis, :]
    if reference_sources.ndim == 1:
        reference_sources = reference_sources[np.newaxis, :]
    validate(reference_sources, estimated_sources)
    # If empty matrices were supplied, return empty lists (special case)
    if reference_sources.size == 0 or estimated_sources.size == 0:
        return np.array([]), np.array([]), np.array([]), np.array([])
    nsrc = reference_sources.shape[0]
    nwin = int(
        np.floor((reference_sources.shape[1] - window + hop) / hop)
    )
    # if fewer than 2 windows would be evaluated, return the sources result
    if nwin < 2:
        result = bss_eval_sources(reference_sources,
                                  estimated_sources,
                                  compute_permutation)
        return [np.expand_dims(score, -1) for score in result]
    # compute the criteria across all windows
    sdr = np.empty((nsrc, nwin))
    sir = np.empty((nsrc, nwin))
    sar = np.empty((nsrc, nwin))
    perm = np.empty((nsrc, nwin))
    # k iterates across all the windows
    for k in range(nwin):
        win_slice = slice(k * hop, k * hop + window)
        ref_slice = reference_sources[:, win_slice]
        est_slice = estimated_sources[:, win_slice]
        # check for a silent frame
        if (not _any_source_silent(ref_slice) and
                not _any_source_silent(est_slice)):
            sdr[:, k], sir[:, k], sar[:, k], perm[:, k] = bss_eval_sources(
                ref_slice, est_slice, compute_permutation
            )
        else:
            # if we have a silent frame set results as np.nan
            sdr[:, k] = sir[:, k] = sar[:, k] = perm[:, k] = np.nan
    return sdr, sir, sar, perm
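
A minimal sketch with made-up signals and a deliberately short window/hop (instead of the 30 s / 15 s defaults), assuming a ``mir_eval`` version that ships this function is installed:

import numpy as np
import mir_eval

# Framewise scores over a made-up 20000-sample signal;
# nwin = floor((20000 - 4000 + 2000) / 2000) = 9 frames.
rng = np.random.RandomState(0)
reference_sources = rng.randn(2, 20000)
estimated_sources = reference_sources + 0.1 * rng.randn(2, 20000)

sdr, sir, sar, perm = mir_eval.separation.bss_eval_sources_framewise(
    reference_sources, estimated_sources, window=4000, hop=2000)
print(sdr.shape)  # (2, 9): one column of scores per frame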
sha: f41c8dafaea04b411252a516d1965af43c7d531b | url: https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/separation.py#L244-L353 | partition: train

repo: craffel/mir_eval | path: mir_eval/separation.py | func_name: bss_eval_images_framewise | language: python

def bss_eval_images_framewise(reference_sources, estimated_sources,
                              window=30*44100, hop=15*44100,
                              compute_permutation=False):
    """Framewise computation of bss_eval_images

    Please be aware that this function does not compute permutations (by
    default) on the possible relations between ``reference_sources`` and
    ``estimated_sources`` due to the dangers of a changing permutation.
    Therefore (by default), it assumes that ``reference_sources[i]``
    corresponds to ``estimated_sources[i]``. To enable computing permutations
    please set ``compute_permutation`` to be ``True`` and check that the
    returned ``perm`` is identical for all windows.

    NOTE: if ``reference_sources`` and ``estimated_sources`` would be evaluated
    using only a single window or are shorter than the window length, the
    result of ``bss_eval_images`` called on ``reference_sources`` and
    ``estimated_sources`` (with the ``compute_permutation`` parameter passed to
    ``bss_eval_images``) is returned

    Examples
    --------
    >>> # reference_sources[n] should be an ndarray of samples of the
    >>> # n'th reference source
    >>> # estimated_sources[n] should be the same for the n'th estimated
    >>> # source
    >>> (sdr, isr, sir, sar,
    ...  perm) = mir_eval.separation.bss_eval_images_framewise(
    ...     reference_sources,
    ...     estimated_sources,
    ...     window,
    ...     hop)

    Parameters
    ----------
    reference_sources : np.ndarray, shape=(nsrc, nsampl, nchan)
        matrix containing true sources (must have the same shape as
        ``estimated_sources``)
    estimated_sources : np.ndarray, shape=(nsrc, nsampl, nchan)
        matrix containing estimated sources (must have the same shape as
        ``reference_sources``)
    window : int
        Window length for framewise evaluation
    hop : int
        Hop size for framewise evaluation
    compute_permutation : bool, optional
        compute permutation of estimate/source combinations for all windows
        (False by default)

    Returns
    -------
    sdr : np.ndarray, shape=(nsrc, nframes)
        vector of Signal to Distortion Ratios (SDR)
    isr : np.ndarray, shape=(nsrc, nframes)
        vector of source Image to Spatial distortion Ratios (ISR)
    sir : np.ndarray, shape=(nsrc, nframes)
        vector of Source to Interference Ratios (SIR)
    sar : np.ndarray, shape=(nsrc, nframes)
        vector of Sources to Artifacts Ratios (SAR)
    perm : np.ndarray, shape=(nsrc, nframes)
        vector containing the best ordering of estimated sources in
        the mean SIR sense (estimated source number perm[j] corresponds to
        true source number j)
        Note: perm will be range(nsrc) for all windows if compute_permutation
        is False
    """
    # make sure the input has 3 dimensions
    # assuming input is in shape (nsampl) or (nsrc, nsampl)
    estimated_sources = np.atleast_3d(estimated_sources)
    reference_sources = np.atleast_3d(reference_sources)
    # we will ensure input doesn't have more than 3 dimensions in validate
    validate(reference_sources, estimated_sources)
    # If empty matrices were supplied, return empty lists (special case)
    if reference_sources.size == 0 or estimated_sources.size == 0:
        return np.array([]), np.array([]), np.array([]), np.array([])
    nsrc = reference_sources.shape[0]
    nwin = int(
        np.floor((reference_sources.shape[1] - window + hop) / hop)
    )
    # if fewer than 2 windows would be evaluated, return the images result
    if nwin < 2:
        result = bss_eval_images(reference_sources,
                                 estimated_sources,
                                 compute_permutation)
        return [np.expand_dims(score, -1) for score in result]
    # compute the criteria across all windows
    sdr = np.empty((nsrc, nwin))
    isr = np.empty((nsrc, nwin))
    sir = np.empty((nsrc, nwin))
    sar = np.empty((nsrc, nwin))
    perm = np.empty((nsrc, nwin))
    # k iterates across all the windows
    for k in range(nwin):
        win_slice = slice(k * hop, k * hop + window)
        ref_slice = reference_sources[:, win_slice, :]
        est_slice = estimated_sources[:, win_slice, :]
        # check for a silent frame
        if (not _any_source_silent(ref_slice) and
                not _any_source_silent(est_slice)):
            sdr[:, k], isr[:, k], sir[:, k], sar[:, k], perm[:, k] = \
                bss_eval_images(
                    ref_slice, est_slice, compute_permutation
                )
        else:
            # if we have a silent frame set results as np.nan
            sdr[:, k] = sir[:, k] = sar[:, k] = perm[:, k] = np.nan
    return sdr, isr, sir, sar, perm
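
A minimal sketch for the image-based wrapper with made-up stereo signals and a short window/hop, assuming a ``mir_eval`` version that ships this function is installed:

import numpy as np
import mir_eval

# Inputs carry an explicit channel axis (here stereo);
# nwin = floor((12000 - 4000 + 2000) / 2000) = 5 frames.
rng = np.random.RandomState(0)
reference_sources = rng.randn(2, 12000, 2)
estimated_sources = reference_sources + 0.1 * rng.randn(2, 12000, 2)

sdr, isr, sir, sar, perm = mir_eval.separation.bss_eval_images_framewise(
    reference_sources, estimated_sources, window=4000, hop=2000)
print(sdr.shape)  # (2, 5)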
|
python
|
def bss_eval_images_framewise(reference_sources, estimated_sources,
window=30*44100, hop=15*44100,
compute_permutation=False):
"""Framewise computation of bss_eval_images
Please be aware that this function does not compute permutations (by
default) on the possible relations between ``reference_sources`` and
``estimated_sources`` due to the dangers of a changing permutation.
Therefore (by default), it assumes that ``reference_sources[i]``
corresponds to ``estimated_sources[i]``. To enable computing permutations
please set ``compute_permutation`` to be ``True`` and check that the
returned ``perm`` is identical for all windows.
NOTE: if ``reference_sources`` and ``estimated_sources`` would be evaluated
using only a single window or are shorter than the window length, the
result of ``bss_eval_images`` called on ``reference_sources`` and
``estimated_sources`` (with the ``compute_permutation`` parameter passed to
``bss_eval_images``) is returned
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated
>>> # source
>>> (sdr, isr, sir, sar,
... perm) = mir_eval.separation.bss_eval_images_framewise(
reference_sources,
... estimated_sources,
window,
.... hop)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl, nchan)
matrix containing true sources (must have the same shape as
``estimated_sources``)
estimated_sources : np.ndarray, shape=(nsrc, nsampl, nchan)
matrix containing estimated sources (must have the same shape as
``reference_sources``)
window : int
Window length for framewise evaluation
hop : int
Hop size for framewise evaluation
compute_permutation : bool, optional
compute permutation of estimate/source combinations for all windows
(False by default)
Returns
-------
sdr : np.ndarray, shape=(nsrc, nframes)
vector of Signal to Distortion Ratios (SDR)
isr : np.ndarray, shape=(nsrc, nframes)
vector of source Image to Spatial distortion Ratios (ISR)
sir : np.ndarray, shape=(nsrc, nframes)
vector of Source to Interference Ratios (SIR)
sar : np.ndarray, shape=(nsrc, nframes)
vector of Sources to Artifacts Ratios (SAR)
perm : np.ndarray, shape=(nsrc, nframes)
vector containing the best ordering of estimated sources in
the mean SIR sense (estimated source number perm[j] corresponds to
true source number j)
Note: perm will be range(nsrc) for all windows if compute_permutation
is False
"""
# make sure the input has 3 dimensions
# assuming input is in shape (nsampl) or (nsrc, nsampl)
estimated_sources = np.atleast_3d(estimated_sources)
reference_sources = np.atleast_3d(reference_sources)
# we will ensure input doesn't have more than 3 dimensions in validate
validate(reference_sources, estimated_sources)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0:
return np.array([]), np.array([]), np.array([]), np.array([]), np.array([])
nsrc = reference_sources.shape[0]
nwin = int(
np.floor((reference_sources.shape[1] - window + hop) / hop)
)
# if fewer than 2 windows would be evaluated, return the images result
if nwin < 2:
result = bss_eval_images(reference_sources,
estimated_sources,
compute_permutation)
return [np.expand_dims(score, -1) for score in result]
# compute the criteria across all windows
sdr = np.empty((nsrc, nwin))
isr = np.empty((nsrc, nwin))
sir = np.empty((nsrc, nwin))
sar = np.empty((nsrc, nwin))
perm = np.empty((nsrc, nwin))
# k iterates across all the windows
for k in range(nwin):
win_slice = slice(k * hop, k * hop + window)
ref_slice = reference_sources[:, win_slice, :]
est_slice = estimated_sources[:, win_slice, :]
# check for a silent frame
if (not _any_source_silent(ref_slice) and
not _any_source_silent(est_slice)):
sdr[:, k], isr[:, k], sir[:, k], sar[:, k], perm[:, k] = \
bss_eval_images(
ref_slice, est_slice, compute_permutation
)
else:
# if we have a silent frame set results as np.nan
sdr[:, k] = isr[:, k] = sir[:, k] = sar[:, k] = perm[:, k] = np.nan
return sdr, isr, sir, sar, perm
|
[
"def",
"bss_eval_images_framewise",
"(",
"reference_sources",
",",
"estimated_sources",
",",
"window",
"=",
"30",
"*",
"44100",
",",
"hop",
"=",
"15",
"*",
"44100",
",",
"compute_permutation",
"=",
"False",
")",
":",
"# make sure the input has 3 dimensions",
"# assuming input is in shape (nsampl) or (nsrc, nsampl)",
"estimated_sources",
"=",
"np",
".",
"atleast_3d",
"(",
"estimated_sources",
")",
"reference_sources",
"=",
"np",
".",
"atleast_3d",
"(",
"reference_sources",
")",
"# we will ensure input doesn't have more than 3 dimensions in validate",
"validate",
"(",
"reference_sources",
",",
"estimated_sources",
")",
"# If empty matrices were supplied, return empty lists (special case)",
"if",
"reference_sources",
".",
"size",
"==",
"0",
"or",
"estimated_sources",
".",
"size",
"==",
"0",
":",
"return",
"np",
".",
"array",
"(",
"[",
"]",
")",
",",
"np",
".",
"array",
"(",
"[",
"]",
")",
",",
"np",
".",
"array",
"(",
"[",
"]",
")",
",",
"np",
".",
"array",
"(",
"[",
"]",
")",
"nsrc",
"=",
"reference_sources",
".",
"shape",
"[",
"0",
"]",
"nwin",
"=",
"int",
"(",
"np",
".",
"floor",
"(",
"(",
"reference_sources",
".",
"shape",
"[",
"1",
"]",
"-",
"window",
"+",
"hop",
")",
"/",
"hop",
")",
")",
"# if fewer than 2 windows would be evaluated, return the images result",
"if",
"nwin",
"<",
"2",
":",
"result",
"=",
"bss_eval_images",
"(",
"reference_sources",
",",
"estimated_sources",
",",
"compute_permutation",
")",
"return",
"[",
"np",
".",
"expand_dims",
"(",
"score",
",",
"-",
"1",
")",
"for",
"score",
"in",
"result",
"]",
"# compute the criteria across all windows",
"sdr",
"=",
"np",
".",
"empty",
"(",
"(",
"nsrc",
",",
"nwin",
")",
")",
"isr",
"=",
"np",
".",
"empty",
"(",
"(",
"nsrc",
",",
"nwin",
")",
")",
"sir",
"=",
"np",
".",
"empty",
"(",
"(",
"nsrc",
",",
"nwin",
")",
")",
"sar",
"=",
"np",
".",
"empty",
"(",
"(",
"nsrc",
",",
"nwin",
")",
")",
"perm",
"=",
"np",
".",
"empty",
"(",
"(",
"nsrc",
",",
"nwin",
")",
")",
"# k iterates across all the windows",
"for",
"k",
"in",
"range",
"(",
"nwin",
")",
":",
"win_slice",
"=",
"slice",
"(",
"k",
"*",
"hop",
",",
"k",
"*",
"hop",
"+",
"window",
")",
"ref_slice",
"=",
"reference_sources",
"[",
":",
",",
"win_slice",
",",
":",
"]",
"est_slice",
"=",
"estimated_sources",
"[",
":",
",",
"win_slice",
",",
":",
"]",
"# check for a silent frame",
"if",
"(",
"not",
"_any_source_silent",
"(",
"ref_slice",
")",
"and",
"not",
"_any_source_silent",
"(",
"est_slice",
")",
")",
":",
"sdr",
"[",
":",
",",
"k",
"]",
",",
"isr",
"[",
":",
",",
"k",
"]",
",",
"sir",
"[",
":",
",",
"k",
"]",
",",
"sar",
"[",
":",
",",
"k",
"]",
",",
"perm",
"[",
":",
",",
"k",
"]",
"=",
"bss_eval_images",
"(",
"ref_slice",
",",
"est_slice",
",",
"compute_permutation",
")",
"else",
":",
"# if we have a silent frame set results as np.nan",
"sdr",
"[",
":",
",",
"k",
"]",
"=",
"sir",
"[",
":",
",",
"k",
"]",
"=",
"sar",
"[",
":",
",",
"k",
"]",
"=",
"perm",
"[",
":",
",",
"k",
"]",
"=",
"np",
".",
"nan",
"return",
"sdr",
",",
"isr",
",",
"sir",
",",
"sar",
",",
"perm"
] |
Framewise computation of bss_eval_images
Please be aware that this function does not compute permutations (by
default) on the possible relations between ``reference_sources`` and
``estimated_sources`` due to the dangers of a changing permutation.
Therefore (by default), it assumes that ``reference_sources[i]``
corresponds to ``estimated_sources[i]``. To enable computing permutations
please set ``compute_permutation`` to be ``True`` and check that the
returned ``perm`` is identical for all windows.
NOTE: if ``reference_sources`` and ``estimated_sources`` would be evaluated
using only a single window or are shorter than the window length, the
result of ``bss_eval_images`` called on ``reference_sources`` and
``estimated_sources`` (with the ``compute_permutation`` parameter passed to
``bss_eval_images``) is returned
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated
>>> # source
>>> (sdr, isr, sir, sar,
... perm) = mir_eval.separation.bss_eval_images_framewise(
... reference_sources,
... estimated_sources,
... window,
... hop)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl, nchan)
matrix containing true sources (must have the same shape as
``estimated_sources``)
estimated_sources : np.ndarray, shape=(nsrc, nsampl, nchan)
matrix containing estimated sources (must have the same shape as
``reference_sources``)
window : int
Window length for framewise evaluation
hop : int
Hop size for framewise evaluation
compute_permutation : bool, optional
compute permutation of estimate/source combinations for all windows
(False by default)
Returns
-------
sdr : np.ndarray, shape=(nsrc, nframes)
vector of Signal to Distortion Ratios (SDR)
isr : np.ndarray, shape=(nsrc, nframes)
vector of source Image to Spatial distortion Ratios (ISR)
sir : np.ndarray, shape=(nsrc, nframes)
vector of Source to Interference Ratios (SIR)
sar : np.ndarray, shape=(nsrc, nframes)
vector of Sources to Artifacts Ratios (SAR)
perm : np.ndarray, shape=(nsrc, nframes)
vector containing the best ordering of estimated sources in
the mean SIR sense (estimated source number perm[j] corresponds to
true source number j)
Note: perm will be range(nsrc) for all windows if compute_permutation
is False
|
[
"Framewise",
"computation",
"of",
"bss_eval_images"
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/separation.py#L493-L606
|
train
|
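As a quick illustration of the bss_eval_images_framewise entry above, here is a hedged usage sketch (it assumes this version of mir_eval plus numpy are installed; the synthetic stereo sources and the 1-second window/hop choice are invented for the example):

import numpy as np
import mir_eval

rng = np.random.RandomState(0)
fs = 8000
# two 4-second stereo sources, shape (nsrc, nsampl, nchan)
reference_sources = rng.randn(2, 4 * fs, 2)
estimated_sources = reference_sources + 0.1 * rng.randn(2, 4 * fs, 2)

# evaluate in 1-second windows with 50% overlap instead of the 30 s / 15 s defaults
sdr, isr, sir, sar, perm = mir_eval.separation.bss_eval_images_framewise(
    reference_sources, estimated_sources, window=fs, hop=fs // 2)
print(sdr.shape)  # (nsrc, nframes) == (2, 7) for this input length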
craffel/mir_eval
|
mir_eval/separation.py
|
_project
|
def _project(reference_sources, estimated_source, flen):
"""Least-squares projection of estimated source on the subspace spanned by
delayed versions of reference sources, with delays between 0 and flen-1
"""
nsrc = reference_sources.shape[0]
nsampl = reference_sources.shape[1]
# computing coefficients of least squares problem via FFT ##
# zero padding and FFT of input data
reference_sources = np.hstack((reference_sources,
np.zeros((nsrc, flen - 1))))
estimated_source = np.hstack((estimated_source, np.zeros(flen - 1)))
n_fft = int(2**np.ceil(np.log2(nsampl + flen - 1.)))
sf = scipy.fftpack.fft(reference_sources, n=n_fft, axis=1)
sef = scipy.fftpack.fft(estimated_source, n=n_fft)
# inner products between delayed versions of reference_sources
G = np.zeros((nsrc * flen, nsrc * flen))
for i in range(nsrc):
for j in range(nsrc):
ssf = sf[i] * np.conj(sf[j])
ssf = np.real(scipy.fftpack.ifft(ssf))
ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])),
r=ssf[:flen])
G[i * flen: (i+1) * flen, j * flen: (j+1) * flen] = ss
G[j * flen: (j+1) * flen, i * flen: (i+1) * flen] = ss.T
# inner products between estimated_source and delayed versions of
# reference_sources
D = np.zeros(nsrc * flen)
for i in range(nsrc):
ssef = sf[i] * np.conj(sef)
ssef = np.real(scipy.fftpack.ifft(ssef))
D[i * flen: (i+1) * flen] = np.hstack((ssef[0], ssef[-1:-flen:-1]))
# Computing projection
# Distortion filters
try:
C = np.linalg.solve(G, D).reshape(flen, nsrc, order='F')
except np.linalg.linalg.LinAlgError:
C = np.linalg.lstsq(G, D)[0].reshape(flen, nsrc, order='F')
# Filtering
sproj = np.zeros(nsampl + flen - 1)
for i in range(nsrc):
sproj += fftconvolve(C[:, i], reference_sources[i])[:nsampl + flen - 1]
return sproj
|
python
|
def _project(reference_sources, estimated_source, flen):
"""Least-squares projection of estimated source on the subspace spanned by
delayed versions of reference sources, with delays between 0 and flen-1
"""
nsrc = reference_sources.shape[0]
nsampl = reference_sources.shape[1]
# computing coefficients of least squares problem via FFT ##
# zero padding and FFT of input data
reference_sources = np.hstack((reference_sources,
np.zeros((nsrc, flen - 1))))
estimated_source = np.hstack((estimated_source, np.zeros(flen - 1)))
n_fft = int(2**np.ceil(np.log2(nsampl + flen - 1.)))
sf = scipy.fftpack.fft(reference_sources, n=n_fft, axis=1)
sef = scipy.fftpack.fft(estimated_source, n=n_fft)
# inner products between delayed versions of reference_sources
G = np.zeros((nsrc * flen, nsrc * flen))
for i in range(nsrc):
for j in range(nsrc):
ssf = sf[i] * np.conj(sf[j])
ssf = np.real(scipy.fftpack.ifft(ssf))
ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])),
r=ssf[:flen])
G[i * flen: (i+1) * flen, j * flen: (j+1) * flen] = ss
G[j * flen: (j+1) * flen, i * flen: (i+1) * flen] = ss.T
# inner products between estimated_source and delayed versions of
# reference_sources
D = np.zeros(nsrc * flen)
for i in range(nsrc):
ssef = sf[i] * np.conj(sef)
ssef = np.real(scipy.fftpack.ifft(ssef))
D[i * flen: (i+1) * flen] = np.hstack((ssef[0], ssef[-1:-flen:-1]))
# Computing projection
# Distortion filters
try:
C = np.linalg.solve(G, D).reshape(flen, nsrc, order='F')
except np.linalg.linalg.LinAlgError:
C = np.linalg.lstsq(G, D)[0].reshape(flen, nsrc, order='F')
# Filtering
sproj = np.zeros(nsampl + flen - 1)
for i in range(nsrc):
sproj += fftconvolve(C[:, i], reference_sources[i])[:nsampl + flen - 1]
return sproj
|
[
"def",
"_project",
"(",
"reference_sources",
",",
"estimated_source",
",",
"flen",
")",
":",
"nsrc",
"=",
"reference_sources",
".",
"shape",
"[",
"0",
"]",
"nsampl",
"=",
"reference_sources",
".",
"shape",
"[",
"1",
"]",
"# computing coefficients of least squares problem via FFT ##",
"# zero padding and FFT of input data",
"reference_sources",
"=",
"np",
".",
"hstack",
"(",
"(",
"reference_sources",
",",
"np",
".",
"zeros",
"(",
"(",
"nsrc",
",",
"flen",
"-",
"1",
")",
")",
")",
")",
"estimated_source",
"=",
"np",
".",
"hstack",
"(",
"(",
"estimated_source",
",",
"np",
".",
"zeros",
"(",
"flen",
"-",
"1",
")",
")",
")",
"n_fft",
"=",
"int",
"(",
"2",
"**",
"np",
".",
"ceil",
"(",
"np",
".",
"log2",
"(",
"nsampl",
"+",
"flen",
"-",
"1.",
")",
")",
")",
"sf",
"=",
"scipy",
".",
"fftpack",
".",
"fft",
"(",
"reference_sources",
",",
"n",
"=",
"n_fft",
",",
"axis",
"=",
"1",
")",
"sef",
"=",
"scipy",
".",
"fftpack",
".",
"fft",
"(",
"estimated_source",
",",
"n",
"=",
"n_fft",
")",
"# inner products between delayed versions of reference_sources",
"G",
"=",
"np",
".",
"zeros",
"(",
"(",
"nsrc",
"*",
"flen",
",",
"nsrc",
"*",
"flen",
")",
")",
"for",
"i",
"in",
"range",
"(",
"nsrc",
")",
":",
"for",
"j",
"in",
"range",
"(",
"nsrc",
")",
":",
"ssf",
"=",
"sf",
"[",
"i",
"]",
"*",
"np",
".",
"conj",
"(",
"sf",
"[",
"j",
"]",
")",
"ssf",
"=",
"np",
".",
"real",
"(",
"scipy",
".",
"fftpack",
".",
"ifft",
"(",
"ssf",
")",
")",
"ss",
"=",
"toeplitz",
"(",
"np",
".",
"hstack",
"(",
"(",
"ssf",
"[",
"0",
"]",
",",
"ssf",
"[",
"-",
"1",
":",
"-",
"flen",
":",
"-",
"1",
"]",
")",
")",
",",
"r",
"=",
"ssf",
"[",
":",
"flen",
"]",
")",
"G",
"[",
"i",
"*",
"flen",
":",
"(",
"i",
"+",
"1",
")",
"*",
"flen",
",",
"j",
"*",
"flen",
":",
"(",
"j",
"+",
"1",
")",
"*",
"flen",
"]",
"=",
"ss",
"G",
"[",
"j",
"*",
"flen",
":",
"(",
"j",
"+",
"1",
")",
"*",
"flen",
",",
"i",
"*",
"flen",
":",
"(",
"i",
"+",
"1",
")",
"*",
"flen",
"]",
"=",
"ss",
".",
"T",
"# inner products between estimated_source and delayed versions of",
"# reference_sources",
"D",
"=",
"np",
".",
"zeros",
"(",
"nsrc",
"*",
"flen",
")",
"for",
"i",
"in",
"range",
"(",
"nsrc",
")",
":",
"ssef",
"=",
"sf",
"[",
"i",
"]",
"*",
"np",
".",
"conj",
"(",
"sef",
")",
"ssef",
"=",
"np",
".",
"real",
"(",
"scipy",
".",
"fftpack",
".",
"ifft",
"(",
"ssef",
")",
")",
"D",
"[",
"i",
"*",
"flen",
":",
"(",
"i",
"+",
"1",
")",
"*",
"flen",
"]",
"=",
"np",
".",
"hstack",
"(",
"(",
"ssef",
"[",
"0",
"]",
",",
"ssef",
"[",
"-",
"1",
":",
"-",
"flen",
":",
"-",
"1",
"]",
")",
")",
"# Computing projection",
"# Distortion filters",
"try",
":",
"C",
"=",
"np",
".",
"linalg",
".",
"solve",
"(",
"G",
",",
"D",
")",
".",
"reshape",
"(",
"flen",
",",
"nsrc",
",",
"order",
"=",
"'F'",
")",
"except",
"np",
".",
"linalg",
".",
"linalg",
".",
"LinAlgError",
":",
"C",
"=",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"G",
",",
"D",
")",
"[",
"0",
"]",
".",
"reshape",
"(",
"flen",
",",
"nsrc",
",",
"order",
"=",
"'F'",
")",
"# Filtering",
"sproj",
"=",
"np",
".",
"zeros",
"(",
"nsampl",
"+",
"flen",
"-",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"nsrc",
")",
":",
"sproj",
"+=",
"fftconvolve",
"(",
"C",
"[",
":",
",",
"i",
"]",
",",
"reference_sources",
"[",
"i",
"]",
")",
"[",
":",
"nsampl",
"+",
"flen",
"-",
"1",
"]",
"return",
"sproj"
] |
Least-squares projection of estimated source on the subspace spanned by
delayed versions of reference sources, with delays between 0 and flen-1
|
[
"Least",
"-",
"squares",
"projection",
"of",
"estimated",
"source",
"on",
"the",
"subspace",
"spanned",
"by",
"delayed",
"versions",
"of",
"reference",
"sources",
"with",
"delays",
"between",
"0",
"and",
"flen",
"-",
"1"
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/separation.py#L679-L722
|
train
|
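To make the FFT-based least-squares machinery in the _project entry above easier to follow, here is a hedged, naive sketch of the same projection idea; project_naive and the toy data are illustrative and not part of mir_eval, and the real code builds conceptually the same normal equations via FFT cross-correlations rather than an explicit matrix:

import numpy as np

def project_naive(reference_sources, estimated_source, flen):
    # Columns of A are the reference sources delayed by 0 .. flen-1 samples;
    # the projection of the estimate onto span(A) is the quantity _project returns.
    nsrc, nsampl = reference_sources.shape
    A = np.zeros((nsampl + flen - 1, nsrc * flen))
    for i in range(nsrc):
        for d in range(flen):
            A[d:d + nsampl, i * flen + d] = reference_sources[i]
    e = np.concatenate([estimated_source, np.zeros(flen - 1)])
    coeffs, *_ = np.linalg.lstsq(A, e, rcond=None)
    return A @ coeffs

rng = np.random.RandomState(0)
refs = rng.randn(2, 256)
est = 0.7 * refs[0] + 0.2 * refs[1] + 0.05 * rng.randn(256)
sproj = project_naive(refs, est, flen=32)
print(sproj.shape)  # (287,) == nsampl + flen - 1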
craffel/mir_eval
|
mir_eval/separation.py
|
_bss_image_crit
|
def _bss_image_crit(s_true, e_spat, e_interf, e_artif):
"""Measurement of the separation quality for a given image in terms of
filtered true source, spatial error, interference and artifacts.
"""
# energy ratios
sdr = _safe_db(np.sum(s_true**2), np.sum((e_spat+e_interf+e_artif)**2))
isr = _safe_db(np.sum(s_true**2), np.sum(e_spat**2))
sir = _safe_db(np.sum((s_true+e_spat)**2), np.sum(e_interf**2))
sar = _safe_db(np.sum((s_true+e_spat+e_interf)**2), np.sum(e_artif**2))
return (sdr, isr, sir, sar)
|
python
|
def _bss_image_crit(s_true, e_spat, e_interf, e_artif):
"""Measurement of the separation quality for a given image in terms of
filtered true source, spatial error, interference and artifacts.
"""
# energy ratios
sdr = _safe_db(np.sum(s_true**2), np.sum((e_spat+e_interf+e_artif)**2))
isr = _safe_db(np.sum(s_true**2), np.sum(e_spat**2))
sir = _safe_db(np.sum((s_true+e_spat)**2), np.sum(e_interf**2))
sar = _safe_db(np.sum((s_true+e_spat+e_interf)**2), np.sum(e_artif**2))
return (sdr, isr, sir, sar)
|
[
"def",
"_bss_image_crit",
"(",
"s_true",
",",
"e_spat",
",",
"e_interf",
",",
"e_artif",
")",
":",
"# energy ratios",
"sdr",
"=",
"_safe_db",
"(",
"np",
".",
"sum",
"(",
"s_true",
"**",
"2",
")",
",",
"np",
".",
"sum",
"(",
"(",
"e_spat",
"+",
"e_interf",
"+",
"e_artif",
")",
"**",
"2",
")",
")",
"isr",
"=",
"_safe_db",
"(",
"np",
".",
"sum",
"(",
"s_true",
"**",
"2",
")",
",",
"np",
".",
"sum",
"(",
"e_spat",
"**",
"2",
")",
")",
"sir",
"=",
"_safe_db",
"(",
"np",
".",
"sum",
"(",
"(",
"s_true",
"+",
"e_spat",
")",
"**",
"2",
")",
",",
"np",
".",
"sum",
"(",
"e_interf",
"**",
"2",
")",
")",
"sar",
"=",
"_safe_db",
"(",
"np",
".",
"sum",
"(",
"(",
"s_true",
"+",
"e_spat",
"+",
"e_interf",
")",
"**",
"2",
")",
",",
"np",
".",
"sum",
"(",
"e_artif",
"**",
"2",
")",
")",
"return",
"(",
"sdr",
",",
"isr",
",",
"sir",
",",
"sar",
")"
] |
Measurement of the separation quality for a given image in terms of
filtered true source, spatial error, interference and artifacts.
|
[
"Measurement",
"of",
"the",
"separation",
"quality",
"for",
"a",
"given",
"image",
"in",
"terms",
"of",
"filtered",
"true",
"source",
"spatial",
"error",
"interference",
"and",
"artifacts",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/separation.py#L815-L824
|
train
|
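As a worked example of the energy-ratio definitions in the _bss_image_crit entry above, the following hedged sketch recomputes the four quantities on toy vectors; safe_db is a local stand-in for the module's _safe_db helper and the error magnitudes are invented:

import numpy as np

def safe_db(num, den):
    # local stand-in for the module's _safe_db helper
    return np.inf if den == 0 else 10 * np.log10(num / den)

rng = np.random.RandomState(0)
# toy decomposition: true image plus small spatial, interference and artifact errors
s_true, e_spat, e_interf, e_artif = rng.randn(4, 1000) * [[1.0], [0.1], [0.2], [0.05]]
sdr = safe_db(np.sum(s_true**2), np.sum((e_spat + e_interf + e_artif)**2))
isr = safe_db(np.sum(s_true**2), np.sum(e_spat**2))
sir = safe_db(np.sum((s_true + e_spat)**2), np.sum(e_interf**2))
sar = safe_db(np.sum((s_true + e_spat + e_interf)**2), np.sum(e_artif**2))
print(sdr, isr, sir, sar)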
craffel/mir_eval
|
mir_eval/separation.py
|
_safe_db
|
def _safe_db(num, den):
"""Properly handle the potential +Inf db SIR, instead of raising a
RuntimeWarning. Only denominator is checked because the numerator can never
be 0.
"""
if den == 0:
return np.Inf
return 10 * np.log10(num / den)
|
python
|
def _safe_db(num, den):
"""Properly handle the potential +Inf db SIR, instead of raising a
RuntimeWarning. Only denominator is checked because the numerator can never
be 0.
"""
if den == 0:
return np.Inf
return 10 * np.log10(num / den)
|
[
"def",
"_safe_db",
"(",
"num",
",",
"den",
")",
":",
"if",
"den",
"==",
"0",
":",
"return",
"np",
".",
"Inf",
"return",
"10",
"*",
"np",
".",
"log10",
"(",
"num",
"/",
"den",
")"
] |
Properly handle the potential +Inf dB SIR, instead of raising a
RuntimeWarning. Only denominator is checked because the numerator can never
be 0.
|
[
"Properly",
"handle",
"the",
"potential",
"+",
"Inf",
"db",
"SIR",
"instead",
"of",
"raising",
"a",
"RuntimeWarning",
".",
"Only",
"denominator",
"is",
"checked",
"because",
"the",
"numerator",
"can",
"never",
"be",
"0",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/separation.py#L827-L834
|
train
|
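A minimal check of the convention described in the _safe_db entry above; the local safe_db mirrors the helper and the numbers are illustrative:

import numpy as np

def safe_db(num, den):
    # same convention as _safe_db: return +Inf instead of a divide-by-zero RuntimeWarning
    return np.inf if den == 0 else 10 * np.log10(num / den)

print(safe_db(1.0, 0.0))    # inf
print(safe_db(1.0, 0.001))  # 30.0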
craffel/mir_eval
|
mir_eval/separation.py
|
evaluate
|
def evaluate(reference_sources, estimated_sources, **kwargs):
"""Compute all metrics for the given reference and estimated signals.
NOTE: This will always compute :func:`mir_eval.separation.bss_eval_images`
for any valid input and will additionally compute
:func:`mir_eval.separation.bss_eval_sources` for valid input with fewer
than 3 dimensions.
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated source
>>> scores = mir_eval.separation.evaluate(reference_sources,
... estimated_sources)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])
matrix containing true sources
estimated_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])
matrix containing estimated sources
kwargs
Additional keyword arguments which will be passed to the
appropriate metric or preprocessing functions.
Returns
-------
scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
"""
# Compute all the metrics
scores = collections.OrderedDict()
sdr, isr, sir, sar, perm = util.filter_kwargs(
bss_eval_images,
reference_sources,
estimated_sources,
**kwargs
)
scores['Images - Source to Distortion'] = sdr.tolist()
scores['Images - Image to Spatial'] = isr.tolist()
scores['Images - Source to Interference'] = sir.tolist()
scores['Images - Source to Artifact'] = sar.tolist()
scores['Images - Source permutation'] = perm.tolist()
sdr, isr, sir, sar, perm = util.filter_kwargs(
bss_eval_images_framewise,
reference_sources,
estimated_sources,
**kwargs
)
scores['Images Frames - Source to Distortion'] = sdr.tolist()
scores['Images Frames - Image to Spatial'] = isr.tolist()
scores['Images Frames - Source to Interference'] = sir.tolist()
scores['Images Frames - Source to Artifact'] = sar.tolist()
scores['Images Frames - Source permutation'] = perm.tolist()
# Verify we can compute sources on this input
if reference_sources.ndim < 3 and estimated_sources.ndim < 3:
sdr, sir, sar, perm = util.filter_kwargs(
bss_eval_sources_framewise,
reference_sources,
estimated_sources,
**kwargs
)
scores['Sources Frames - Source to Distortion'] = sdr.tolist()
scores['Sources Frames - Source to Interference'] = sir.tolist()
scores['Sources Frames - Source to Artifact'] = sar.tolist()
scores['Sources Frames - Source permutation'] = perm.tolist()
sdr, sir, sar, perm = util.filter_kwargs(
bss_eval_sources,
reference_sources,
estimated_sources,
**kwargs
)
scores['Sources - Source to Distortion'] = sdr.tolist()
scores['Sources - Source to Interference'] = sir.tolist()
scores['Sources - Source to Artifact'] = sar.tolist()
scores['Sources - Source permutation'] = perm.tolist()
return scores
|
python
|
def evaluate(reference_sources, estimated_sources, **kwargs):
"""Compute all metrics for the given reference and estimated signals.
NOTE: This will always compute :func:`mir_eval.separation.bss_eval_images`
for any valid input and will additionally compute
:func:`mir_eval.separation.bss_eval_sources` for valid input with fewer
than 3 dimensions.
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated source
>>> scores = mir_eval.separation.evaluate(reference_sources,
... estimated_sources)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])
matrix containing true sources
estimated_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])
matrix containing estimated sources
kwargs
Additional keyword arguments which will be passed to the
appropriate metric or preprocessing functions.
Returns
-------
scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
"""
# Compute all the metrics
scores = collections.OrderedDict()
sdr, isr, sir, sar, perm = util.filter_kwargs(
bss_eval_images,
reference_sources,
estimated_sources,
**kwargs
)
scores['Images - Source to Distortion'] = sdr.tolist()
scores['Images - Image to Spatial'] = isr.tolist()
scores['Images - Source to Interference'] = sir.tolist()
scores['Images - Source to Artifact'] = sar.tolist()
scores['Images - Source permutation'] = perm.tolist()
sdr, isr, sir, sar, perm = util.filter_kwargs(
bss_eval_images_framewise,
reference_sources,
estimated_sources,
**kwargs
)
scores['Images Frames - Source to Distortion'] = sdr.tolist()
scores['Images Frames - Image to Spatial'] = isr.tolist()
scores['Images Frames - Source to Interference'] = sir.tolist()
scores['Images Frames - Source to Artifact'] = sar.tolist()
scores['Images Frames - Source permutation'] = perm.tolist()
# Verify we can compute sources on this input
if reference_sources.ndim < 3 and estimated_sources.ndim < 3:
sdr, sir, sar, perm = util.filter_kwargs(
bss_eval_sources_framewise,
reference_sources,
estimated_sources,
**kwargs
)
scores['Sources Frames - Source to Distortion'] = sdr.tolist()
scores['Sources Frames - Source to Interference'] = sir.tolist()
scores['Sources Frames - Source to Artifact'] = sar.tolist()
scores['Sources Frames - Source permutation'] = perm.tolist()
sdr, sir, sar, perm = util.filter_kwargs(
bss_eval_sources,
reference_sources,
estimated_sources,
**kwargs
)
scores['Sources - Source to Distortion'] = sdr.tolist()
scores['Sources - Source to Interference'] = sir.tolist()
scores['Sources - Source to Artifact'] = sar.tolist()
scores['Sources - Source permutation'] = perm.tolist()
return scores
|
[
"def",
"evaluate",
"(",
"reference_sources",
",",
"estimated_sources",
",",
"*",
"*",
"kwargs",
")",
":",
"# Compute all the metrics",
"scores",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"sdr",
",",
"isr",
",",
"sir",
",",
"sar",
",",
"perm",
"=",
"util",
".",
"filter_kwargs",
"(",
"bss_eval_images",
",",
"reference_sources",
",",
"estimated_sources",
",",
"*",
"*",
"kwargs",
")",
"scores",
"[",
"'Images - Source to Distortion'",
"]",
"=",
"sdr",
".",
"tolist",
"(",
")",
"scores",
"[",
"'Images - Image to Spatial'",
"]",
"=",
"isr",
".",
"tolist",
"(",
")",
"scores",
"[",
"'Images - Source to Interference'",
"]",
"=",
"sir",
".",
"tolist",
"(",
")",
"scores",
"[",
"'Images - Source to Artifact'",
"]",
"=",
"sar",
".",
"tolist",
"(",
")",
"scores",
"[",
"'Images - Source permutation'",
"]",
"=",
"perm",
".",
"tolist",
"(",
")",
"sdr",
",",
"isr",
",",
"sir",
",",
"sar",
",",
"perm",
"=",
"util",
".",
"filter_kwargs",
"(",
"bss_eval_images_framewise",
",",
"reference_sources",
",",
"estimated_sources",
",",
"*",
"*",
"kwargs",
")",
"scores",
"[",
"'Images Frames - Source to Distortion'",
"]",
"=",
"sdr",
".",
"tolist",
"(",
")",
"scores",
"[",
"'Images Frames - Image to Spatial'",
"]",
"=",
"isr",
".",
"tolist",
"(",
")",
"scores",
"[",
"'Images Frames - Source to Interference'",
"]",
"=",
"sir",
".",
"tolist",
"(",
")",
"scores",
"[",
"'Images Frames - Source to Artifact'",
"]",
"=",
"sar",
".",
"tolist",
"(",
")",
"scores",
"[",
"'Images Frames - Source permutation'",
"]",
"=",
"perm",
".",
"tolist",
"(",
")",
"# Verify we can compute sources on this input",
"if",
"reference_sources",
".",
"ndim",
"<",
"3",
"and",
"estimated_sources",
".",
"ndim",
"<",
"3",
":",
"sdr",
",",
"sir",
",",
"sar",
",",
"perm",
"=",
"util",
".",
"filter_kwargs",
"(",
"bss_eval_sources_framewise",
",",
"reference_sources",
",",
"estimated_sources",
",",
"*",
"*",
"kwargs",
")",
"scores",
"[",
"'Sources Frames - Source to Distortion'",
"]",
"=",
"sdr",
".",
"tolist",
"(",
")",
"scores",
"[",
"'Sources Frames - Source to Interference'",
"]",
"=",
"sir",
".",
"tolist",
"(",
")",
"scores",
"[",
"'Sources Frames - Source to Artifact'",
"]",
"=",
"sar",
".",
"tolist",
"(",
")",
"scores",
"[",
"'Sources Frames - Source permutation'",
"]",
"=",
"perm",
".",
"tolist",
"(",
")",
"sdr",
",",
"sir",
",",
"sar",
",",
"perm",
"=",
"util",
".",
"filter_kwargs",
"(",
"bss_eval_sources",
",",
"reference_sources",
",",
"estimated_sources",
",",
"*",
"*",
"kwargs",
")",
"scores",
"[",
"'Sources - Source to Distortion'",
"]",
"=",
"sdr",
".",
"tolist",
"(",
")",
"scores",
"[",
"'Sources - Source to Interference'",
"]",
"=",
"sir",
".",
"tolist",
"(",
")",
"scores",
"[",
"'Sources - Source to Artifact'",
"]",
"=",
"sar",
".",
"tolist",
"(",
")",
"scores",
"[",
"'Sources - Source permutation'",
"]",
"=",
"perm",
".",
"tolist",
"(",
")",
"return",
"scores"
] |
Compute all metrics for the given reference and estimated signals.
NOTE: This will always compute :func:`mir_eval.separation.bss_eval_images`
for any valid input and will additionally compute
:func:`mir_eval.separation.bss_eval_sources` for valid input with fewer
than 3 dimensions.
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated source
>>> scores = mir_eval.separation.evaluate(reference_sources,
... estimated_sources)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])
matrix containing true sources
estimated_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])
matrix containing estimated sources
kwargs
Additional keyword arguments which will be passed to the
appropriate metric or preprocessing functions.
Returns
-------
scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
|
[
"Compute",
"all",
"metrics",
"for",
"the",
"given",
"reference",
"and",
"estimated",
"signals",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/separation.py#L837-L921
|
train
|
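A hedged usage sketch for the evaluate entry above (assumes mir_eval and numpy are installed; the toy mono sources are invented, and because they have fewer than 3 dimensions both the images and sources metrics are computed):

import numpy as np
import mir_eval

rng = np.random.RandomState(0)
reference_sources = rng.randn(2, 44100)                           # (nsrc, nsampl)
estimated_sources = reference_sources + 0.1 * rng.randn(2, 44100)
scores = mir_eval.separation.evaluate(reference_sources, estimated_sources)
print(scores['Sources - Source to Distortion'])
print(list(scores.keys())[:3])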
craffel/mir_eval
|
mir_eval/sonify.py
|
clicks
|
def clicks(times, fs, click=None, length=None):
"""Returns a signal with the signal 'click' placed at each specified time
Parameters
----------
times : np.ndarray
times to place clicks, in seconds
fs : int
desired sampling rate of the output signal
click : np.ndarray
click signal, defaults to a 1 kHz blip
length : int
desired number of samples in the output signal,
defaults to ``times.max()*fs + click.shape[0] + 1``
Returns
-------
click_signal : np.ndarray
Synthesized click signal
"""
# Create default click signal
if click is None:
# 1 kHz tone, 100ms
click = np.sin(2*np.pi*np.arange(fs*.1)*1000/(1.*fs))
# Exponential decay
click *= np.exp(-np.arange(fs*.1)/(fs*.01))
# Set default length
if length is None:
length = int(times.max()*fs + click.shape[0] + 1)
# Pre-allocate click signal
click_signal = np.zeros(length)
# Place clicks
for time in times:
# Compute the boundaries of the click
start = int(time*fs)
end = start + click.shape[0]
# Make sure we don't try to output past the end of the signal
if start >= length:
break
if end >= length:
click_signal[start:] = click[:length - start]
break
# Normally, just add a click here
click_signal[start:end] = click
return click_signal
|
python
|
def clicks(times, fs, click=None, length=None):
"""Returns a signal with the signal 'click' placed at each specified time
Parameters
----------
times : np.ndarray
times to place clicks, in seconds
fs : int
desired sampling rate of the output signal
click : np.ndarray
click signal, defaults to a 1 kHz blip
length : int
desired number of samples in the output signal,
defaults to ``times.max()*fs + click.shape[0] + 1``
Returns
-------
click_signal : np.ndarray
Synthesized click signal
"""
# Create default click signal
if click is None:
# 1 kHz tone, 100ms
click = np.sin(2*np.pi*np.arange(fs*.1)*1000/(1.*fs))
# Exponential decay
click *= np.exp(-np.arange(fs*.1)/(fs*.01))
# Set default length
if length is None:
length = int(times.max()*fs + click.shape[0] + 1)
# Pre-allocate click signal
click_signal = np.zeros(length)
# Place clicks
for time in times:
# Compute the boundaries of the click
start = int(time*fs)
end = start + click.shape[0]
# Make sure we don't try to output past the end of the signal
if start >= length:
break
if end >= length:
click_signal[start:] = click[:length - start]
break
# Normally, just add a click here
click_signal[start:end] = click
return click_signal
|
[
"def",
"clicks",
"(",
"times",
",",
"fs",
",",
"click",
"=",
"None",
",",
"length",
"=",
"None",
")",
":",
"# Create default click signal",
"if",
"click",
"is",
"None",
":",
"# 1 kHz tone, 100ms",
"click",
"=",
"np",
".",
"sin",
"(",
"2",
"*",
"np",
".",
"pi",
"*",
"np",
".",
"arange",
"(",
"fs",
"*",
".1",
")",
"*",
"1000",
"/",
"(",
"1.",
"*",
"fs",
")",
")",
"# Exponential decay",
"click",
"*=",
"np",
".",
"exp",
"(",
"-",
"np",
".",
"arange",
"(",
"fs",
"*",
".1",
")",
"/",
"(",
"fs",
"*",
".01",
")",
")",
"# Set default length",
"if",
"length",
"is",
"None",
":",
"length",
"=",
"int",
"(",
"times",
".",
"max",
"(",
")",
"*",
"fs",
"+",
"click",
".",
"shape",
"[",
"0",
"]",
"+",
"1",
")",
"# Pre-allocate click signal",
"click_signal",
"=",
"np",
".",
"zeros",
"(",
"length",
")",
"# Place clicks",
"for",
"time",
"in",
"times",
":",
"# Compute the boundaries of the click",
"start",
"=",
"int",
"(",
"time",
"*",
"fs",
")",
"end",
"=",
"start",
"+",
"click",
".",
"shape",
"[",
"0",
"]",
"# Make sure we don't try to output past the end of the signal",
"if",
"start",
">=",
"length",
":",
"break",
"if",
"end",
">=",
"length",
":",
"click_signal",
"[",
"start",
":",
"]",
"=",
"click",
"[",
":",
"length",
"-",
"start",
"]",
"break",
"# Normally, just add a click here",
"click_signal",
"[",
"start",
":",
"end",
"]",
"=",
"click",
"return",
"click_signal"
] |
Returns a signal with the signal 'click' placed at each specified time
Parameters
----------
times : np.ndarray
times to place clicks, in seconds
fs : int
desired sampling rate of the output signal
click : np.ndarray
click signal, defaults to a 1 kHz blip
length : int
desired number of samples in the output signal,
defaults to ``times.max()*fs + click.shape[0] + 1``
Returns
-------
click_signal : np.ndarray
Synthesized click signal
|
[
"Returns",
"a",
"signal",
"with",
"the",
"signal",
"click",
"placed",
"at",
"each",
"specified",
"time"
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/sonify.py#L14-L60
|
train
|
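A hedged usage sketch for the clicks entry above (assumes mir_eval, numpy and scipy are installed; the onset times and output filename are made up):

import numpy as np
import scipy.io.wavfile
import mir_eval

fs = 22050
onset_times = np.array([0.5, 1.0, 1.5, 2.25])
click_signal = mir_eval.sonify.clicks(onset_times, fs)
# write as 16-bit PCM for listening
scipy.io.wavfile.write('clicks.wav', fs, (click_signal * 32767).astype(np.int16))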
craffel/mir_eval
|
mir_eval/sonify.py
|
time_frequency
|
def time_frequency(gram, frequencies, times, fs, function=np.sin, length=None,
n_dec=1):
"""Reverse synthesis of a time-frequency representation of a signal
Parameters
----------
gram : np.ndarray
``gram[n, m]`` is the magnitude of ``frequencies[n]``
from ``times[m]`` to ``times[m + 1]``
Non-positive magnitudes are interpreted as silence.
frequencies : np.ndarray
array of size ``gram.shape[0]`` denoting the frequency of
each row of gram
times : np.ndarray, shape= ``(gram.shape[1],)`` or ``(gram.shape[1], 2)``
Either the start time of each column in the gram,
or the time interval corresponding to each column.
fs : int
desired sampling rate of the output signal
function : function
function to use to synthesize notes, should be :math:`2\pi`-periodic
length : int
desired number of samples in the output signal,
defaults to ``times[-1]*fs``
n_dec : int
the number of decimals used to approximate each sonified frequency.
Defaults to 1 decimal place. Higher precision will be slower.
Returns
-------
output : np.ndarray
synthesized version of the piano roll
"""
# Default value for length
if times.ndim == 1:
# Convert to intervals
times = util.boundaries_to_intervals(times)
if length is None:
length = int(times[-1, 1] * fs)
times, _ = util.adjust_intervals(times, t_max=length)
# Truncate times so that the shape matches gram
n_times = gram.shape[1]
times = times[:n_times]
def _fast_synthesize(frequency):
"""A faster way to synthesize a signal.
Generate one cycle, and simulate arbitrary repetitions
using array indexing tricks.
"""
# hack so that we can ensure an integer number of periods and samples
# rounds frequency to 1st decimal, s.t. 10 * frequency will be an int
frequency = np.round(frequency, n_dec)
# Generate 10*frequency periods at this frequency
# Equivalent to n_samples = int(n_periods * fs / frequency)
# n_periods = 10*frequency is the smallest integer that guarantees
# that n_samples will be an integer, since assuming 10*frequency
# is an integer
n_samples = int(10.0**n_dec * fs)
short_signal = function(2.0 * np.pi * np.arange(n_samples) *
frequency / fs)
# Calculate the number of loops we need to fill the duration
n_repeats = int(np.ceil(length/float(short_signal.shape[0])))
# Simulate tiling the short buffer by using stride tricks
long_signal = as_strided(short_signal,
shape=(n_repeats, len(short_signal)),
strides=(0, short_signal.itemsize))
# Use a flatiter to simulate a long 1D buffer
return long_signal.flat
def _const_interpolator(value):
"""Return a function that returns `value`
no matter the input.
"""
def __interpolator(x):
return value
return __interpolator
# Threshold the tfgram to remove non-positive values
gram = np.maximum(gram, 0)
# Pre-allocate output signal
output = np.zeros(length)
time_centers = np.mean(times, axis=1) * float(fs)
for n, frequency in enumerate(frequencies):
# Get a waveform of length samples at this frequency
wave = _fast_synthesize(frequency)
# Interpolate the values in gram over the time grid
if len(time_centers) > 1:
gram_interpolator = interp1d(
time_centers, gram[n, :],
kind='linear', bounds_error=False,
fill_value=0.0)
# If only one time point, create constant interpolator
else:
gram_interpolator = _const_interpolator(gram[n, 0])
# Scale each time interval by the piano roll magnitude
for m, (start, end) in enumerate((times * fs).astype(int)):
# Clip the timings to make sure the indices are valid
start, end = max(start, 0), min(end, length)
# add to waveform
output[start:end] += (
wave[start:end] * gram_interpolator(np.arange(start, end)))
# Normalize, but only if there's non-zero values
norm = np.abs(output).max()
if norm >= np.finfo(output.dtype).tiny:
output /= norm
return output
|
python
|
def time_frequency(gram, frequencies, times, fs, function=np.sin, length=None,
n_dec=1):
"""Reverse synthesis of a time-frequency representation of a signal
Parameters
----------
gram : np.ndarray
``gram[n, m]`` is the magnitude of ``frequencies[n]``
from ``times[m]`` to ``times[m + 1]``
Non-positive magnitudes are interpreted as silence.
frequencies : np.ndarray
array of size ``gram.shape[0]`` denoting the frequency of
each row of gram
times : np.ndarray, shape= ``(gram.shape[1],)`` or ``(gram.shape[1], 2)``
Either the start time of each column in the gram,
or the time interval corresponding to each column.
fs : int
desired sampling rate of the output signal
function : function
function to use to synthesize notes, should be :math:`2\pi`-periodic
length : int
desired number of samples in the output signal,
defaults to ``times[-1]*fs``
n_dec : int
the number of decimals used to approximate each sonified frequency.
Defaults to 1 decimal place. Higher precision will be slower.
Returns
-------
output : np.ndarray
synthesized version of the piano roll
"""
# Default value for length
if times.ndim == 1:
# Convert to intervals
times = util.boundaries_to_intervals(times)
if length is None:
length = int(times[-1, 1] * fs)
times, _ = util.adjust_intervals(times, t_max=length)
# Truncate times so that the shape matches gram
n_times = gram.shape[1]
times = times[:n_times]
def _fast_synthesize(frequency):
"""A faster way to synthesize a signal.
Generate one cycle, and simulate arbitrary repetitions
using array indexing tricks.
"""
# hack so that we can ensure an integer number of periods and samples
# rounds frequency to 1st decimal, s.t. 10 * frequency will be an int
frequency = np.round(frequency, n_dec)
# Generate 10*frequency periods at this frequency
# Equivalent to n_samples = int(n_periods * fs / frequency)
# n_periods = 10*frequency is the smallest integer that guarantees
# that n_samples will be an integer, since assuming 10*frequency
# is an integer
n_samples = int(10.0**n_dec * fs)
short_signal = function(2.0 * np.pi * np.arange(n_samples) *
frequency / fs)
# Calculate the number of loops we need to fill the duration
n_repeats = int(np.ceil(length/float(short_signal.shape[0])))
# Simulate tiling the short buffer by using stride tricks
long_signal = as_strided(short_signal,
shape=(n_repeats, len(short_signal)),
strides=(0, short_signal.itemsize))
# Use a flatiter to simulate a long 1D buffer
return long_signal.flat
def _const_interpolator(value):
"""Return a function that returns `value`
no matter the input.
"""
def __interpolator(x):
return value
return __interpolator
# Threshold the tfgram to remove non-positive values
gram = np.maximum(gram, 0)
# Pre-allocate output signal
output = np.zeros(length)
time_centers = np.mean(times, axis=1) * float(fs)
for n, frequency in enumerate(frequencies):
# Get a waveform of length samples at this frequency
wave = _fast_synthesize(frequency)
# Interpolate the values in gram over the time grid
if len(time_centers) > 1:
gram_interpolator = interp1d(
time_centers, gram[n, :],
kind='linear', bounds_error=False,
fill_value=0.0)
# If only one time point, create constant interpolator
else:
gram_interpolator = _const_interpolator(gram[n, 0])
# Scale each time interval by the piano roll magnitude
for m, (start, end) in enumerate((times * fs).astype(int)):
# Clip the timings to make sure the indices are valid
start, end = max(start, 0), min(end, length)
# add to waveform
output[start:end] += (
wave[start:end] * gram_interpolator(np.arange(start, end)))
# Normalize, but only if there's non-zero values
norm = np.abs(output).max()
if norm >= np.finfo(output.dtype).tiny:
output /= norm
return output
|
[
"def",
"time_frequency",
"(",
"gram",
",",
"frequencies",
",",
"times",
",",
"fs",
",",
"function",
"=",
"np",
".",
"sin",
",",
"length",
"=",
"None",
",",
"n_dec",
"=",
"1",
")",
":",
"# Default value for length",
"if",
"times",
".",
"ndim",
"==",
"1",
":",
"# Convert to intervals",
"times",
"=",
"util",
".",
"boundaries_to_intervals",
"(",
"times",
")",
"if",
"length",
"is",
"None",
":",
"length",
"=",
"int",
"(",
"times",
"[",
"-",
"1",
",",
"1",
"]",
"*",
"fs",
")",
"times",
",",
"_",
"=",
"util",
".",
"adjust_intervals",
"(",
"times",
",",
"t_max",
"=",
"length",
")",
"# Truncate times so that the shape matches gram",
"n_times",
"=",
"gram",
".",
"shape",
"[",
"1",
"]",
"times",
"=",
"times",
"[",
":",
"n_times",
"]",
"def",
"_fast_synthesize",
"(",
"frequency",
")",
":",
"\"\"\"A faster way to synthesize a signal.\n Generate one cycle, and simulate arbitrary repetitions\n using array indexing tricks.\n \"\"\"",
"# hack so that we can ensure an integer number of periods and samples",
"# rounds frequency to 1st decimal, s.t. 10 * frequency will be an int",
"frequency",
"=",
"np",
".",
"round",
"(",
"frequency",
",",
"n_dec",
")",
"# Generate 10*frequency periods at this frequency",
"# Equivalent to n_samples = int(n_periods * fs / frequency)",
"# n_periods = 10*frequency is the smallest integer that guarantees",
"# that n_samples will be an integer, since assuming 10*frequency",
"# is an integer",
"n_samples",
"=",
"int",
"(",
"10.0",
"**",
"n_dec",
"*",
"fs",
")",
"short_signal",
"=",
"function",
"(",
"2.0",
"*",
"np",
".",
"pi",
"*",
"np",
".",
"arange",
"(",
"n_samples",
")",
"*",
"frequency",
"/",
"fs",
")",
"# Calculate the number of loops we need to fill the duration",
"n_repeats",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"length",
"/",
"float",
"(",
"short_signal",
".",
"shape",
"[",
"0",
"]",
")",
")",
")",
"# Simulate tiling the short buffer by using stride tricks",
"long_signal",
"=",
"as_strided",
"(",
"short_signal",
",",
"shape",
"=",
"(",
"n_repeats",
",",
"len",
"(",
"short_signal",
")",
")",
",",
"strides",
"=",
"(",
"0",
",",
"short_signal",
".",
"itemsize",
")",
")",
"# Use a flatiter to simulate a long 1D buffer",
"return",
"long_signal",
".",
"flat",
"def",
"_const_interpolator",
"(",
"value",
")",
":",
"\"\"\"Return a function that returns `value`\n no matter the input.\n \"\"\"",
"def",
"__interpolator",
"(",
"x",
")",
":",
"return",
"value",
"return",
"__interpolator",
"# Threshold the tfgram to remove non-positive values",
"gram",
"=",
"np",
".",
"maximum",
"(",
"gram",
",",
"0",
")",
"# Pre-allocate output signal",
"output",
"=",
"np",
".",
"zeros",
"(",
"length",
")",
"time_centers",
"=",
"np",
".",
"mean",
"(",
"times",
",",
"axis",
"=",
"1",
")",
"*",
"float",
"(",
"fs",
")",
"for",
"n",
",",
"frequency",
"in",
"enumerate",
"(",
"frequencies",
")",
":",
"# Get a waveform of length samples at this frequency",
"wave",
"=",
"_fast_synthesize",
"(",
"frequency",
")",
"# Interpolate the values in gram over the time grid",
"if",
"len",
"(",
"time_centers",
")",
">",
"1",
":",
"gram_interpolator",
"=",
"interp1d",
"(",
"time_centers",
",",
"gram",
"[",
"n",
",",
":",
"]",
",",
"kind",
"=",
"'linear'",
",",
"bounds_error",
"=",
"False",
",",
"fill_value",
"=",
"0.0",
")",
"# If only one time point, create constant interpolator",
"else",
":",
"gram_interpolator",
"=",
"_const_interpolator",
"(",
"gram",
"[",
"n",
",",
"0",
"]",
")",
"# Scale each time interval by the piano roll magnitude",
"for",
"m",
",",
"(",
"start",
",",
"end",
")",
"in",
"enumerate",
"(",
"(",
"times",
"*",
"fs",
")",
".",
"astype",
"(",
"int",
")",
")",
":",
"# Clip the timings to make sure the indices are valid",
"start",
",",
"end",
"=",
"max",
"(",
"start",
",",
"0",
")",
",",
"min",
"(",
"end",
",",
"length",
")",
"# add to waveform",
"output",
"[",
"start",
":",
"end",
"]",
"+=",
"(",
"wave",
"[",
"start",
":",
"end",
"]",
"*",
"gram_interpolator",
"(",
"np",
".",
"arange",
"(",
"start",
",",
"end",
")",
")",
")",
"# Normalize, but only if there's non-zero values",
"norm",
"=",
"np",
".",
"abs",
"(",
"output",
")",
".",
"max",
"(",
")",
"if",
"norm",
">=",
"np",
".",
"finfo",
"(",
"output",
".",
"dtype",
")",
".",
"tiny",
":",
"output",
"/=",
"norm",
"return",
"output"
] |
Reverse synthesis of a time-frequency representation of a signal
Parameters
----------
gram : np.ndarray
``gram[n, m]`` is the magnitude of ``frequencies[n]``
from ``times[m]`` to ``times[m + 1]``
Non-positive magnitudes are interpreted as silence.
frequencies : np.ndarray
array of size ``gram.shape[0]`` denoting the frequency of
each row of gram
times : np.ndarray, shape= ``(gram.shape[1],)`` or ``(gram.shape[1], 2)``
Either the start time of each column in the gram,
or the time interval corresponding to each column.
fs : int
desired sampling rate of the output signal
function : function
function to use to synthesize notes, should be :math:`2\pi`-periodic
length : int
desired number of samples in the output signal,
defaults to ``times[-1]*fs``
n_dec : int
the number of decimals used to approximate each sonified frequency.
Defaults to 1 decimal place. Higher precision will be slower.
Returns
-------
output : np.ndarray
synthesized version of the piano roll
|
[
"Reverse",
"synthesis",
"of",
"a",
"time",
"-",
"frequency",
"representation",
"of",
"a",
"signal"
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/sonify.py#L63-L184
|
train
|
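A hedged usage sketch for the time_frequency entry above (assumes mir_eval and numpy are installed; the tiny two-row "piano roll" is invented, and passing gram.shape[1] + 1 boundary times yields one interval per column after the internal boundaries_to_intervals conversion):

import numpy as np
import mir_eval

fs = 8000
frequencies = np.array([220.0, 440.0])
times = np.arange(0, 2.5, 0.5)            # 5 boundaries -> 4 half-second columns
gram = np.array([[1, 0, 1, 0],            # 220 Hz active in columns 0 and 2
                 [0, 1, 0, 1]])           # 440 Hz active in columns 1 and 3
audio = mir_eval.sonify.time_frequency(gram, frequencies, times, fs)
print(audio.shape)  # (16000,) == int(times[-1] * fs)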
craffel/mir_eval
|
mir_eval/sonify.py
|
pitch_contour
|
def pitch_contour(times, frequencies, fs, amplitudes=None, function=np.sin,
length=None, kind='linear'):
'''Sonify a pitch contour.
Parameters
----------
times : np.ndarray
time indices for each frequency measurement, in seconds
frequencies : np.ndarray
frequency measurements, in Hz.
Non-positive measurements will be interpreted as un-voiced samples.
fs : int
desired sampling rate of the output signal
amplitudes : np.ndarray
amplitude measurements, nonnegative
defaults to ``np.ones((length,))``
function : function
function to use to synthesize notes, should be :math:`2\pi`-periodic
length : int
desired number of samples in the output signal,
defaults to ``max(times)*fs``
kind : str
Interpolation mode for the frequency and amplitude values.
See: ``scipy.interpolate.interp1d`` for valid settings.
Returns
-------
output : np.ndarray
synthesized version of the pitch contour
'''
fs = float(fs)
if length is None:
length = int(times.max() * fs)
# Squash the negative frequencies.
# wave(0) = 0, so clipping here will un-voice the corresponding instants
frequencies = np.maximum(frequencies, 0.0)
# Build a frequency interpolator
f_interp = interp1d(times * fs, 2 * np.pi * frequencies / fs, kind=kind,
fill_value=0.0, bounds_error=False, copy=False)
# Estimate frequency at sample points
f_est = f_interp(np.arange(length))
if amplitudes is None:
a_est = np.ones((length, ))
else:
# build an amplitude interpolator
a_interp = interp1d(
times * fs, amplitudes, kind=kind,
fill_value=0.0, bounds_error=False, copy=False)
a_est = a_interp(np.arange(length))
# Sonify the waveform
return a_est * function(np.cumsum(f_est))
|
python
|
def pitch_contour(times, frequencies, fs, amplitudes=None, function=np.sin,
length=None, kind='linear'):
'''Sonify a pitch contour.
Parameters
----------
times : np.ndarray
time indices for each frequency measurement, in seconds
frequencies : np.ndarray
frequency measurements, in Hz.
Non-positive measurements will be interpreted as un-voiced samples.
fs : int
desired sampling rate of the output signal
amplitudes : np.ndarray
amplitude measurements, nonnegative
defaults to ``np.ones((length,))``
function : function
function to use to synthesize notes, should be :math:`2\pi`-periodic
length : int
desired number of samples in the output signal,
defaults to ``max(times)*fs``
kind : str
Interpolation mode for the frequency and amplitude values.
See: ``scipy.interpolate.interp1d`` for valid settings.
Returns
-------
output : np.ndarray
synthesized version of the pitch contour
'''
fs = float(fs)
if length is None:
length = int(times.max() * fs)
# Squash the negative frequencies.
# wave(0) = 0, so clipping here will un-voice the corresponding instants
frequencies = np.maximum(frequencies, 0.0)
# Build a frequency interpolator
f_interp = interp1d(times * fs, 2 * np.pi * frequencies / fs, kind=kind,
fill_value=0.0, bounds_error=False, copy=False)
# Estimate frequency at sample points
f_est = f_interp(np.arange(length))
if amplitudes is None:
a_est = np.ones((length, ))
else:
# build an amplitude interpolator
a_interp = interp1d(
times * fs, amplitudes, kind=kind,
fill_value=0.0, bounds_error=False, copy=False)
a_est = a_interp(np.arange(length))
# Sonify the waveform
return a_est * function(np.cumsum(f_est))
|
[
"def",
"pitch_contour",
"(",
"times",
",",
"frequencies",
",",
"fs",
",",
"amplitudes",
"=",
"None",
",",
"function",
"=",
"np",
".",
"sin",
",",
"length",
"=",
"None",
",",
"kind",
"=",
"'linear'",
")",
":",
"fs",
"=",
"float",
"(",
"fs",
")",
"if",
"length",
"is",
"None",
":",
"length",
"=",
"int",
"(",
"times",
".",
"max",
"(",
")",
"*",
"fs",
")",
"# Squash the negative frequencies.",
"# wave(0) = 0, so clipping here will un-voice the corresponding instants",
"frequencies",
"=",
"np",
".",
"maximum",
"(",
"frequencies",
",",
"0.0",
")",
"# Build a frequency interpolator",
"f_interp",
"=",
"interp1d",
"(",
"times",
"*",
"fs",
",",
"2",
"*",
"np",
".",
"pi",
"*",
"frequencies",
"/",
"fs",
",",
"kind",
"=",
"kind",
",",
"fill_value",
"=",
"0.0",
",",
"bounds_error",
"=",
"False",
",",
"copy",
"=",
"False",
")",
"# Estimate frequency at sample points",
"f_est",
"=",
"f_interp",
"(",
"np",
".",
"arange",
"(",
"length",
")",
")",
"if",
"amplitudes",
"is",
"None",
":",
"a_est",
"=",
"np",
".",
"ones",
"(",
"(",
"length",
",",
")",
")",
"else",
":",
"# build an amplitude interpolator",
"a_interp",
"=",
"interp1d",
"(",
"times",
"*",
"fs",
",",
"amplitudes",
",",
"kind",
"=",
"kind",
",",
"fill_value",
"=",
"0.0",
",",
"bounds_error",
"=",
"False",
",",
"copy",
"=",
"False",
")",
"a_est",
"=",
"a_interp",
"(",
"np",
".",
"arange",
"(",
"length",
")",
")",
"# Sonify the waveform",
"return",
"a_est",
"*",
"function",
"(",
"np",
".",
"cumsum",
"(",
"f_est",
")",
")"
] |
Sonify a pitch contour.
Parameters
----------
times : np.ndarray
time indices for each frequency measurement, in seconds
frequencies : np.ndarray
frequency measurements, in Hz.
Non-positive measurements will be interpreted as un-voiced samples.
fs : int
desired sampling rate of the output signal
amplitudes : np.ndarray
amplitude measurements, nonnegative
defaults to ``np.ones((length,))``
function : function
function to use to synthesize notes, should be :math:`2\pi`-periodic
length : int
desired number of samples in the output signal,
defaults to ``max(times)*fs``
kind : str
Interpolation mode for the frequency and amplitude values.
See: ``scipy.interpolate.interp1d`` for valid settings.
Returns
-------
output : np.ndarray
synthesized version of the pitch contour
|
[
"Sonify",
"a",
"pitch",
"contour",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/sonify.py#L187-L250
|
train
|
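A hedged usage sketch for the pitch_contour entry above (assumes mir_eval and numpy are installed; the glissando and fade-out values are invented):

import numpy as np
import mir_eval

fs = 22050
times = np.arange(0.0, 2.0, 0.01)                    # one frame every 10 ms
frequencies = np.linspace(220.0, 440.0, times.size)  # rising glissando
amplitudes = np.linspace(1.0, 0.2, times.size)       # slow fade-out
audio = mir_eval.sonify.pitch_contour(times, frequencies, fs,
                                      amplitudes=amplitudes)
print(audio.shape)  # (int(times.max() * fs),)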
craffel/mir_eval
|
mir_eval/sonify.py
|
chords
|
def chords(chord_labels, intervals, fs, **kwargs):
"""Synthesizes chord labels
Parameters
----------
chord_labels : list of str
List of chord label strings.
intervals : np.ndarray, shape=(len(chord_labels), 2)
Start and end times of each chord label
fs : int
Sampling rate to synthesize at
kwargs
Additional keyword arguments to pass to
:func:`mir_eval.sonify.time_frequency`
Returns
-------
output : np.ndarray
Synthesized chord labels
"""
util.validate_intervals(intervals)
# Convert from labels to chroma
roots, interval_bitmaps, _ = chord.encode_many(chord_labels)
chromagram = np.array([np.roll(interval_bitmap, root)
for (interval_bitmap, root)
in zip(interval_bitmaps, roots)]).T
return chroma(chromagram, intervals, fs, **kwargs)
|
python
|
def chords(chord_labels, intervals, fs, **kwargs):
"""Synthesizes chord labels
Parameters
----------
chord_labels : list of str
List of chord label strings.
intervals : np.ndarray, shape=(len(chord_labels), 2)
Start and end times of each chord label
fs : int
Sampling rate to synthesize at
kwargs
Additional keyword arguments to pass to
:func:`mir_eval.sonify.time_frequency`
Returns
-------
output : np.ndarray
Synthesized chord labels
"""
util.validate_intervals(intervals)
# Convert from labels to chroma
roots, interval_bitmaps, _ = chord.encode_many(chord_labels)
chromagram = np.array([np.roll(interval_bitmap, root)
for (interval_bitmap, root)
in zip(interval_bitmaps, roots)]).T
return chroma(chromagram, intervals, fs, **kwargs)
|
[
"def",
"chords",
"(",
"chord_labels",
",",
"intervals",
",",
"fs",
",",
"*",
"*",
"kwargs",
")",
":",
"util",
".",
"validate_intervals",
"(",
"intervals",
")",
"# Convert from labels to chroma",
"roots",
",",
"interval_bitmaps",
",",
"_",
"=",
"chord",
".",
"encode_many",
"(",
"chord_labels",
")",
"chromagram",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"roll",
"(",
"interval_bitmap",
",",
"root",
")",
"for",
"(",
"interval_bitmap",
",",
"root",
")",
"in",
"zip",
"(",
"interval_bitmaps",
",",
"roots",
")",
"]",
")",
".",
"T",
"return",
"chroma",
"(",
"chromagram",
",",
"intervals",
",",
"fs",
",",
"*",
"*",
"kwargs",
")"
] |
Synthesizes chord labels
Parameters
----------
chord_labels : list of str
List of chord label strings.
intervals : np.ndarray, shape=(len(chord_labels), 2)
Start and end times of each chord label
fs : int
Sampling rate to synthesize at
kwargs
Additional keyword arguments to pass to
:func:`mir_eval.sonify.time_frequency`
Returns
-------
output : np.ndarray
Synthesized chord labels
|
[
"Synthesizes",
"chord",
"labels"
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/sonify.py#L300-L329
|
train
|
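A hedged usage sketch for the chords entry above (assumes mir_eval and numpy are installed; the labels follow mir_eval's chord syntax and the intervals are invented):

import numpy as np
import mir_eval

fs = 22050
chord_labels = ['C:maj', 'A:min']
intervals = np.array([[0.0, 1.0], [1.0, 2.0]])   # one second per chord
audio = mir_eval.sonify.chords(chord_labels, intervals, fs)
print(audio.shape)  # about (2 * fs,) samples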
craffel/mir_eval
|
mir_eval/onset.py
|
validate
|
def validate(reference_onsets, estimated_onsets):
"""Checks that the input annotations to a metric look like valid onset time
arrays, and throws helpful errors if not.
Parameters
----------
reference_onsets : np.ndarray
reference onset locations, in seconds
estimated_onsets : np.ndarray
estimated onset locations, in seconds
"""
# If reference or estimated onsets are empty, warn because metric will be 0
if reference_onsets.size == 0:
warnings.warn("Reference onsets are empty.")
if estimated_onsets.size == 0:
warnings.warn("Estimated onsets are empty.")
for onsets in [reference_onsets, estimated_onsets]:
util.validate_events(onsets, MAX_TIME)
|
python
|
def validate(reference_onsets, estimated_onsets):
"""Checks that the input annotations to a metric look like valid onset time
arrays, and throws helpful errors if not.
Parameters
----------
reference_onsets : np.ndarray
reference onset locations, in seconds
estimated_onsets : np.ndarray
estimated onset locations, in seconds
"""
# If reference or estimated onsets are empty, warn because metric will be 0
if reference_onsets.size == 0:
warnings.warn("Reference onsets are empty.")
if estimated_onsets.size == 0:
warnings.warn("Estimated onsets are empty.")
for onsets in [reference_onsets, estimated_onsets]:
util.validate_events(onsets, MAX_TIME)
|
[
"def",
"validate",
"(",
"reference_onsets",
",",
"estimated_onsets",
")",
":",
"# If reference or estimated onsets are empty, warn because metric will be 0",
"if",
"reference_onsets",
".",
"size",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Reference onsets are empty.\"",
")",
"if",
"estimated_onsets",
".",
"size",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Estimated onsets are empty.\"",
")",
"for",
"onsets",
"in",
"[",
"reference_onsets",
",",
"estimated_onsets",
"]",
":",
"util",
".",
"validate_events",
"(",
"onsets",
",",
"MAX_TIME",
")"
] |
Checks that the input annotations to a metric look like valid onset time
arrays, and throws helpful errors if not.
Parameters
----------
reference_onsets : np.ndarray
reference onset locations, in seconds
estimated_onsets : np.ndarray
estimated onset locations, in seconds
|
[
"Checks",
"that",
"the",
"input",
"annotations",
"to",
"a",
"metric",
"look",
"like",
"valid",
"onset",
"time",
"arrays",
"and",
"throws",
"helpful",
"errors",
"if",
"not",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/onset.py#L35-L53
|
train
|
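A small sketch of how the onset validator above behaves on hypothetical inputs; an empty estimate only warns, while malformed event arrays raise through ``util.validate_events``.

import numpy as np
import warnings
import mir_eval.onset

reference_onsets = np.array([0.5, 1.0, 1.5])
estimated_onsets = np.array([])  # empty: warns rather than raises

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    mir_eval.onset.validate(reference_onsets, estimated_onsets)
# caught now holds the "Estimated onsets are empty." warning
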
craffel/mir_eval
|
mir_eval/onset.py
|
f_measure
|
def f_measure(reference_onsets, estimated_onsets, window=.05):
"""Compute the F-measure of correct vs incorrectly predicted onsets.
"Corectness" is determined over a small window.
Examples
--------
>>> reference_onsets = mir_eval.io.load_events('reference.txt')
>>> estimated_onsets = mir_eval.io.load_events('estimated.txt')
>>> F, P, R = mir_eval.onset.f_measure(reference_onsets,
... estimated_onsets)
Parameters
----------
reference_onsets : np.ndarray
reference onset locations, in seconds
estimated_onsets : np.ndarray
estimated onset locations, in seconds
window : float
Window size, in seconds
(Default value = .05)
Returns
-------
f_measure : float
2*precision*recall/(precision + recall)
precision : float
(# true positives)/(# true positives + # false positives)
recall : float
(# true positives)/(# true positives + # false negatives)
"""
validate(reference_onsets, estimated_onsets)
# If either list is empty, return 0s
if reference_onsets.size == 0 or estimated_onsets.size == 0:
return 0., 0., 0.
# Compute the best-case matching between reference and estimated onset
# locations
matching = util.match_events(reference_onsets, estimated_onsets, window)
precision = float(len(matching))/len(estimated_onsets)
recall = float(len(matching))/len(reference_onsets)
# Compute F-measure and return all statistics
return util.f_measure(precision, recall), precision, recall
|
python
|
def f_measure(reference_onsets, estimated_onsets, window=.05):
"""Compute the F-measure of correct vs incorrectly predicted onsets.
"Corectness" is determined over a small window.
Examples
--------
>>> reference_onsets = mir_eval.io.load_events('reference.txt')
>>> estimated_onsets = mir_eval.io.load_events('estimated.txt')
>>> F, P, R = mir_eval.onset.f_measure(reference_onsets,
... estimated_onsets)
Parameters
----------
reference_onsets : np.ndarray
reference onset locations, in seconds
estimated_onsets : np.ndarray
estimated onset locations, in seconds
window : float
Window size, in seconds
(Default value = .05)
Returns
-------
f_measure : float
2*precision*recall/(precision + recall)
precision : float
(# true positives)/(# true positives + # false positives)
recall : float
(# true positives)/(# true positives + # false negatives)
"""
validate(reference_onsets, estimated_onsets)
# If either list is empty, return 0s
if reference_onsets.size == 0 or estimated_onsets.size == 0:
return 0., 0., 0.
# Compute the best-case matching between reference and estimated onset
# locations
matching = util.match_events(reference_onsets, estimated_onsets, window)
precision = float(len(matching))/len(estimated_onsets)
recall = float(len(matching))/len(reference_onsets)
# Compute F-measure and return all statistics
return util.f_measure(precision, recall), precision, recall
|
[
"def",
"f_measure",
"(",
"reference_onsets",
",",
"estimated_onsets",
",",
"window",
"=",
".05",
")",
":",
"validate",
"(",
"reference_onsets",
",",
"estimated_onsets",
")",
"# If either list is empty, return 0s",
"if",
"reference_onsets",
".",
"size",
"==",
"0",
"or",
"estimated_onsets",
".",
"size",
"==",
"0",
":",
"return",
"0.",
",",
"0.",
",",
"0.",
"# Compute the best-case matching between reference and estimated onset",
"# locations",
"matching",
"=",
"util",
".",
"match_events",
"(",
"reference_onsets",
",",
"estimated_onsets",
",",
"window",
")",
"precision",
"=",
"float",
"(",
"len",
"(",
"matching",
")",
")",
"/",
"len",
"(",
"estimated_onsets",
")",
"recall",
"=",
"float",
"(",
"len",
"(",
"matching",
")",
")",
"/",
"len",
"(",
"reference_onsets",
")",
"# Compute F-measure and return all statistics",
"return",
"util",
".",
"f_measure",
"(",
"precision",
",",
"recall",
")",
",",
"precision",
",",
"recall"
] |
Compute the F-measure of correct vs incorrectly predicted onsets.
"Corectness" is determined over a small window.
Examples
--------
>>> reference_onsets = mir_eval.io.load_events('reference.txt')
>>> estimated_onsets = mir_eval.io.load_events('estimated.txt')
>>> F, P, R = mir_eval.onset.f_measure(reference_onsets,
... estimated_onsets)
Parameters
----------
reference_onsets : np.ndarray
reference onset locations, in seconds
estimated_onsets : np.ndarray
estimated onset locations, in seconds
window : float
Window size, in seconds
(Default value = .05)
Returns
-------
f_measure : float
2*precision*recall/(precision + recall)
precision : float
(# true positives)/(# true positives + # false positives)
recall : float
(# true positives)/(# true positives + # false negatives)
|
[
"Compute",
"the",
"F",
"-",
"measure",
"of",
"correct",
"vs",
"incorrectly",
"predicted",
"onsets",
".",
"Corectness",
"is",
"determined",
"over",
"a",
"small",
"window",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/onset.py#L56-L98
|
train
|
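A worked example for the onset F-measure above, using hypothetical onset times; with the default 50 ms window, two of the three estimates match a reference onset.

import numpy as np
import mir_eval.onset

reference_onsets = np.array([0.10, 0.50, 1.00, 1.50])
estimated_onsets = np.array([0.12, 0.52, 1.20])

f, precision, recall = mir_eval.onset.f_measure(reference_onsets,
                                                estimated_onsets,
                                                window=0.05)
# precision = 2/3, recall = 2/4, f = 2 * p * r / (p + r) ~= 0.57
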
craffel/mir_eval
|
mir_eval/transcription.py
|
validate
|
def validate(ref_intervals, ref_pitches, est_intervals, est_pitches):
"""Checks that the input annotations to a metric look like time intervals
and a pitch list, and throws helpful errors if not.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
ref_pitches : np.ndarray, shape=(n,)
Array of reference pitch values in Hertz
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
est_pitches : np.ndarray, shape=(m,)
Array of estimated pitch values in Hertz
"""
# Validate intervals
validate_intervals(ref_intervals, est_intervals)
# Make sure intervals and pitches match in length
if not ref_intervals.shape[0] == ref_pitches.shape[0]:
raise ValueError('Reference intervals and pitches have different '
'lengths.')
if not est_intervals.shape[0] == est_pitches.shape[0]:
raise ValueError('Estimated intervals and pitches have different '
'lengths.')
# Make sure all pitch values are positive
if ref_pitches.size > 0 and np.min(ref_pitches) <= 0:
raise ValueError("Reference contains at least one non-positive pitch "
"value")
if est_pitches.size > 0 and np.min(est_pitches) <= 0:
raise ValueError("Estimate contains at least one non-positive pitch "
"value")
|
python
|
def validate(ref_intervals, ref_pitches, est_intervals, est_pitches):
"""Checks that the input annotations to a metric look like time intervals
and a pitch list, and throws helpful errors if not.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
ref_pitches : np.ndarray, shape=(n,)
Array of reference pitch values in Hertz
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
est_pitches : np.ndarray, shape=(m,)
Array of estimated pitch values in Hertz
"""
# Validate intervals
validate_intervals(ref_intervals, est_intervals)
# Make sure intervals and pitches match in length
if not ref_intervals.shape[0] == ref_pitches.shape[0]:
raise ValueError('Reference intervals and pitches have different '
'lengths.')
if not est_intervals.shape[0] == est_pitches.shape[0]:
raise ValueError('Estimated intervals and pitches have different '
'lengths.')
# Make sure all pitch values are positive
if ref_pitches.size > 0 and np.min(ref_pitches) <= 0:
raise ValueError("Reference contains at least one non-positive pitch "
"value")
if est_pitches.size > 0 and np.min(est_pitches) <= 0:
raise ValueError("Estimate contains at least one non-positive pitch "
"value")
|
[
"def",
"validate",
"(",
"ref_intervals",
",",
"ref_pitches",
",",
"est_intervals",
",",
"est_pitches",
")",
":",
"# Validate intervals",
"validate_intervals",
"(",
"ref_intervals",
",",
"est_intervals",
")",
"# Make sure intervals and pitches match in length",
"if",
"not",
"ref_intervals",
".",
"shape",
"[",
"0",
"]",
"==",
"ref_pitches",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"'Reference intervals and pitches have different '",
"'lengths.'",
")",
"if",
"not",
"est_intervals",
".",
"shape",
"[",
"0",
"]",
"==",
"est_pitches",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"'Estimated intervals and pitches have different '",
"'lengths.'",
")",
"# Make sure all pitch values are positive",
"if",
"ref_pitches",
".",
"size",
">",
"0",
"and",
"np",
".",
"min",
"(",
"ref_pitches",
")",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Reference contains at least one non-positive pitch \"",
"\"value\"",
")",
"if",
"est_pitches",
".",
"size",
">",
"0",
"and",
"np",
".",
"min",
"(",
"est_pitches",
")",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Estimate contains at least one non-positive pitch \"",
"\"value\"",
")"
] |
Checks that the input annotations to a metric look like time intervals
and a pitch list, and throws helpful errors if not.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
ref_pitches : np.ndarray, shape=(n,)
Array of reference pitch values in Hertz
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
est_pitches : np.ndarray, shape=(m,)
Array of estimated pitch values in Hertz
|
[
"Checks",
"that",
"the",
"input",
"annotations",
"to",
"a",
"metric",
"look",
"like",
"time",
"intervals",
"and",
"a",
"pitch",
"list",
"and",
"throws",
"helpful",
"errors",
"if",
"not",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/transcription.py#L117-L149
|
train
|
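A brief sketch of valid inputs for the transcription validator above; mismatched interval/pitch lengths or non-positive pitch values would raise a ValueError. The note values are hypothetical.

import numpy as np
import mir_eval.transcription

ref_intervals = np.array([[0.0, 0.5], [0.6, 1.2]])
ref_pitches = np.array([440.0, 220.0])     # Hz, strictly positive
est_intervals = np.array([[0.0, 0.55]])
est_pitches = np.array([441.0])

# Row counts agree and all pitches are positive, so this passes silently.
mir_eval.transcription.validate(ref_intervals, ref_pitches,
                                est_intervals, est_pitches)
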
craffel/mir_eval
|
mir_eval/transcription.py
|
validate_intervals
|
def validate_intervals(ref_intervals, est_intervals):
"""Checks that the input annotations to a metric look like time intervals,
and throws helpful errors if not.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
"""
# If reference or estimated notes are empty, warn
if ref_intervals.size == 0:
warnings.warn("Reference notes are empty.")
if est_intervals.size == 0:
warnings.warn("Estimated notes are empty.")
# Validate intervals
util.validate_intervals(ref_intervals)
util.validate_intervals(est_intervals)
|
python
|
def validate_intervals(ref_intervals, est_intervals):
"""Checks that the input annotations to a metric look like time intervals,
and throws helpful errors if not.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
"""
# If reference or estimated notes are empty, warn
if ref_intervals.size == 0:
warnings.warn("Reference notes are empty.")
if est_intervals.size == 0:
warnings.warn("Estimated notes are empty.")
# Validate intervals
util.validate_intervals(ref_intervals)
util.validate_intervals(est_intervals)
|
[
"def",
"validate_intervals",
"(",
"ref_intervals",
",",
"est_intervals",
")",
":",
"# If reference or estimated notes are empty, warn",
"if",
"ref_intervals",
".",
"size",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Reference notes are empty.\"",
")",
"if",
"est_intervals",
".",
"size",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Estimated notes are empty.\"",
")",
"# Validate intervals",
"util",
".",
"validate_intervals",
"(",
"ref_intervals",
")",
"util",
".",
"validate_intervals",
"(",
"est_intervals",
")"
] |
Checks that the input annotations to a metric look like time intervals,
and throws helpful errors if not.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
|
[
"Checks",
"that",
"the",
"input",
"annotations",
"to",
"a",
"metric",
"look",
"like",
"time",
"intervals",
"and",
"throws",
"helpful",
"errors",
"if",
"not",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/transcription.py#L152-L171
|
train
|
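A short sketch of the interval-only validator above on hypothetical data; an empty estimate only triggers a warning.

import numpy as np
import mir_eval.transcription

ref_intervals = np.array([[0.0, 1.0], [1.5, 2.0]])
est_intervals = np.empty((0, 2))  # no estimated notes: warns, does not raise

mir_eval.transcription.validate_intervals(ref_intervals, est_intervals)
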
craffel/mir_eval
|
mir_eval/transcription.py
|
match_note_offsets
|
def match_note_offsets(ref_intervals, est_intervals, offset_ratio=0.2,
offset_min_tolerance=0.05, strict=False):
"""Compute a maximum matching between reference and estimated notes,
only taking note offsets into account.
Given two note sequences represented by ``ref_intervals`` and
``est_intervals`` (see :func:`mir_eval.io.load_valued_intervals`), we seek
the largest set of correspondences ``(i, j)`` such that the offset of
    reference note ``i`` is within ``offset_tolerance`` of the offset of
estimated note ``j``, where ``offset_tolerance`` is equal to
``offset_ratio`` times the reference note's duration, i.e. ``offset_ratio
* ref_duration[i]`` where ``ref_duration[i] = ref_intervals[i, 1] -
ref_intervals[i, 0]``. If the resulting ``offset_tolerance`` is less than
``offset_min_tolerance`` (50 ms by default) then ``offset_min_tolerance``
is used instead.
Every reference note is matched against at most one estimated note.
Note there are separate functions :func:`match_note_onsets` and
:func:`match_notes` for matching notes based on onsets only or based on
onset, offset, and pitch, respectively. This is because the rules for
matching note onsets and matching note offsets are different.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
offset_ratio : float > 0
The ratio of the reference note's duration used to define the
``offset_tolerance``. Default is 0.2 (20%), meaning the
``offset_tolerance`` will equal the ``ref_duration * 0.2``, or 0.05 (50
ms), whichever is greater.
offset_min_tolerance : float > 0
The minimum tolerance for offset matching. See ``offset_ratio``
description for an explanation of how the offset tolerance is
determined.
strict : bool
If ``strict=False`` (the default), threshold checks for offset
matching are performed using ``<=`` (less than or equal). If
``strict=True``, the threshold checks are performed using ``<`` (less
than).
Returns
-------
matching : list of tuples
A list of matched reference and estimated notes.
``matching[i] == (i, j)`` where reference note ``i`` matches estimated
note ``j``.
"""
# set the comparison function
if strict:
cmp_func = np.less
else:
cmp_func = np.less_equal
# check for offset matches
offset_distances = np.abs(np.subtract.outer(ref_intervals[:, 1],
est_intervals[:, 1]))
# Round distances to a target precision to avoid the situation where
# if the distance is exactly 50ms (and strict=False) it erroneously
# doesn't match the notes because of precision issues.
offset_distances = np.around(offset_distances, decimals=N_DECIMALS)
ref_durations = util.intervals_to_durations(ref_intervals)
offset_tolerances = np.maximum(offset_ratio * ref_durations,
offset_min_tolerance)
offset_hit_matrix = (
cmp_func(offset_distances, offset_tolerances.reshape(-1, 1)))
# check for hits
hits = np.where(offset_hit_matrix)
# Construct the graph input
# Flip graph so that 'matching' is a list of tuples where the first item
# in each tuple is the reference note index, and the second item is the
# estimated note index.
G = {}
for ref_i, est_i in zip(*hits):
if est_i not in G:
G[est_i] = []
G[est_i].append(ref_i)
# Compute the maximum matching
matching = sorted(util._bipartite_match(G).items())
return matching
|
python
|
def match_note_offsets(ref_intervals, est_intervals, offset_ratio=0.2,
offset_min_tolerance=0.05, strict=False):
"""Compute a maximum matching between reference and estimated notes,
only taking note offsets into account.
Given two note sequences represented by ``ref_intervals`` and
``est_intervals`` (see :func:`mir_eval.io.load_valued_intervals`), we seek
the largest set of correspondences ``(i, j)`` such that the offset of
    reference note ``i`` is within ``offset_tolerance`` of the offset of
estimated note ``j``, where ``offset_tolerance`` is equal to
``offset_ratio`` times the reference note's duration, i.e. ``offset_ratio
* ref_duration[i]`` where ``ref_duration[i] = ref_intervals[i, 1] -
ref_intervals[i, 0]``. If the resulting ``offset_tolerance`` is less than
``offset_min_tolerance`` (50 ms by default) then ``offset_min_tolerance``
is used instead.
Every reference note is matched against at most one estimated note.
Note there are separate functions :func:`match_note_onsets` and
:func:`match_notes` for matching notes based on onsets only or based on
onset, offset, and pitch, respectively. This is because the rules for
matching note onsets and matching note offsets are different.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
offset_ratio : float > 0
The ratio of the reference note's duration used to define the
``offset_tolerance``. Default is 0.2 (20%), meaning the
``offset_tolerance`` will equal the ``ref_duration * 0.2``, or 0.05 (50
ms), whichever is greater.
offset_min_tolerance : float > 0
The minimum tolerance for offset matching. See ``offset_ratio``
description for an explanation of how the offset tolerance is
determined.
strict : bool
If ``strict=False`` (the default), threshold checks for offset
matching are performed using ``<=`` (less than or equal). If
``strict=True``, the threshold checks are performed using ``<`` (less
than).
Returns
-------
matching : list of tuples
A list of matched reference and estimated notes.
``matching[i] == (i, j)`` where reference note ``i`` matches estimated
note ``j``.
"""
# set the comparison function
if strict:
cmp_func = np.less
else:
cmp_func = np.less_equal
# check for offset matches
offset_distances = np.abs(np.subtract.outer(ref_intervals[:, 1],
est_intervals[:, 1]))
# Round distances to a target precision to avoid the situation where
# if the distance is exactly 50ms (and strict=False) it erroneously
# doesn't match the notes because of precision issues.
offset_distances = np.around(offset_distances, decimals=N_DECIMALS)
ref_durations = util.intervals_to_durations(ref_intervals)
offset_tolerances = np.maximum(offset_ratio * ref_durations,
offset_min_tolerance)
offset_hit_matrix = (
cmp_func(offset_distances, offset_tolerances.reshape(-1, 1)))
# check for hits
hits = np.where(offset_hit_matrix)
# Construct the graph input
# Flip graph so that 'matching' is a list of tuples where the first item
# in each tuple is the reference note index, and the second item is the
# estimated note index.
G = {}
for ref_i, est_i in zip(*hits):
if est_i not in G:
G[est_i] = []
G[est_i].append(ref_i)
# Compute the maximum matching
matching = sorted(util._bipartite_match(G).items())
return matching
|
[
"def",
"match_note_offsets",
"(",
"ref_intervals",
",",
"est_intervals",
",",
"offset_ratio",
"=",
"0.2",
",",
"offset_min_tolerance",
"=",
"0.05",
",",
"strict",
"=",
"False",
")",
":",
"# set the comparison function",
"if",
"strict",
":",
"cmp_func",
"=",
"np",
".",
"less",
"else",
":",
"cmp_func",
"=",
"np",
".",
"less_equal",
"# check for offset matches",
"offset_distances",
"=",
"np",
".",
"abs",
"(",
"np",
".",
"subtract",
".",
"outer",
"(",
"ref_intervals",
"[",
":",
",",
"1",
"]",
",",
"est_intervals",
"[",
":",
",",
"1",
"]",
")",
")",
"# Round distances to a target precision to avoid the situation where",
"# if the distance is exactly 50ms (and strict=False) it erroneously",
"# doesn't match the notes because of precision issues.",
"offset_distances",
"=",
"np",
".",
"around",
"(",
"offset_distances",
",",
"decimals",
"=",
"N_DECIMALS",
")",
"ref_durations",
"=",
"util",
".",
"intervals_to_durations",
"(",
"ref_intervals",
")",
"offset_tolerances",
"=",
"np",
".",
"maximum",
"(",
"offset_ratio",
"*",
"ref_durations",
",",
"offset_min_tolerance",
")",
"offset_hit_matrix",
"=",
"(",
"cmp_func",
"(",
"offset_distances",
",",
"offset_tolerances",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
")",
")",
"# check for hits",
"hits",
"=",
"np",
".",
"where",
"(",
"offset_hit_matrix",
")",
"# Construct the graph input",
"# Flip graph so that 'matching' is a list of tuples where the first item",
"# in each tuple is the reference note index, and the second item is the",
"# estimated note index.",
"G",
"=",
"{",
"}",
"for",
"ref_i",
",",
"est_i",
"in",
"zip",
"(",
"*",
"hits",
")",
":",
"if",
"est_i",
"not",
"in",
"G",
":",
"G",
"[",
"est_i",
"]",
"=",
"[",
"]",
"G",
"[",
"est_i",
"]",
".",
"append",
"(",
"ref_i",
")",
"# Compute the maximum matching",
"matching",
"=",
"sorted",
"(",
"util",
".",
"_bipartite_match",
"(",
"G",
")",
".",
"items",
"(",
")",
")",
"return",
"matching"
] |
Compute a maximum matching between reference and estimated notes,
only taking note offsets into account.
Given two note sequences represented by ``ref_intervals`` and
``est_intervals`` (see :func:`mir_eval.io.load_valued_intervals`), we seek
the largest set of correspondences ``(i, j)`` such that the offset of
reference note ``i`` is within ``offset_tolerance`` of the offset of
estimated note ``j``, where ``offset_tolerance`` is equal to
``offset_ratio`` times the reference note's duration, i.e. ``offset_ratio
* ref_duration[i]`` where ``ref_duration[i] = ref_intervals[i, 1] -
ref_intervals[i, 0]``. If the resulting ``offset_tolerance`` is less than
``offset_min_tolerance`` (50 ms by default) then ``offset_min_tolerance``
is used instead.
Every reference note is matched against at most one estimated note.
Note there are separate functions :func:`match_note_onsets` and
:func:`match_notes` for matching notes based on onsets only or based on
onset, offset, and pitch, respectively. This is because the rules for
matching note onsets and matching note offsets are different.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
offset_ratio : float > 0
The ratio of the reference note's duration used to define the
``offset_tolerance``. Default is 0.2 (20%), meaning the
``offset_tolerance`` will equal the ``ref_duration * 0.2``, or 0.05 (50
ms), whichever is greater.
offset_min_tolerance : float > 0
The minimum tolerance for offset matching. See ``offset_ratio``
description for an explanation of how the offset tolerance is
determined.
strict : bool
If ``strict=False`` (the default), threshold checks for offset
matching are performed using ``<=`` (less than or equal). If
``strict=True``, the threshold checks are performed using ``<`` (less
than).
Returns
-------
matching : list of tuples
A list of matched reference and estimated notes.
``matching[i] == (i, j)`` where reference note ``i`` matches estimated
note ``j``.
|
[
"Compute",
"a",
"maximum",
"matching",
"between",
"reference",
"and",
"estimated",
"notes",
"only",
"taking",
"note",
"offsets",
"into",
"account",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/transcription.py#L174-L260
|
train
|
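A worked sketch of offset-only matching with hypothetical intervals; for the first reference note the tolerance is max(0.2 * 1.0 s, 0.05 s) = 0.2 s.

import numpy as np
import mir_eval.transcription

ref_intervals = np.array([[0.0, 1.0], [1.0, 2.0]])
est_intervals = np.array([[0.1, 1.05], [1.2, 2.5]])

matching = mir_eval.transcription.match_note_offsets(ref_intervals,
                                                     est_intervals)
# Offsets 1.0 and 1.05 differ by 0.05 s <= 0.2 s, so matching == [(0, 0)];
# the second estimate's offset (2.5 s) is 0.5 s from its reference and misses.
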
craffel/mir_eval
|
mir_eval/transcription.py
|
match_note_onsets
|
def match_note_onsets(ref_intervals, est_intervals, onset_tolerance=0.05,
strict=False):
"""Compute a maximum matching between reference and estimated notes,
only taking note onsets into account.
Given two note sequences represented by ``ref_intervals`` and
    ``est_intervals`` (see :func:`mir_eval.io.load_valued_intervals`), we seek
the largest set of correspondences ``(i,j)`` such that the onset of
reference note ``i`` is within ``onset_tolerance`` of the onset of
estimated note ``j``.
Every reference note is matched against at most one estimated note.
Note there are separate functions :func:`match_note_offsets` and
:func:`match_notes` for matching notes based on offsets only or based on
onset, offset, and pitch, respectively. This is because the rules for
matching note onsets and matching note offsets are different.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
onset_tolerance : float > 0
The tolerance for an estimated note's onset deviating from the
reference note's onset, in seconds. Default is 0.05 (50 ms).
strict : bool
If ``strict=False`` (the default), threshold checks for onset matching
are performed using ``<=`` (less than or equal). If ``strict=True``,
the threshold checks are performed using ``<`` (less than).
Returns
-------
matching : list of tuples
A list of matched reference and estimated notes.
``matching[i] == (i, j)`` where reference note ``i`` matches estimated
note ``j``.
"""
# set the comparison function
if strict:
cmp_func = np.less
else:
cmp_func = np.less_equal
# check for onset matches
onset_distances = np.abs(np.subtract.outer(ref_intervals[:, 0],
est_intervals[:, 0]))
# Round distances to a target precision to avoid the situation where
# if the distance is exactly 50ms (and strict=False) it erroneously
# doesn't match the notes because of precision issues.
onset_distances = np.around(onset_distances, decimals=N_DECIMALS)
onset_hit_matrix = cmp_func(onset_distances, onset_tolerance)
# find hits
hits = np.where(onset_hit_matrix)
# Construct the graph input
# Flip graph so that 'matching' is a list of tuples where the first item
# in each tuple is the reference note index, and the second item is the
# estimated note index.
G = {}
for ref_i, est_i in zip(*hits):
if est_i not in G:
G[est_i] = []
G[est_i].append(ref_i)
# Compute the maximum matching
matching = sorted(util._bipartite_match(G).items())
return matching
|
python
|
def match_note_onsets(ref_intervals, est_intervals, onset_tolerance=0.05,
strict=False):
"""Compute a maximum matching between reference and estimated notes,
only taking note onsets into account.
Given two note sequences represented by ``ref_intervals`` and
    ``est_intervals`` (see :func:`mir_eval.io.load_valued_intervals`), we seek
the largest set of correspondences ``(i,j)`` such that the onset of
reference note ``i`` is within ``onset_tolerance`` of the onset of
estimated note ``j``.
Every reference note is matched against at most one estimated note.
Note there are separate functions :func:`match_note_offsets` and
:func:`match_notes` for matching notes based on offsets only or based on
onset, offset, and pitch, respectively. This is because the rules for
matching note onsets and matching note offsets are different.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
onset_tolerance : float > 0
The tolerance for an estimated note's onset deviating from the
reference note's onset, in seconds. Default is 0.05 (50 ms).
strict : bool
If ``strict=False`` (the default), threshold checks for onset matching
are performed using ``<=`` (less than or equal). If ``strict=True``,
the threshold checks are performed using ``<`` (less than).
Returns
-------
matching : list of tuples
A list of matched reference and estimated notes.
``matching[i] == (i, j)`` where reference note ``i`` matches estimated
note ``j``.
"""
# set the comparison function
if strict:
cmp_func = np.less
else:
cmp_func = np.less_equal
# check for onset matches
onset_distances = np.abs(np.subtract.outer(ref_intervals[:, 0],
est_intervals[:, 0]))
# Round distances to a target precision to avoid the situation where
# if the distance is exactly 50ms (and strict=False) it erroneously
# doesn't match the notes because of precision issues.
onset_distances = np.around(onset_distances, decimals=N_DECIMALS)
onset_hit_matrix = cmp_func(onset_distances, onset_tolerance)
# find hits
hits = np.where(onset_hit_matrix)
# Construct the graph input
# Flip graph so that 'matching' is a list of tuples where the first item
# in each tuple is the reference note index, and the second item is the
# estimated note index.
G = {}
for ref_i, est_i in zip(*hits):
if est_i not in G:
G[est_i] = []
G[est_i].append(ref_i)
# Compute the maximum matching
matching = sorted(util._bipartite_match(G).items())
return matching
|
[
"def",
"match_note_onsets",
"(",
"ref_intervals",
",",
"est_intervals",
",",
"onset_tolerance",
"=",
"0.05",
",",
"strict",
"=",
"False",
")",
":",
"# set the comparison function",
"if",
"strict",
":",
"cmp_func",
"=",
"np",
".",
"less",
"else",
":",
"cmp_func",
"=",
"np",
".",
"less_equal",
"# check for onset matches",
"onset_distances",
"=",
"np",
".",
"abs",
"(",
"np",
".",
"subtract",
".",
"outer",
"(",
"ref_intervals",
"[",
":",
",",
"0",
"]",
",",
"est_intervals",
"[",
":",
",",
"0",
"]",
")",
")",
"# Round distances to a target precision to avoid the situation where",
"# if the distance is exactly 50ms (and strict=False) it erroneously",
"# doesn't match the notes because of precision issues.",
"onset_distances",
"=",
"np",
".",
"around",
"(",
"onset_distances",
",",
"decimals",
"=",
"N_DECIMALS",
")",
"onset_hit_matrix",
"=",
"cmp_func",
"(",
"onset_distances",
",",
"onset_tolerance",
")",
"# find hits",
"hits",
"=",
"np",
".",
"where",
"(",
"onset_hit_matrix",
")",
"# Construct the graph input",
"# Flip graph so that 'matching' is a list of tuples where the first item",
"# in each tuple is the reference note index, and the second item is the",
"# estimated note index.",
"G",
"=",
"{",
"}",
"for",
"ref_i",
",",
"est_i",
"in",
"zip",
"(",
"*",
"hits",
")",
":",
"if",
"est_i",
"not",
"in",
"G",
":",
"G",
"[",
"est_i",
"]",
"=",
"[",
"]",
"G",
"[",
"est_i",
"]",
".",
"append",
"(",
"ref_i",
")",
"# Compute the maximum matching",
"matching",
"=",
"sorted",
"(",
"util",
".",
"_bipartite_match",
"(",
"G",
")",
".",
"items",
"(",
")",
")",
"return",
"matching"
] |
Compute a maximum matching between reference and estimated notes,
only taking note onsets into account.
Given two note sequences represented by ``ref_intervals`` and
``est_intervals`` (see :func:`mir_eval.io.load_valued_intervals`), we seek
the largest set of correspondences ``(i,j)`` such that the onset of
reference note ``i`` is within ``onset_tolerance`` of the onset of
estimated note ``j``.
Every reference note is matched against at most one estimated note.
Note there are separate functions :func:`match_note_offsets` and
:func:`match_notes` for matching notes based on offsets only or based on
onset, offset, and pitch, respectively. This is because the rules for
matching note onsets and matching note offsets are different.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
onset_tolerance : float > 0
The tolerance for an estimated note's onset deviating from the
reference note's onset, in seconds. Default is 0.05 (50 ms).
strict : bool
If ``strict=False`` (the default), threshold checks for onset matching
are performed using ``<=`` (less than or equal). If ``strict=True``,
the threshold checks are performed using ``<`` (less than).
Returns
-------
matching : list of tuples
A list of matched reference and estimated notes.
``matching[i] == (i, j)`` where reference note ``i`` matches estimated
note ``j``.
|
[
"Compute",
"a",
"maximum",
"matching",
"between",
"reference",
"and",
"estimated",
"notes",
"only",
"taking",
"note",
"onsets",
"into",
"account",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/transcription.py#L263-L333
|
train
|
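A worked sketch of onset-only matching with hypothetical intervals and the default 50 ms tolerance.

import numpy as np
import mir_eval.transcription

ref_intervals = np.array([[0.00, 0.50], [1.00, 1.60]])
est_intervals = np.array([[0.04, 0.45], [1.10, 1.70]])

matching = mir_eval.transcription.match_note_onsets(ref_intervals,
                                                    est_intervals)
# Onsets 0.00 and 0.04 are within 0.05 s, but 1.00 and 1.10 are not,
# so matching == [(0, 0)].
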
craffel/mir_eval
|
mir_eval/melody.py
|
validate_voicing
|
def validate_voicing(ref_voicing, est_voicing):
"""Checks that voicing inputs to a metric are in the correct format.
Parameters
----------
ref_voicing : np.ndarray
Reference boolean voicing array
est_voicing : np.ndarray
Estimated boolean voicing array
"""
if ref_voicing.size == 0:
warnings.warn("Reference voicing array is empty.")
if est_voicing.size == 0:
warnings.warn("Estimated voicing array is empty.")
if ref_voicing.sum() == 0:
warnings.warn("Reference melody has no voiced frames.")
if est_voicing.sum() == 0:
warnings.warn("Estimated melody has no voiced frames.")
# Make sure they're the same length
if ref_voicing.shape[0] != est_voicing.shape[0]:
raise ValueError('Reference and estimated voicing arrays should '
'be the same length.')
for voicing in [ref_voicing, est_voicing]:
# Make sure they're (effectively) boolean
if np.logical_and(voicing != 0, voicing != 1).any():
raise ValueError('Voicing arrays must be boolean.')
|
python
|
def validate_voicing(ref_voicing, est_voicing):
"""Checks that voicing inputs to a metric are in the correct format.
Parameters
----------
ref_voicing : np.ndarray
Reference boolean voicing array
est_voicing : np.ndarray
Estimated boolean voicing array
"""
if ref_voicing.size == 0:
warnings.warn("Reference voicing array is empty.")
if est_voicing.size == 0:
warnings.warn("Estimated voicing array is empty.")
if ref_voicing.sum() == 0:
warnings.warn("Reference melody has no voiced frames.")
if est_voicing.sum() == 0:
warnings.warn("Estimated melody has no voiced frames.")
# Make sure they're the same length
if ref_voicing.shape[0] != est_voicing.shape[0]:
raise ValueError('Reference and estimated voicing arrays should '
'be the same length.')
for voicing in [ref_voicing, est_voicing]:
# Make sure they're (effectively) boolean
if np.logical_and(voicing != 0, voicing != 1).any():
raise ValueError('Voicing arrays must be boolean.')
|
[
"def",
"validate_voicing",
"(",
"ref_voicing",
",",
"est_voicing",
")",
":",
"if",
"ref_voicing",
".",
"size",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Reference voicing array is empty.\"",
")",
"if",
"est_voicing",
".",
"size",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Estimated voicing array is empty.\"",
")",
"if",
"ref_voicing",
".",
"sum",
"(",
")",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Reference melody has no voiced frames.\"",
")",
"if",
"est_voicing",
".",
"sum",
"(",
")",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Estimated melody has no voiced frames.\"",
")",
"# Make sure they're the same length",
"if",
"ref_voicing",
".",
"shape",
"[",
"0",
"]",
"!=",
"est_voicing",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"'Reference and estimated voicing arrays should '",
"'be the same length.'",
")",
"for",
"voicing",
"in",
"[",
"ref_voicing",
",",
"est_voicing",
"]",
":",
"# Make sure they're (effectively) boolean",
"if",
"np",
".",
"logical_and",
"(",
"voicing",
"!=",
"0",
",",
"voicing",
"!=",
"1",
")",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'Voicing arrays must be boolean.'",
")"
] |
Checks that voicing inputs to a metric are in the correct format.
Parameters
----------
ref_voicing : np.ndarray
Reference boolean voicing array
est_voicing : np.ndarray
Estimated boolean voicing array
|
[
"Checks",
"that",
"voicing",
"inputs",
"to",
"a",
"metric",
"are",
"in",
"the",
"correct",
"format",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/melody.py#L61-L87
|
train
|
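A minimal sketch of the voicing validator above; the hypothetical arrays are boolean and the same length, so nothing is raised.

import numpy as np
import mir_eval.melody

ref_voicing = np.array([1, 1, 0, 1], dtype=bool)
est_voicing = np.array([1, 0, 0, 1], dtype=bool)

mir_eval.melody.validate_voicing(ref_voicing, est_voicing)
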
craffel/mir_eval
|
mir_eval/melody.py
|
hz2cents
|
def hz2cents(freq_hz, base_frequency=10.0):
"""Convert an array of frequency values in Hz to cents.
0 values are left in place.
Parameters
----------
freq_hz : np.ndarray
Array of frequencies in Hz.
base_frequency : float
Base frequency for conversion.
(Default value = 10.0)
Returns
-------
cent : np.ndarray
Array of frequencies in cents, relative to base_frequency
"""
freq_cent = np.zeros(freq_hz.shape[0])
freq_nonz_ind = np.flatnonzero(freq_hz)
normalized_frequency = np.abs(freq_hz[freq_nonz_ind])/base_frequency
freq_cent[freq_nonz_ind] = 1200*np.log2(normalized_frequency)
return freq_cent
|
python
|
def hz2cents(freq_hz, base_frequency=10.0):
"""Convert an array of frequency values in Hz to cents.
0 values are left in place.
Parameters
----------
freq_hz : np.ndarray
Array of frequencies in Hz.
base_frequency : float
Base frequency for conversion.
(Default value = 10.0)
Returns
-------
cent : np.ndarray
Array of frequencies in cents, relative to base_frequency
"""
freq_cent = np.zeros(freq_hz.shape[0])
freq_nonz_ind = np.flatnonzero(freq_hz)
normalized_frequency = np.abs(freq_hz[freq_nonz_ind])/base_frequency
freq_cent[freq_nonz_ind] = 1200*np.log2(normalized_frequency)
return freq_cent
|
[
"def",
"hz2cents",
"(",
"freq_hz",
",",
"base_frequency",
"=",
"10.0",
")",
":",
"freq_cent",
"=",
"np",
".",
"zeros",
"(",
"freq_hz",
".",
"shape",
"[",
"0",
"]",
")",
"freq_nonz_ind",
"=",
"np",
".",
"flatnonzero",
"(",
"freq_hz",
")",
"normalized_frequency",
"=",
"np",
".",
"abs",
"(",
"freq_hz",
"[",
"freq_nonz_ind",
"]",
")",
"/",
"base_frequency",
"freq_cent",
"[",
"freq_nonz_ind",
"]",
"=",
"1200",
"*",
"np",
".",
"log2",
"(",
"normalized_frequency",
")",
"return",
"freq_cent"
] |
Convert an array of frequency values in Hz to cents.
0 values are left in place.
Parameters
----------
freq_hz : np.ndarray
Array of frequencies in Hz.
base_frequency : float
Base frequency for conversion.
(Default value = 10.0)
Returns
-------
cent : np.ndarray
Array of frequencies in cents, relative to base_frequency
|
[
"Convert",
"an",
"array",
"of",
"frequency",
"values",
"in",
"Hz",
"to",
"cents",
".",
"0",
"values",
"are",
"left",
"in",
"place",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/melody.py#L118-L141
|
train
|
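A worked example of the Hz-to-cents conversion above; with the default 10 Hz base, one octave above the base maps to 1200 cents and zeros stay zero.

import numpy as np
import mir_eval.melody

freq_hz = np.array([0.0, 10.0, 20.0, 440.0])
cents = mir_eval.melody.hz2cents(freq_hz, base_frequency=10.0)
# [0.0, 0.0, 1200.0, 1200 * log2(44) ~= 6551.3]
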
craffel/mir_eval
|
mir_eval/melody.py
|
constant_hop_timebase
|
def constant_hop_timebase(hop, end_time):
"""Generates a time series from 0 to ``end_time`` with times spaced ``hop``
apart
Parameters
----------
hop : float
Spacing of samples in the time series
end_time : float
Time series will span ``[0, end_time]``
Returns
-------
times : np.ndarray
Generated timebase
"""
# Compute new timebase. Rounding/linspace is to avoid float problems.
end_time = np.round(end_time, 10)
times = np.linspace(0, hop*int(np.floor(end_time/hop)),
int(np.floor(end_time/hop)) + 1)
times = np.round(times, 10)
return times
|
python
|
def constant_hop_timebase(hop, end_time):
"""Generates a time series from 0 to ``end_time`` with times spaced ``hop``
apart
Parameters
----------
hop : float
Spacing of samples in the time series
end_time : float
Time series will span ``[0, end_time]``
Returns
-------
times : np.ndarray
Generated timebase
"""
# Compute new timebase. Rounding/linspace is to avoid float problems.
end_time = np.round(end_time, 10)
times = np.linspace(0, hop*int(np.floor(end_time/hop)),
int(np.floor(end_time/hop)) + 1)
times = np.round(times, 10)
return times
|
[
"def",
"constant_hop_timebase",
"(",
"hop",
",",
"end_time",
")",
":",
"# Compute new timebase. Rounding/linspace is to avoid float problems.",
"end_time",
"=",
"np",
".",
"round",
"(",
"end_time",
",",
"10",
")",
"times",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"hop",
"*",
"int",
"(",
"np",
".",
"floor",
"(",
"end_time",
"/",
"hop",
")",
")",
",",
"int",
"(",
"np",
".",
"floor",
"(",
"end_time",
"/",
"hop",
")",
")",
"+",
"1",
")",
"times",
"=",
"np",
".",
"round",
"(",
"times",
",",
"10",
")",
"return",
"times"
] |
Generates a time series from 0 to ``end_time`` with times spaced ``hop``
apart
Parameters
----------
hop : float
Spacing of samples in the time series
end_time : float
Time series will span ``[0, end_time]``
Returns
-------
times : np.ndarray
Generated timebase
|
[
"Generates",
"a",
"time",
"series",
"from",
"0",
"to",
"end_time",
"with",
"times",
"spaced",
"hop",
"apart"
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/melody.py#L165-L187
|
train
|
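A small example of the constant-hop timebase above; the final sample is the largest multiple of ``hop`` that does not exceed ``end_time``.

import mir_eval.melody

times = mir_eval.melody.constant_hop_timebase(hop=0.01, end_time=0.035)
# array([0.  , 0.01, 0.02, 0.03])
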
craffel/mir_eval
|
mir_eval/segment.py
|
detection
|
def detection(reference_intervals, estimated_intervals,
window=0.5, beta=1.0, trim=False):
"""Boundary detection hit-rate.
    A hit is counted whenever a reference boundary is within ``window`` of an
estimated boundary. Note that each boundary is matched at most once: this
is achieved by computing the size of a maximal matching between reference
and estimated boundary points, subject to the window constraint.
Examples
--------
>>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
>>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
>>> # With 0.5s windowing
>>> P05, R05, F05 = mir_eval.segment.detection(ref_intervals,
... est_intervals,
... window=0.5)
>>> # With 3s windowing
>>> P3, R3, F3 = mir_eval.segment.detection(ref_intervals,
... est_intervals,
... window=3)
>>> # Ignoring hits for the beginning and end of track
>>> P, R, F = mir_eval.segment.detection(ref_intervals,
... est_intervals,
... window=0.5,
... trim=True)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
window : float > 0
        size of the window of 'correctness' around ground-truth boundaries
(in seconds)
(Default value = 0.5)
beta : float > 0
weighting constant for F-measure.
(Default value = 1.0)
trim : boolean
if ``True``, the first and last boundary times are ignored.
Typically, these denote start (0) and end-markers.
(Default value = False)
Returns
-------
precision : float
precision of estimated predictions
recall : float
        recall of reference boundaries
f_measure : float
F-measure (weighted harmonic mean of ``precision`` and ``recall``)
"""
validate_boundary(reference_intervals, estimated_intervals, trim)
# Convert intervals to boundaries
reference_boundaries = util.intervals_to_boundaries(reference_intervals)
estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
# Suppress the first and last intervals
if trim:
reference_boundaries = reference_boundaries[1:-1]
estimated_boundaries = estimated_boundaries[1:-1]
# If we have no boundaries, we get no score.
if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
return 0.0, 0.0, 0.0
matching = util.match_events(reference_boundaries,
estimated_boundaries,
window)
precision = float(len(matching)) / len(estimated_boundaries)
recall = float(len(matching)) / len(reference_boundaries)
f_measure = util.f_measure(precision, recall, beta=beta)
return precision, recall, f_measure
|
python
|
def detection(reference_intervals, estimated_intervals,
window=0.5, beta=1.0, trim=False):
"""Boundary detection hit-rate.
    A hit is counted whenever a reference boundary is within ``window`` of an
estimated boundary. Note that each boundary is matched at most once: this
is achieved by computing the size of a maximal matching between reference
and estimated boundary points, subject to the window constraint.
Examples
--------
>>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
>>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
>>> # With 0.5s windowing
>>> P05, R05, F05 = mir_eval.segment.detection(ref_intervals,
... est_intervals,
... window=0.5)
>>> # With 3s windowing
>>> P3, R3, F3 = mir_eval.segment.detection(ref_intervals,
... est_intervals,
... window=3)
>>> # Ignoring hits for the beginning and end of track
>>> P, R, F = mir_eval.segment.detection(ref_intervals,
... est_intervals,
... window=0.5,
... trim=True)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
window : float > 0
        size of the window of 'correctness' around ground-truth boundaries
(in seconds)
(Default value = 0.5)
beta : float > 0
weighting constant for F-measure.
(Default value = 1.0)
trim : boolean
if ``True``, the first and last boundary times are ignored.
Typically, these denote start (0) and end-markers.
(Default value = False)
Returns
-------
precision : float
precision of estimated predictions
recall : float
        recall of reference boundaries
f_measure : float
F-measure (weighted harmonic mean of ``precision`` and ``recall``)
"""
validate_boundary(reference_intervals, estimated_intervals, trim)
# Convert intervals to boundaries
reference_boundaries = util.intervals_to_boundaries(reference_intervals)
estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
# Suppress the first and last intervals
if trim:
reference_boundaries = reference_boundaries[1:-1]
estimated_boundaries = estimated_boundaries[1:-1]
# If we have no boundaries, we get no score.
if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
return 0.0, 0.0, 0.0
matching = util.match_events(reference_boundaries,
estimated_boundaries,
window)
precision = float(len(matching)) / len(estimated_boundaries)
recall = float(len(matching)) / len(reference_boundaries)
f_measure = util.f_measure(precision, recall, beta=beta)
return precision, recall, f_measure
|
[
"def",
"detection",
"(",
"reference_intervals",
",",
"estimated_intervals",
",",
"window",
"=",
"0.5",
",",
"beta",
"=",
"1.0",
",",
"trim",
"=",
"False",
")",
":",
"validate_boundary",
"(",
"reference_intervals",
",",
"estimated_intervals",
",",
"trim",
")",
"# Convert intervals to boundaries",
"reference_boundaries",
"=",
"util",
".",
"intervals_to_boundaries",
"(",
"reference_intervals",
")",
"estimated_boundaries",
"=",
"util",
".",
"intervals_to_boundaries",
"(",
"estimated_intervals",
")",
"# Suppress the first and last intervals",
"if",
"trim",
":",
"reference_boundaries",
"=",
"reference_boundaries",
"[",
"1",
":",
"-",
"1",
"]",
"estimated_boundaries",
"=",
"estimated_boundaries",
"[",
"1",
":",
"-",
"1",
"]",
"# If we have no boundaries, we get no score.",
"if",
"len",
"(",
"reference_boundaries",
")",
"==",
"0",
"or",
"len",
"(",
"estimated_boundaries",
")",
"==",
"0",
":",
"return",
"0.0",
",",
"0.0",
",",
"0.0",
"matching",
"=",
"util",
".",
"match_events",
"(",
"reference_boundaries",
",",
"estimated_boundaries",
",",
"window",
")",
"precision",
"=",
"float",
"(",
"len",
"(",
"matching",
")",
")",
"/",
"len",
"(",
"estimated_boundaries",
")",
"recall",
"=",
"float",
"(",
"len",
"(",
"matching",
")",
")",
"/",
"len",
"(",
"reference_boundaries",
")",
"f_measure",
"=",
"util",
".",
"f_measure",
"(",
"precision",
",",
"recall",
",",
"beta",
"=",
"beta",
")",
"return",
"precision",
",",
"recall",
",",
"f_measure"
] |
Boundary detection hit-rate.
A hit is counted whenever a reference boundary is within ``window`` of an
estimated boundary. Note that each boundary is matched at most once: this
is achieved by computing the size of a maximal matching between reference
and estimated boundary points, subject to the window constraint.
Examples
--------
>>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
>>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
>>> # With 0.5s windowing
>>> P05, R05, F05 = mir_eval.segment.detection(ref_intervals,
... est_intervals,
... window=0.5)
>>> # With 3s windowing
>>> P3, R3, F3 = mir_eval.segment.detection(ref_intervals,
... est_intervals,
... window=3)
>>> # Ignoring hits for the beginning and end of track
>>> P, R, F = mir_eval.segment.detection(ref_intervals,
... est_intervals,
... window=0.5,
... trim=True)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
window : float > 0
    size of the window of 'correctness' around ground-truth boundaries
(in seconds)
(Default value = 0.5)
beta : float > 0
weighting constant for F-measure.
(Default value = 1.0)
trim : boolean
if ``True``, the first and last boundary times are ignored.
Typically, these denote start (0) and end-markers.
(Default value = False)
Returns
-------
precision : float
precision of estimated predictions
recall : float
    recall of reference boundaries
f_measure : float
F-measure (weighted harmonic mean of ``precision`` and ``recall``)
|
[
"Boundary",
"detection",
"hit",
"-",
"rate",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/segment.py#L176-L260
|
train
|
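A worked sketch of boundary detection with hypothetical segmentations; estimated boundaries at 0, 5.2 and 15 s match reference boundaries at 0, 5 and 15 s within the 0.5 s window, while the reference boundary at 10 s goes unmatched.

import numpy as np
import mir_eval.segment

ref_intervals = np.array([[0.0, 5.0], [5.0, 10.0], [10.0, 15.0]])
est_intervals = np.array([[0.0, 5.2], [5.2, 15.0]])

p, r, f = mir_eval.segment.detection(ref_intervals, est_intervals,
                                     window=0.5)
# p = 3/3 = 1.0, r = 3/4 = 0.75
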
craffel/mir_eval
|
mir_eval/segment.py
|
deviation
|
def deviation(reference_intervals, estimated_intervals, trim=False):
"""Compute the median deviations between reference
and estimated boundary times.
Examples
--------
>>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
>>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
>>> r_to_e, e_to_r = mir_eval.boundary.deviation(ref_intervals,
... est_intervals)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
trim : boolean
if ``True``, the first and last intervals are ignored.
Typically, these denote start (0.0) and end-of-track markers.
(Default value = False)
Returns
-------
reference_to_estimated : float
median time from each reference boundary to the
closest estimated boundary
estimated_to_reference : float
median time from each estimated boundary to the
closest reference boundary
"""
validate_boundary(reference_intervals, estimated_intervals, trim)
# Convert intervals to boundaries
reference_boundaries = util.intervals_to_boundaries(reference_intervals)
estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
# Suppress the first and last intervals
if trim:
reference_boundaries = reference_boundaries[1:-1]
estimated_boundaries = estimated_boundaries[1:-1]
# If we have no boundaries, we get no score.
if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
return np.nan, np.nan
dist = np.abs(np.subtract.outer(reference_boundaries,
estimated_boundaries))
estimated_to_reference = np.median(dist.min(axis=0))
reference_to_estimated = np.median(dist.min(axis=1))
return reference_to_estimated, estimated_to_reference
|
python
|
def deviation(reference_intervals, estimated_intervals, trim=False):
"""Compute the median deviations between reference
and estimated boundary times.
Examples
--------
>>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
>>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
>>> r_to_e, e_to_r = mir_eval.boundary.deviation(ref_intervals,
... est_intervals)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
trim : boolean
if ``True``, the first and last intervals are ignored.
Typically, these denote start (0.0) and end-of-track markers.
(Default value = False)
Returns
-------
reference_to_estimated : float
median time from each reference boundary to the
closest estimated boundary
estimated_to_reference : float
median time from each estimated boundary to the
closest reference boundary
"""
validate_boundary(reference_intervals, estimated_intervals, trim)
# Convert intervals to boundaries
reference_boundaries = util.intervals_to_boundaries(reference_intervals)
estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
# Suppress the first and last intervals
if trim:
reference_boundaries = reference_boundaries[1:-1]
estimated_boundaries = estimated_boundaries[1:-1]
# If we have no boundaries, we get no score.
if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
return np.nan, np.nan
dist = np.abs(np.subtract.outer(reference_boundaries,
estimated_boundaries))
estimated_to_reference = np.median(dist.min(axis=0))
reference_to_estimated = np.median(dist.min(axis=1))
return reference_to_estimated, estimated_to_reference
|
[
"def",
"deviation",
"(",
"reference_intervals",
",",
"estimated_intervals",
",",
"trim",
"=",
"False",
")",
":",
"validate_boundary",
"(",
"reference_intervals",
",",
"estimated_intervals",
",",
"trim",
")",
"# Convert intervals to boundaries",
"reference_boundaries",
"=",
"util",
".",
"intervals_to_boundaries",
"(",
"reference_intervals",
")",
"estimated_boundaries",
"=",
"util",
".",
"intervals_to_boundaries",
"(",
"estimated_intervals",
")",
"# Suppress the first and last intervals",
"if",
"trim",
":",
"reference_boundaries",
"=",
"reference_boundaries",
"[",
"1",
":",
"-",
"1",
"]",
"estimated_boundaries",
"=",
"estimated_boundaries",
"[",
"1",
":",
"-",
"1",
"]",
"# If we have no boundaries, we get no score.",
"if",
"len",
"(",
"reference_boundaries",
")",
"==",
"0",
"or",
"len",
"(",
"estimated_boundaries",
")",
"==",
"0",
":",
"return",
"np",
".",
"nan",
",",
"np",
".",
"nan",
"dist",
"=",
"np",
".",
"abs",
"(",
"np",
".",
"subtract",
".",
"outer",
"(",
"reference_boundaries",
",",
"estimated_boundaries",
")",
")",
"estimated_to_reference",
"=",
"np",
".",
"median",
"(",
"dist",
".",
"min",
"(",
"axis",
"=",
"0",
")",
")",
"reference_to_estimated",
"=",
"np",
".",
"median",
"(",
"dist",
".",
"min",
"(",
"axis",
"=",
"1",
")",
")",
"return",
"reference_to_estimated",
",",
"estimated_to_reference"
] |
Compute the median deviations between reference
and estimated boundary times.
Examples
--------
>>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
>>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
>>> r_to_e, e_to_r = mir_eval.boundary.deviation(ref_intervals,
... est_intervals)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
trim : boolean
if ``True``, the first and last intervals are ignored.
Typically, these denote start (0.0) and end-of-track markers.
(Default value = False)
Returns
-------
reference_to_estimated : float
median time from each reference boundary to the
closest estimated boundary
estimated_to_reference : float
median time from each estimated boundary to the
closest reference boundary
|
[
"Compute",
"the",
"median",
"deviations",
"between",
"reference",
"and",
"estimated",
"boundary",
"times",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/segment.py#L263-L321
|
train
|
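A worked sketch of the median boundary deviation above with hypothetical segmentations; two of the three boundaries in each direction coincide exactly, so the median distance is zero both ways.

import numpy as np
import mir_eval.segment

ref_intervals = np.array([[0.0, 5.0], [5.0, 10.0]])
est_intervals = np.array([[0.0, 4.5], [4.5, 10.0]])

r_to_e, e_to_r = mir_eval.segment.deviation(ref_intervals, est_intervals)
# r_to_e == e_to_r == 0.0: only the middle boundary (5 vs 4.5 s) deviates.
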
craffel/mir_eval
|
mir_eval/segment.py
|
pairwise
|
def pairwise(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1, beta=1.0):
"""Frame-clustering segmentation evaluation by pair-wise agreement.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
... ref_labels,
... t_min=0)
>>> (est_intervals,
... est_labels) = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
>>> precision, recall, f = mir_eval.structure.pairwise(ref_intervals,
... ref_labels,
... est_intervals,
... est_labels)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
frame_size : float > 0
length (in seconds) of frames for clustering
(Default value = 0.1)
beta : float > 0
beta value for F-measure
(Default value = 1.0)
Returns
-------
precision : float > 0
Precision of detecting whether frames belong in the same cluster
recall : float > 0
Recall of detecting whether frames belong in the same cluster
f : float > 0
F-measure of detecting whether frames belong in the same cluster
"""
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Build the reference label agreement matrix
agree_ref = np.equal.outer(y_ref, y_ref)
# Count the unique pairs
n_agree_ref = (agree_ref.sum() - len(y_ref)) / 2.0
# Repeat for estimate
agree_est = np.equal.outer(y_est, y_est)
n_agree_est = (agree_est.sum() - len(y_est)) / 2.0
# Find where they agree
matches = np.logical_and(agree_ref, agree_est)
n_matches = (matches.sum() - len(y_ref)) / 2.0
precision = n_matches / n_agree_est
recall = n_matches / n_agree_ref
f_measure = util.f_measure(precision, recall, beta=beta)
return precision, recall, f_measure
|
python
|
def pairwise(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1, beta=1.0):
"""Frame-clustering segmentation evaluation by pair-wise agreement.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
... ref_labels,
... t_min=0)
>>> (est_intervals,
... est_labels) = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
>>> precision, recall, f = mir_eval.structure.pairwise(ref_intervals,
... ref_labels,
... est_intervals,
... est_labels)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
frame_size : float > 0
length (in seconds) of frames for clustering
(Default value = 0.1)
beta : float > 0
beta value for F-measure
(Default value = 1.0)
Returns
-------
precision : float > 0
Precision of detecting whether frames belong in the same cluster
recall : float > 0
Recall of detecting whether frames belong in the same cluster
f : float > 0
F-measure of detecting whether frames belong in the same cluster
"""
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Build the reference label agreement matrix
agree_ref = np.equal.outer(y_ref, y_ref)
# Count the unique pairs
n_agree_ref = (agree_ref.sum() - len(y_ref)) / 2.0
# Repeat for estimate
agree_est = np.equal.outer(y_est, y_est)
n_agree_est = (agree_est.sum() - len(y_est)) / 2.0
# Find where they agree
matches = np.logical_and(agree_ref, agree_est)
n_matches = (matches.sum() - len(y_ref)) / 2.0
precision = n_matches / n_agree_est
recall = n_matches / n_agree_ref
f_measure = util.f_measure(precision, recall, beta=beta)
return precision, recall, f_measure
|
[
"def",
"pairwise",
"(",
"reference_intervals",
",",
"reference_labels",
",",
"estimated_intervals",
",",
"estimated_labels",
",",
"frame_size",
"=",
"0.1",
",",
"beta",
"=",
"1.0",
")",
":",
"validate_structure",
"(",
"reference_intervals",
",",
"reference_labels",
",",
"estimated_intervals",
",",
"estimated_labels",
")",
"# Check for empty annotations. Don't need to check labels because",
"# validate_structure makes sure they're the same size as intervals",
"if",
"reference_intervals",
".",
"size",
"==",
"0",
"or",
"estimated_intervals",
".",
"size",
"==",
"0",
":",
"return",
"0.",
",",
"0.",
",",
"0.",
"# Generate the cluster labels",
"y_ref",
"=",
"util",
".",
"intervals_to_samples",
"(",
"reference_intervals",
",",
"reference_labels",
",",
"sample_size",
"=",
"frame_size",
")",
"[",
"-",
"1",
"]",
"y_ref",
"=",
"util",
".",
"index_labels",
"(",
"y_ref",
")",
"[",
"0",
"]",
"# Map to index space",
"y_est",
"=",
"util",
".",
"intervals_to_samples",
"(",
"estimated_intervals",
",",
"estimated_labels",
",",
"sample_size",
"=",
"frame_size",
")",
"[",
"-",
"1",
"]",
"y_est",
"=",
"util",
".",
"index_labels",
"(",
"y_est",
")",
"[",
"0",
"]",
"# Build the reference label agreement matrix",
"agree_ref",
"=",
"np",
".",
"equal",
".",
"outer",
"(",
"y_ref",
",",
"y_ref",
")",
"# Count the unique pairs",
"n_agree_ref",
"=",
"(",
"agree_ref",
".",
"sum",
"(",
")",
"-",
"len",
"(",
"y_ref",
")",
")",
"/",
"2.0",
"# Repeat for estimate",
"agree_est",
"=",
"np",
".",
"equal",
".",
"outer",
"(",
"y_est",
",",
"y_est",
")",
"n_agree_est",
"=",
"(",
"agree_est",
".",
"sum",
"(",
")",
"-",
"len",
"(",
"y_est",
")",
")",
"/",
"2.0",
"# Find where they agree",
"matches",
"=",
"np",
".",
"logical_and",
"(",
"agree_ref",
",",
"agree_est",
")",
"n_matches",
"=",
"(",
"matches",
".",
"sum",
"(",
")",
"-",
"len",
"(",
"y_ref",
")",
")",
"/",
"2.0",
"precision",
"=",
"n_matches",
"/",
"n_agree_est",
"recall",
"=",
"n_matches",
"/",
"n_agree_ref",
"f_measure",
"=",
"util",
".",
"f_measure",
"(",
"precision",
",",
"recall",
",",
"beta",
"=",
"beta",
")",
"return",
"precision",
",",
"recall",
",",
"f_measure"
] |
Frame-clustering segmentation evaluation by pair-wise agreement.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
... ref_labels,
... t_min=0)
>>> (est_intervals,
... est_labels) = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
>>> precision, recall, f = mir_eval.structure.pairwise(ref_intervals,
... ref_labels,
... est_intervals,
... est_labels)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
frame_size : float > 0
length (in seconds) of frames for clustering
(Default value = 0.1)
beta : float > 0
beta value for F-measure
(Default value = 1.0)
Returns
-------
precision : float > 0
Precision of detecting whether frames belong in the same cluster
recall : float > 0
Recall of detecting whether frames belong in the same cluster
f : float > 0
F-measure of detecting whether frames belong in the same cluster
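A self-contained sketch of calling the function above on toy data, assuming it is importable as mir_eval.segment.pairwise (the file shown is mir_eval/segment.py); the labels and timings are invented. Note that both annotations are made to start at 0 and span the same duration.
import numpy as np
import mir_eval.segment

ref_intervals = np.array([[0.0, 3.0], [3.0, 6.0]])
ref_labels = ['verse', 'chorus']
est_intervals = np.array([[0.0, 2.0], [2.0, 6.0]])
est_labels = ['a', 'b']

# Sample both annotations on a 0.5-second grid and score pair-wise agreement.
precision, recall, f = mir_eval.segment.pairwise(
    ref_intervals, ref_labels, est_intervals, est_labels, frame_size=0.5)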
|
[
"Frame",
"-",
"clustering",
"segmentation",
"evaluation",
"by",
"pair",
"-",
"wise",
"agreement",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/segment.py#L324-L418
|
train
|
craffel/mir_eval
|
mir_eval/segment.py
|
_contingency_matrix
|
def _contingency_matrix(reference_indices, estimated_indices):
"""Computes the contingency matrix of a true labeling vs an estimated one.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
contingency_matrix : np.ndarray
Contingency matrix, shape=(#reference indices, #estimated indices)
.. note:: Based on sklearn.metrics.cluster.contingency_matrix
"""
ref_classes, ref_class_idx = np.unique(reference_indices,
return_inverse=True)
est_classes, est_class_idx = np.unique(estimated_indices,
return_inverse=True)
n_ref_classes = ref_classes.shape[0]
n_est_classes = est_classes.shape[0]
# Using coo_matrix is faster than histogram2d
return scipy.sparse.coo_matrix((np.ones(ref_class_idx.shape[0]),
(ref_class_idx, est_class_idx)),
shape=(n_ref_classes, n_est_classes),
                                   dtype=int).toarray()
|
python
|
def _contingency_matrix(reference_indices, estimated_indices):
"""Computes the contingency matrix of a true labeling vs an estimated one.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
contingency_matrix : np.ndarray
Contingency matrix, shape=(#reference indices, #estimated indices)
.. note:: Based on sklearn.metrics.cluster.contingency_matrix
"""
ref_classes, ref_class_idx = np.unique(reference_indices,
return_inverse=True)
est_classes, est_class_idx = np.unique(estimated_indices,
return_inverse=True)
n_ref_classes = ref_classes.shape[0]
n_est_classes = est_classes.shape[0]
# Using coo_matrix is faster than histogram2d
return scipy.sparse.coo_matrix((np.ones(ref_class_idx.shape[0]),
(ref_class_idx, est_class_idx)),
shape=(n_ref_classes, n_est_classes),
                                   dtype=int).toarray()
|
[
"def",
"_contingency_matrix",
"(",
"reference_indices",
",",
"estimated_indices",
")",
":",
"ref_classes",
",",
"ref_class_idx",
"=",
"np",
".",
"unique",
"(",
"reference_indices",
",",
"return_inverse",
"=",
"True",
")",
"est_classes",
",",
"est_class_idx",
"=",
"np",
".",
"unique",
"(",
"estimated_indices",
",",
"return_inverse",
"=",
"True",
")",
"n_ref_classes",
"=",
"ref_classes",
".",
"shape",
"[",
"0",
"]",
"n_est_classes",
"=",
"est_classes",
".",
"shape",
"[",
"0",
"]",
"# Using coo_matrix is faster than histogram2d",
"return",
"scipy",
".",
"sparse",
".",
"coo_matrix",
"(",
"(",
"np",
".",
"ones",
"(",
"ref_class_idx",
".",
"shape",
"[",
"0",
"]",
")",
",",
"(",
"ref_class_idx",
",",
"est_class_idx",
")",
")",
",",
"shape",
"=",
"(",
"n_ref_classes",
",",
"n_est_classes",
")",
",",
"dtype",
"=",
"np",
".",
"int",
")",
".",
"toarray",
"(",
")"
] |
Computes the contingency matrix of a true labeling vs an estimated one.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
contingency_matrix : np.ndarray
Contingency matrix, shape=(#reference indices, #estimated indices)
.. note:: Based on sklearn.metrics.cluster.contingency_matrix
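The helper is private, so the sketch below reproduces the same computation inline on a toy labeling rather than importing it; the label values are made up.
import numpy as np
import scipy.sparse

ref = np.array([0, 0, 1, 1, 2])
est = np.array([0, 1, 1, 1, 2])

ref_classes, ref_idx = np.unique(ref, return_inverse=True)
est_classes, est_idx = np.unique(est, return_inverse=True)
# C[i, j] counts samples labeled ref_classes[i] in the reference and
# est_classes[j] in the estimate.
C = scipy.sparse.coo_matrix(
    (np.ones(len(ref_idx)), (ref_idx, est_idx)),
    shape=(len(ref_classes), len(est_classes)), dtype=int).toarray()
# Here C == [[1, 1, 0], [0, 2, 0], [0, 0, 1]]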
|
[
"Computes",
"the",
"contingency",
"matrix",
"of",
"a",
"true",
"labeling",
"vs",
"an",
"estimated",
"one",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/segment.py#L516-L543
|
train
|
craffel/mir_eval
|
mir_eval/segment.py
|
_adjusted_rand_index
|
def _adjusted_rand_index(reference_indices, estimated_indices):
"""Compute the Rand index, adjusted for change.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
ari : float
Adjusted Rand index
.. note:: Based on sklearn.metrics.cluster.adjusted_rand_score
"""
n_samples = len(reference_indices)
ref_classes = np.unique(reference_indices)
est_classes = np.unique(estimated_indices)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
ref_classes.shape[0] == est_classes.shape[0] == 0 or
(ref_classes.shape[0] == est_classes.shape[0] ==
len(reference_indices))):
return 1.0
contingency = _contingency_matrix(reference_indices, estimated_indices)
# Compute the ARI using the contingency data
sum_comb_c = sum(scipy.special.comb(n_c, 2, exact=1) for n_c in
contingency.sum(axis=1))
sum_comb_k = sum(scipy.special.comb(n_k, 2, exact=1) for n_k in
contingency.sum(axis=0))
sum_comb = sum((scipy.special.comb(n_ij, 2, exact=1) for n_ij in
contingency.flatten()))
prod_comb = (sum_comb_c * sum_comb_k)/float(scipy.special.comb(n_samples,
2))
mean_comb = (sum_comb_k + sum_comb_c)/2.
return (sum_comb - prod_comb)/(mean_comb - prod_comb)
|
python
|
def _adjusted_rand_index(reference_indices, estimated_indices):
"""Compute the Rand index, adjusted for change.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
ari : float
Adjusted Rand index
.. note:: Based on sklearn.metrics.cluster.adjusted_rand_score
"""
n_samples = len(reference_indices)
ref_classes = np.unique(reference_indices)
est_classes = np.unique(estimated_indices)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
ref_classes.shape[0] == est_classes.shape[0] == 0 or
(ref_classes.shape[0] == est_classes.shape[0] ==
len(reference_indices))):
return 1.0
contingency = _contingency_matrix(reference_indices, estimated_indices)
# Compute the ARI using the contingency data
sum_comb_c = sum(scipy.special.comb(n_c, 2, exact=1) for n_c in
contingency.sum(axis=1))
sum_comb_k = sum(scipy.special.comb(n_k, 2, exact=1) for n_k in
contingency.sum(axis=0))
sum_comb = sum((scipy.special.comb(n_ij, 2, exact=1) for n_ij in
contingency.flatten()))
prod_comb = (sum_comb_c * sum_comb_k)/float(scipy.special.comb(n_samples,
2))
mean_comb = (sum_comb_k + sum_comb_c)/2.
return (sum_comb - prod_comb)/(mean_comb - prod_comb)
|
[
"def",
"_adjusted_rand_index",
"(",
"reference_indices",
",",
"estimated_indices",
")",
":",
"n_samples",
"=",
"len",
"(",
"reference_indices",
")",
"ref_classes",
"=",
"np",
".",
"unique",
"(",
"reference_indices",
")",
"est_classes",
"=",
"np",
".",
"unique",
"(",
"estimated_indices",
")",
"# Special limit cases: no clustering since the data is not split;",
"# or trivial clustering where each document is assigned a unique cluster.",
"# These are perfect matches hence return 1.0.",
"if",
"(",
"ref_classes",
".",
"shape",
"[",
"0",
"]",
"==",
"est_classes",
".",
"shape",
"[",
"0",
"]",
"==",
"1",
"or",
"ref_classes",
".",
"shape",
"[",
"0",
"]",
"==",
"est_classes",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
"or",
"(",
"ref_classes",
".",
"shape",
"[",
"0",
"]",
"==",
"est_classes",
".",
"shape",
"[",
"0",
"]",
"==",
"len",
"(",
"reference_indices",
")",
")",
")",
":",
"return",
"1.0",
"contingency",
"=",
"_contingency_matrix",
"(",
"reference_indices",
",",
"estimated_indices",
")",
"# Compute the ARI using the contingency data",
"sum_comb_c",
"=",
"sum",
"(",
"scipy",
".",
"special",
".",
"comb",
"(",
"n_c",
",",
"2",
",",
"exact",
"=",
"1",
")",
"for",
"n_c",
"in",
"contingency",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
")",
"sum_comb_k",
"=",
"sum",
"(",
"scipy",
".",
"special",
".",
"comb",
"(",
"n_k",
",",
"2",
",",
"exact",
"=",
"1",
")",
"for",
"n_k",
"in",
"contingency",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
")",
"sum_comb",
"=",
"sum",
"(",
"(",
"scipy",
".",
"special",
".",
"comb",
"(",
"n_ij",
",",
"2",
",",
"exact",
"=",
"1",
")",
"for",
"n_ij",
"in",
"contingency",
".",
"flatten",
"(",
")",
")",
")",
"prod_comb",
"=",
"(",
"sum_comb_c",
"*",
"sum_comb_k",
")",
"/",
"float",
"(",
"scipy",
".",
"special",
".",
"comb",
"(",
"n_samples",
",",
"2",
")",
")",
"mean_comb",
"=",
"(",
"sum_comb_k",
"+",
"sum_comb_c",
")",
"/",
"2.",
"return",
"(",
"sum_comb",
"-",
"prod_comb",
")",
"/",
"(",
"mean_comb",
"-",
"prod_comb",
")"
] |
Compute the Rand index, adjusted for chance.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
ari : float
Adjusted Rand index
.. note:: Based on sklearn.metrics.cluster.adjusted_rand_score
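A small sanity-check sketch; it assumes the private helper can be imported directly from mir_eval.segment (in normal use it is reached through the public segment metrics), and the index arrays are invented.
import numpy as np
from mir_eval.segment import _adjusted_rand_index

a = np.array([0, 0, 1, 1, 2, 2])
print(_adjusted_rand_index(a, a))                              # identical labelings -> 1.0
print(_adjusted_rand_index(a, np.array([1, 1, 0, 0, 2, 2])))   # same clusters, renamed -> 1.0
print(_adjusted_rand_index(a, np.array([0, 1, 0, 1, 0, 1])))   # unrelated labeling -> below 0 here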
|
[
"Compute",
"the",
"Rand",
"index",
"adjusted",
"for",
"change",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/segment.py#L546-L589
|
train
|
craffel/mir_eval
|
mir_eval/segment.py
|
_mutual_info_score
|
def _mutual_info_score(reference_indices, estimated_indices, contingency=None):
"""Compute the mutual information between two sequence labelings.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
contingency : np.ndarray
Pre-computed contingency matrix. If None, one will be computed.
(Default value = None)
Returns
-------
mi : float
Mutual information
.. note:: Based on sklearn.metrics.cluster.mutual_info_score
"""
if contingency is None:
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
log_outer = -np.log(outer[nnz]) + np.log(pi.sum()) + np.log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - np.log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum()
|
python
|
def _mutual_info_score(reference_indices, estimated_indices, contingency=None):
"""Compute the mutual information between two sequence labelings.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
contingency : np.ndarray
Pre-computed contingency matrix. If None, one will be computed.
(Default value = None)
Returns
-------
mi : float
Mutual information
.. note:: Based on sklearn.metrics.cluster.mutual_info_score
"""
if contingency is None:
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
log_outer = -np.log(outer[nnz]) + np.log(pi.sum()) + np.log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - np.log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum()
|
[
"def",
"_mutual_info_score",
"(",
"reference_indices",
",",
"estimated_indices",
",",
"contingency",
"=",
"None",
")",
":",
"if",
"contingency",
"is",
"None",
":",
"contingency",
"=",
"_contingency_matrix",
"(",
"reference_indices",
",",
"estimated_indices",
")",
".",
"astype",
"(",
"float",
")",
"contingency_sum",
"=",
"np",
".",
"sum",
"(",
"contingency",
")",
"pi",
"=",
"np",
".",
"sum",
"(",
"contingency",
",",
"axis",
"=",
"1",
")",
"pj",
"=",
"np",
".",
"sum",
"(",
"contingency",
",",
"axis",
"=",
"0",
")",
"outer",
"=",
"np",
".",
"outer",
"(",
"pi",
",",
"pj",
")",
"nnz",
"=",
"contingency",
"!=",
"0.0",
"# normalized contingency",
"contingency_nm",
"=",
"contingency",
"[",
"nnz",
"]",
"log_contingency_nm",
"=",
"np",
".",
"log",
"(",
"contingency_nm",
")",
"contingency_nm",
"/=",
"contingency_sum",
"# log(a / b) should be calculated as log(a) - log(b) for",
"# possible loss of precision",
"log_outer",
"=",
"-",
"np",
".",
"log",
"(",
"outer",
"[",
"nnz",
"]",
")",
"+",
"np",
".",
"log",
"(",
"pi",
".",
"sum",
"(",
")",
")",
"+",
"np",
".",
"log",
"(",
"pj",
".",
"sum",
"(",
")",
")",
"mi",
"=",
"(",
"contingency_nm",
"*",
"(",
"log_contingency_nm",
"-",
"np",
".",
"log",
"(",
"contingency_sum",
")",
")",
"+",
"contingency_nm",
"*",
"log_outer",
")",
"return",
"mi",
".",
"sum",
"(",
")"
] |
Compute the mutual information between two sequence labelings.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
contingency : np.ndarray
Pre-computed contingency matrix. If None, one will be computed.
(Default value = None)
Returns
-------
mi : float
Mutual information
.. note:: Based on sklearn.metrics.cluster.mutual_info_score
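A quick illustration of the quantity being computed, assuming the private helpers can be imported directly from mir_eval.segment; the labelings are made up. The mutual information of a labeling with itself equals its entropy (in nats), which gives a convenient sanity check.
import numpy as np
from mir_eval.segment import _mutual_info_score, _entropy

a = np.array([0, 0, 1, 1, 2, 2])
b = np.array([0, 0, 0, 1, 1, 1])
print(_mutual_info_score(a, b))                            # shared information between the labelings
print(np.isclose(_mutual_info_score(a, a), _entropy(a)))   # True: I(X; X) == H(X)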
|
[
"Compute",
"the",
"mutual",
"information",
"between",
"two",
"sequence",
"labelings",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/segment.py#L663-L701
|
train
|
craffel/mir_eval
|
mir_eval/segment.py
|
_entropy
|
def _entropy(labels):
"""Calculates the entropy for a labeling.
Parameters
----------
labels : list-like
List of labels.
Returns
-------
entropy : float
Entropy of the labeling.
.. note:: Based on sklearn.metrics.cluster.entropy
"""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
    pi = np.bincount(label_idx).astype(float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - np.log(pi_sum)))
|
python
|
def _entropy(labels):
"""Calculates the entropy for a labeling.
Parameters
----------
labels : list-like
List of labels.
Returns
-------
entropy : float
Entropy of the labeling.
.. note:: Based on sklearn.metrics.cluster.entropy
"""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
    pi = np.bincount(label_idx).astype(float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - np.log(pi_sum)))
|
[
"def",
"_entropy",
"(",
"labels",
")",
":",
"if",
"len",
"(",
"labels",
")",
"==",
"0",
":",
"return",
"1.0",
"label_idx",
"=",
"np",
".",
"unique",
"(",
"labels",
",",
"return_inverse",
"=",
"True",
")",
"[",
"1",
"]",
"pi",
"=",
"np",
".",
"bincount",
"(",
"label_idx",
")",
".",
"astype",
"(",
"np",
".",
"float",
")",
"pi",
"=",
"pi",
"[",
"pi",
">",
"0",
"]",
"pi_sum",
"=",
"np",
".",
"sum",
"(",
"pi",
")",
"# log(a / b) should be calculated as log(a) - log(b) for",
"# possible loss of precision",
"return",
"-",
"np",
".",
"sum",
"(",
"(",
"pi",
"/",
"pi_sum",
")",
"*",
"(",
"np",
".",
"log",
"(",
"pi",
")",
"-",
"np",
".",
"log",
"(",
"pi_sum",
")",
")",
")"
] |
Calculates the entropy for a labeling.
Parameters
----------
labels : list-like
List of labels.
Returns
-------
entropy : float
Entropy of the labeling.
.. note:: Based on sklearn.metrics.cluster.entropy
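A few illustrative calls, assuming the private helper is importable from mir_eval.segment; entropies are in nats.
from mir_eval.segment import _entropy

print(_entropy(['A', 'A', 'B', 'B']))   # two equally likely labels -> log(2) ~ 0.693
print(_entropy(['A', 'A', 'A', 'A']))   # a single label -> 0.0
print(_entropy([]))                     # empty labeling is special-cased to 1.0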
|
[
"Calculates",
"the",
"entropy",
"for",
"a",
"labeling",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/segment.py#L704-L728
|
train
|
craffel/mir_eval
|
mir_eval/tempo.py
|
validate_tempi
|
def validate_tempi(tempi, reference=True):
"""Checks that there are two non-negative tempi.
For a reference value, at least one tempo has to be greater than zero.
Parameters
----------
tempi : np.ndarray
length-2 array of tempo, in bpm
reference : bool
indicates a reference value
"""
if tempi.size != 2:
raise ValueError('tempi must have exactly two values')
if not np.all(np.isfinite(tempi)) or np.any(tempi < 0):
raise ValueError('tempi={} must be non-negative numbers'.format(tempi))
if reference and np.all(tempi == 0):
raise ValueError('reference tempi={} must have one'
' value greater than zero'.format(tempi))
|
python
|
def validate_tempi(tempi, reference=True):
"""Checks that there are two non-negative tempi.
For a reference value, at least one tempo has to be greater than zero.
Parameters
----------
tempi : np.ndarray
length-2 array of tempo, in bpm
reference : bool
indicates a reference value
"""
if tempi.size != 2:
raise ValueError('tempi must have exactly two values')
if not np.all(np.isfinite(tempi)) or np.any(tempi < 0):
raise ValueError('tempi={} must be non-negative numbers'.format(tempi))
if reference and np.all(tempi == 0):
raise ValueError('reference tempi={} must have one'
' value greater than zero'.format(tempi))
|
[
"def",
"validate_tempi",
"(",
"tempi",
",",
"reference",
"=",
"True",
")",
":",
"if",
"tempi",
".",
"size",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'tempi must have exactly two values'",
")",
"if",
"not",
"np",
".",
"all",
"(",
"np",
".",
"isfinite",
"(",
"tempi",
")",
")",
"or",
"np",
".",
"any",
"(",
"tempi",
"<",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'tempi={} must be non-negative numbers'",
".",
"format",
"(",
"tempi",
")",
")",
"if",
"reference",
"and",
"np",
".",
"all",
"(",
"tempi",
"==",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'reference tempi={} must have one'",
"' value greater than zero'",
".",
"format",
"(",
"tempi",
")",
")"
] |
Checks that there are two non-negative tempi.
For a reference value, at least one tempo has to be greater than zero.
Parameters
----------
tempi : np.ndarray
length-2 array of tempo, in bpm
reference : bool
indicates a reference value
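A short sketch of the check, assuming the function is importable as mir_eval.tempo.validate_tempi (the file shown is mir_eval/tempo.py); the tempo values are made up.
import numpy as np
import mir_eval.tempo

mir_eval.tempo.validate_tempi(np.array([60.0, 120.0]))                 # passes silently
mir_eval.tempo.validate_tempi(np.array([0.0, 0.0]), reference=False)   # allowed for estimates
# mir_eval.tempo.validate_tempi(np.array([0.0, 0.0]))  # would raise ValueError for a reference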
|
[
"Checks",
"that",
"there",
"are",
"two",
"non",
"-",
"negative",
"tempi",
".",
"For",
"a",
"reference",
"value",
"at",
"least",
"one",
"tempo",
"has",
"to",
"be",
"greater",
"than",
"zero",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/tempo.py#L29-L51
|
train
|
craffel/mir_eval
|
mir_eval/tempo.py
|
validate
|
def validate(reference_tempi, reference_weight, estimated_tempi):
"""Checks that the input annotations to a metric look like valid tempo
annotations.
Parameters
----------
reference_tempi : np.ndarray
reference tempo values, in bpm
reference_weight : float
perceptual weight of slow vs fast in reference
estimated_tempi : np.ndarray
estimated tempo values, in bpm
"""
validate_tempi(reference_tempi, reference=True)
validate_tempi(estimated_tempi, reference=False)
if reference_weight < 0 or reference_weight > 1:
raise ValueError('Reference weight must lie in range [0, 1]')
|
python
|
def validate(reference_tempi, reference_weight, estimated_tempi):
"""Checks that the input annotations to a metric look like valid tempo
annotations.
Parameters
----------
reference_tempi : np.ndarray
reference tempo values, in bpm
reference_weight : float
perceptual weight of slow vs fast in reference
estimated_tempi : np.ndarray
estimated tempo values, in bpm
"""
validate_tempi(reference_tempi, reference=True)
validate_tempi(estimated_tempi, reference=False)
if reference_weight < 0 or reference_weight > 1:
raise ValueError('Reference weight must lie in range [0, 1]')
|
[
"def",
"validate",
"(",
"reference_tempi",
",",
"reference_weight",
",",
"estimated_tempi",
")",
":",
"validate_tempi",
"(",
"reference_tempi",
",",
"reference",
"=",
"True",
")",
"validate_tempi",
"(",
"estimated_tempi",
",",
"reference",
"=",
"False",
")",
"if",
"reference_weight",
"<",
"0",
"or",
"reference_weight",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Reference weight must lie in range [0, 1]'",
")"
] |
Checks that the input annotations to a metric look like valid tempo
annotations.
Parameters
----------
reference_tempi : np.ndarray
reference tempo values, in bpm
reference_weight : float
perceptual weight of slow vs fast in reference
estimated_tempi : np.ndarray
estimated tempo values, in bpm
|
[
"Checks",
"that",
"the",
"input",
"annotations",
"to",
"a",
"metric",
"look",
"like",
"valid",
"tempo",
"annotations",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/tempo.py#L54-L74
|
train
|
craffel/mir_eval
|
mir_eval/tempo.py
|
detection
|
def detection(reference_tempi, reference_weight, estimated_tempi, tol=0.08):
"""Compute the tempo detection accuracy metric.
Parameters
----------
reference_tempi : np.ndarray, shape=(2,)
Two non-negative reference tempi
reference_weight : float > 0
The relative strength of ``reference_tempi[0]`` vs
``reference_tempi[1]``.
estimated_tempi : np.ndarray, shape=(2,)
Two non-negative estimated tempi.
tol : float in [0, 1]:
The maximum allowable deviation from a reference tempo to
count as a hit.
``|est_t - ref_t| <= tol * ref_t``
(Default value = 0.08)
Returns
-------
p_score : float in [0, 1]
Weighted average of recalls:
``reference_weight * hits[0] + (1 - reference_weight) * hits[1]``
one_correct : bool
True if at least one reference tempo was correctly estimated
both_correct : bool
True if both reference tempi were correctly estimated
Raises
------
ValueError
If the input tempi are ill-formed
If the reference weight is not in the range [0, 1]
If ``tol < 0`` or ``tol > 1``.
"""
validate(reference_tempi, reference_weight, estimated_tempi)
if tol < 0 or tol > 1:
raise ValueError('invalid tolerance {}: must lie in the range '
'[0, 1]'.format(tol))
if tol == 0.:
warnings.warn('A tolerance of 0.0 may not '
'lead to the results you expect.')
hits = [False, False]
for i, ref_t in enumerate(reference_tempi):
if ref_t > 0:
# Compute the relative error for this reference tempo
f_ref_t = float(ref_t)
relative_error = np.min(np.abs(ref_t - estimated_tempi) / f_ref_t)
# Count the hits
hits[i] = relative_error <= tol
p_score = reference_weight * hits[0] + (1.0-reference_weight) * hits[1]
one_correct = bool(np.max(hits))
both_correct = bool(np.min(hits))
return p_score, one_correct, both_correct
|
python
|
def detection(reference_tempi, reference_weight, estimated_tempi, tol=0.08):
"""Compute the tempo detection accuracy metric.
Parameters
----------
reference_tempi : np.ndarray, shape=(2,)
Two non-negative reference tempi
reference_weight : float > 0
The relative strength of ``reference_tempi[0]`` vs
``reference_tempi[1]``.
estimated_tempi : np.ndarray, shape=(2,)
Two non-negative estimated tempi.
tol : float in [0, 1]:
The maximum allowable deviation from a reference tempo to
count as a hit.
``|est_t - ref_t| <= tol * ref_t``
(Default value = 0.08)
Returns
-------
p_score : float in [0, 1]
Weighted average of recalls:
``reference_weight * hits[0] + (1 - reference_weight) * hits[1]``
one_correct : bool
True if at least one reference tempo was correctly estimated
both_correct : bool
True if both reference tempi were correctly estimated
Raises
------
ValueError
If the input tempi are ill-formed
If the reference weight is not in the range [0, 1]
If ``tol < 0`` or ``tol > 1``.
"""
validate(reference_tempi, reference_weight, estimated_tempi)
if tol < 0 or tol > 1:
raise ValueError('invalid tolerance {}: must lie in the range '
'[0, 1]'.format(tol))
if tol == 0.:
warnings.warn('A tolerance of 0.0 may not '
'lead to the results you expect.')
hits = [False, False]
for i, ref_t in enumerate(reference_tempi):
if ref_t > 0:
# Compute the relative error for this reference tempo
f_ref_t = float(ref_t)
relative_error = np.min(np.abs(ref_t - estimated_tempi) / f_ref_t)
# Count the hits
hits[i] = relative_error <= tol
p_score = reference_weight * hits[0] + (1.0-reference_weight) * hits[1]
one_correct = bool(np.max(hits))
both_correct = bool(np.min(hits))
return p_score, one_correct, both_correct
|
[
"def",
"detection",
"(",
"reference_tempi",
",",
"reference_weight",
",",
"estimated_tempi",
",",
"tol",
"=",
"0.08",
")",
":",
"validate",
"(",
"reference_tempi",
",",
"reference_weight",
",",
"estimated_tempi",
")",
"if",
"tol",
"<",
"0",
"or",
"tol",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'invalid tolerance {}: must lie in the range '",
"'[0, 1]'",
".",
"format",
"(",
"tol",
")",
")",
"if",
"tol",
"==",
"0.",
":",
"warnings",
".",
"warn",
"(",
"'A tolerance of 0.0 may not '",
"'lead to the results you expect.'",
")",
"hits",
"=",
"[",
"False",
",",
"False",
"]",
"for",
"i",
",",
"ref_t",
"in",
"enumerate",
"(",
"reference_tempi",
")",
":",
"if",
"ref_t",
">",
"0",
":",
"# Compute the relative error for this reference tempo",
"f_ref_t",
"=",
"float",
"(",
"ref_t",
")",
"relative_error",
"=",
"np",
".",
"min",
"(",
"np",
".",
"abs",
"(",
"ref_t",
"-",
"estimated_tempi",
")",
"/",
"f_ref_t",
")",
"# Count the hits",
"hits",
"[",
"i",
"]",
"=",
"relative_error",
"<=",
"tol",
"p_score",
"=",
"reference_weight",
"*",
"hits",
"[",
"0",
"]",
"+",
"(",
"1.0",
"-",
"reference_weight",
")",
"*",
"hits",
"[",
"1",
"]",
"one_correct",
"=",
"bool",
"(",
"np",
".",
"max",
"(",
"hits",
")",
")",
"both_correct",
"=",
"bool",
"(",
"np",
".",
"min",
"(",
"hits",
")",
")",
"return",
"p_score",
",",
"one_correct",
",",
"both_correct"
] |
Compute the tempo detection accuracy metric.
Parameters
----------
reference_tempi : np.ndarray, shape=(2,)
Two non-negative reference tempi
reference_weight : float > 0
The relative strength of ``reference_tempi[0]`` vs
``reference_tempi[1]``.
estimated_tempi : np.ndarray, shape=(2,)
Two non-negative estimated tempi.
tol : float in [0, 1]:
The maximum allowable deviation from a reference tempo to
count as a hit.
``|est_t - ref_t| <= tol * ref_t``
(Default value = 0.08)
Returns
-------
p_score : float in [0, 1]
Weighted average of recalls:
``reference_weight * hits[0] + (1 - reference_weight) * hits[1]``
one_correct : bool
True if at least one reference tempo was correctly estimated
both_correct : bool
True if both reference tempi were correctly estimated
Raises
------
ValueError
If the input tempi are ill-formed
If the reference weight is not in the range [0, 1]
If ``tol < 0`` or ``tol > 1``.
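A worked toy example, assuming the function is importable as mir_eval.tempo.detection; the tempi and weight are made up.
import numpy as np
import mir_eval.tempo

ref_tempi = np.array([60.0, 180.0])    # slow / fast reference pair
ref_weight = 0.7                       # relative salience of the slower tempo
est_tempi = np.array([61.0, 120.0])    # 61 is within 8% of 60; 120 matches neither reference

p_score, one_correct, both_correct = mir_eval.tempo.detection(
    ref_tempi, ref_weight, est_tempi)
# p_score == 0.7, one_correct is True, both_correct is False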
|
[
"Compute",
"the",
"tempo",
"detection",
"accuracy",
"metric",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/tempo.py#L77-L145
|
train
|
craffel/mir_eval
|
mir_eval/multipitch.py
|
validate
|
def validate(ref_time, ref_freqs, est_time, est_freqs):
"""Checks that the time and frequency inputs are well-formed.
Parameters
----------
ref_time : np.ndarray
reference time stamps in seconds
ref_freqs : list of np.ndarray
reference frequencies in Hz
est_time : np.ndarray
estimate time stamps in seconds
est_freqs : list of np.ndarray
estimated frequencies in Hz
"""
util.validate_events(ref_time, max_time=MAX_TIME)
util.validate_events(est_time, max_time=MAX_TIME)
if ref_time.size == 0:
warnings.warn("Reference times are empty.")
if ref_time.ndim != 1:
raise ValueError("Reference times have invalid dimension")
if len(ref_freqs) == 0:
warnings.warn("Reference frequencies are empty.")
if est_time.size == 0:
warnings.warn("Estimated times are empty.")
if est_time.ndim != 1:
raise ValueError("Estimated times have invalid dimension")
if len(est_freqs) == 0:
warnings.warn("Estimated frequencies are empty.")
if ref_time.size != len(ref_freqs):
raise ValueError('Reference times and frequencies have unequal '
'lengths.')
if est_time.size != len(est_freqs):
raise ValueError('Estimate times and frequencies have unequal '
'lengths.')
for freq in ref_freqs:
util.validate_frequencies(freq, max_freq=MAX_FREQ, min_freq=MIN_FREQ,
allow_negatives=False)
for freq in est_freqs:
util.validate_frequencies(freq, max_freq=MAX_FREQ, min_freq=MIN_FREQ,
allow_negatives=False)
|
python
|
def validate(ref_time, ref_freqs, est_time, est_freqs):
"""Checks that the time and frequency inputs are well-formed.
Parameters
----------
ref_time : np.ndarray
reference time stamps in seconds
ref_freqs : list of np.ndarray
reference frequencies in Hz
est_time : np.ndarray
estimate time stamps in seconds
est_freqs : list of np.ndarray
estimated frequencies in Hz
"""
util.validate_events(ref_time, max_time=MAX_TIME)
util.validate_events(est_time, max_time=MAX_TIME)
if ref_time.size == 0:
warnings.warn("Reference times are empty.")
if ref_time.ndim != 1:
raise ValueError("Reference times have invalid dimension")
if len(ref_freqs) == 0:
warnings.warn("Reference frequencies are empty.")
if est_time.size == 0:
warnings.warn("Estimated times are empty.")
if est_time.ndim != 1:
raise ValueError("Estimated times have invalid dimension")
if len(est_freqs) == 0:
warnings.warn("Estimated frequencies are empty.")
if ref_time.size != len(ref_freqs):
raise ValueError('Reference times and frequencies have unequal '
'lengths.')
if est_time.size != len(est_freqs):
raise ValueError('Estimate times and frequencies have unequal '
'lengths.')
for freq in ref_freqs:
util.validate_frequencies(freq, max_freq=MAX_FREQ, min_freq=MIN_FREQ,
allow_negatives=False)
for freq in est_freqs:
util.validate_frequencies(freq, max_freq=MAX_FREQ, min_freq=MIN_FREQ,
allow_negatives=False)
|
[
"def",
"validate",
"(",
"ref_time",
",",
"ref_freqs",
",",
"est_time",
",",
"est_freqs",
")",
":",
"util",
".",
"validate_events",
"(",
"ref_time",
",",
"max_time",
"=",
"MAX_TIME",
")",
"util",
".",
"validate_events",
"(",
"est_time",
",",
"max_time",
"=",
"MAX_TIME",
")",
"if",
"ref_time",
".",
"size",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Reference times are empty.\"",
")",
"if",
"ref_time",
".",
"ndim",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Reference times have invalid dimension\"",
")",
"if",
"len",
"(",
"ref_freqs",
")",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Reference frequencies are empty.\"",
")",
"if",
"est_time",
".",
"size",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Estimated times are empty.\"",
")",
"if",
"est_time",
".",
"ndim",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Estimated times have invalid dimension\"",
")",
"if",
"len",
"(",
"est_freqs",
")",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Estimated frequencies are empty.\"",
")",
"if",
"ref_time",
".",
"size",
"!=",
"len",
"(",
"ref_freqs",
")",
":",
"raise",
"ValueError",
"(",
"'Reference times and frequencies have unequal '",
"'lengths.'",
")",
"if",
"est_time",
".",
"size",
"!=",
"len",
"(",
"est_freqs",
")",
":",
"raise",
"ValueError",
"(",
"'Estimate times and frequencies have unequal '",
"'lengths.'",
")",
"for",
"freq",
"in",
"ref_freqs",
":",
"util",
".",
"validate_frequencies",
"(",
"freq",
",",
"max_freq",
"=",
"MAX_FREQ",
",",
"min_freq",
"=",
"MIN_FREQ",
",",
"allow_negatives",
"=",
"False",
")",
"for",
"freq",
"in",
"est_freqs",
":",
"util",
".",
"validate_frequencies",
"(",
"freq",
",",
"max_freq",
"=",
"MAX_FREQ",
",",
"min_freq",
"=",
"MIN_FREQ",
",",
"allow_negatives",
"=",
"False",
")"
] |
Checks that the time and frequency inputs are well-formed.
Parameters
----------
ref_time : np.ndarray
reference time stamps in seconds
ref_freqs : list of np.ndarray
reference frequencies in Hz
est_time : np.ndarray
estimate time stamps in seconds
est_freqs : list of np.ndarray
estimated frequencies in Hz
|
[
"Checks",
"that",
"the",
"time",
"and",
"frequency",
"inputs",
"are",
"well",
"-",
"formed",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/multipitch.py#L57-L101
|
train
|
craffel/mir_eval
|
mir_eval/multipitch.py
|
resample_multipitch
|
def resample_multipitch(times, frequencies, target_times):
"""Resamples multipitch time series to a new timescale. Values in
``target_times`` outside the range of ``times`` return no pitch estimate.
Parameters
----------
times : np.ndarray
Array of time stamps
frequencies : list of np.ndarray
List of np.ndarrays of frequency values
target_times : np.ndarray
Array of target time stamps
Returns
-------
frequencies_resampled : list of numpy arrays
Frequency list of lists resampled to new timebase
"""
if target_times.size == 0:
return []
if times.size == 0:
return [np.array([])]*len(target_times)
n_times = len(frequencies)
# scipy's interpolate doesn't handle ragged arrays. Instead, we interpolate
# the frequency index and then map back to the frequency values.
# This only works because we're using a nearest neighbor interpolator!
frequency_index = np.arange(0, n_times)
# times are already ordered so assume_sorted=True for efficiency
# since we're interpolating the index, fill_value is set to the first index
# that is out of range. We handle this in the next line.
new_frequency_index = scipy.interpolate.interp1d(
times, frequency_index, kind='nearest', bounds_error=False,
assume_sorted=True, fill_value=n_times)(target_times)
# create array of frequencies plus additional empty element at the end for
# target time stamps that are out of the interpolation range
freq_vals = frequencies + [np.array([])]
# map interpolated indices back to frequency values
frequencies_resampled = [
freq_vals[i] for i in new_frequency_index.astype(int)]
return frequencies_resampled
|
python
|
def resample_multipitch(times, frequencies, target_times):
"""Resamples multipitch time series to a new timescale. Values in
``target_times`` outside the range of ``times`` return no pitch estimate.
Parameters
----------
times : np.ndarray
Array of time stamps
frequencies : list of np.ndarray
List of np.ndarrays of frequency values
target_times : np.ndarray
Array of target time stamps
Returns
-------
frequencies_resampled : list of numpy arrays
Frequency list of lists resampled to new timebase
"""
if target_times.size == 0:
return []
if times.size == 0:
return [np.array([])]*len(target_times)
n_times = len(frequencies)
# scipy's interpolate doesn't handle ragged arrays. Instead, we interpolate
# the frequency index and then map back to the frequency values.
# This only works because we're using a nearest neighbor interpolator!
frequency_index = np.arange(0, n_times)
# times are already ordered so assume_sorted=True for efficiency
# since we're interpolating the index, fill_value is set to the first index
# that is out of range. We handle this in the next line.
new_frequency_index = scipy.interpolate.interp1d(
times, frequency_index, kind='nearest', bounds_error=False,
assume_sorted=True, fill_value=n_times)(target_times)
# create array of frequencies plus additional empty element at the end for
# target time stamps that are out of the interpolation range
freq_vals = frequencies + [np.array([])]
# map interpolated indices back to frequency values
frequencies_resampled = [
freq_vals[i] for i in new_frequency_index.astype(int)]
return frequencies_resampled
|
[
"def",
"resample_multipitch",
"(",
"times",
",",
"frequencies",
",",
"target_times",
")",
":",
"if",
"target_times",
".",
"size",
"==",
"0",
":",
"return",
"[",
"]",
"if",
"times",
".",
"size",
"==",
"0",
":",
"return",
"[",
"np",
".",
"array",
"(",
"[",
"]",
")",
"]",
"*",
"len",
"(",
"target_times",
")",
"n_times",
"=",
"len",
"(",
"frequencies",
")",
"# scipy's interpolate doesn't handle ragged arrays. Instead, we interpolate",
"# the frequency index and then map back to the frequency values.",
"# This only works because we're using a nearest neighbor interpolator!",
"frequency_index",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"n_times",
")",
"# times are already ordered so assume_sorted=True for efficiency",
"# since we're interpolating the index, fill_value is set to the first index",
"# that is out of range. We handle this in the next line.",
"new_frequency_index",
"=",
"scipy",
".",
"interpolate",
".",
"interp1d",
"(",
"times",
",",
"frequency_index",
",",
"kind",
"=",
"'nearest'",
",",
"bounds_error",
"=",
"False",
",",
"assume_sorted",
"=",
"True",
",",
"fill_value",
"=",
"n_times",
")",
"(",
"target_times",
")",
"# create array of frequencies plus additional empty element at the end for",
"# target time stamps that are out of the interpolation range",
"freq_vals",
"=",
"frequencies",
"+",
"[",
"np",
".",
"array",
"(",
"[",
"]",
")",
"]",
"# map interpolated indices back to frequency values",
"frequencies_resampled",
"=",
"[",
"freq_vals",
"[",
"i",
"]",
"for",
"i",
"in",
"new_frequency_index",
".",
"astype",
"(",
"int",
")",
"]",
"return",
"frequencies_resampled"
] |
Resamples multipitch time series to a new timescale. Values in
``target_times`` outside the range of ``times`` return no pitch estimate.
Parameters
----------
times : np.ndarray
Array of time stamps
frequencies : list of np.ndarray
List of np.ndarrays of frequency values
target_times : np.ndarray
Array of target time stamps
Returns
-------
frequencies_resampled : list of numpy arrays
Frequency list of lists resampled to new timebase
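A minimal sketch, assuming the function is importable as mir_eval.multipitch.resample_multipitch (the file shown is mir_eval/multipitch.py); the times and frequencies are made up.
import numpy as np
import mir_eval.multipitch

times = np.array([0.0, 0.1, 0.2])
freqs = [np.array([220.0]), np.array([220.0, 330.0]), np.array([])]
target = np.array([0.05, 0.15, 0.30])

resampled = mir_eval.multipitch.resample_multipitch(times, freqs, target)
# Each target time takes the frequency list of the nearest original frame;
# 0.30 s lies outside [0.0, 0.2], so it gets an empty array.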
|
[
"Resamples",
"multipitch",
"time",
"series",
"to",
"a",
"new",
"timescale",
".",
"Values",
"in",
"target_times",
"outside",
"the",
"range",
"of",
"times",
"return",
"no",
"pitch",
"estimate",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/multipitch.py#L104-L150
|
train
|
craffel/mir_eval
|
mir_eval/multipitch.py
|
compute_num_true_positives
|
def compute_num_true_positives(ref_freqs, est_freqs, window=0.5, chroma=False):
"""Compute the number of true positives in an estimate given a reference.
A frequency is correct if it is within a quartertone of the
correct frequency.
Parameters
----------
ref_freqs : list of np.ndarray
reference frequencies (MIDI)
est_freqs : list of np.ndarray
estimated frequencies (MIDI)
window : float
Window size, in semitones
chroma : bool
If True, computes distances modulo n.
If True, ``ref_freqs`` and ``est_freqs`` should be wrapped modulo n.
Returns
-------
true_positives : np.ndarray
Array the same length as ref_freqs containing the number of true
positives.
"""
n_frames = len(ref_freqs)
true_positives = np.zeros((n_frames, ))
for i, (ref_frame, est_frame) in enumerate(zip(ref_freqs, est_freqs)):
if chroma:
# match chroma-wrapped frequency events
matching = util.match_events(
ref_frame, est_frame, window,
distance=util._outer_distance_mod_n)
else:
# match frequency events within tolerance window in semitones
matching = util.match_events(ref_frame, est_frame, window)
true_positives[i] = len(matching)
return true_positives
|
python
|
def compute_num_true_positives(ref_freqs, est_freqs, window=0.5, chroma=False):
"""Compute the number of true positives in an estimate given a reference.
A frequency is correct if it is within a quartertone of the
correct frequency.
Parameters
----------
ref_freqs : list of np.ndarray
reference frequencies (MIDI)
est_freqs : list of np.ndarray
estimated frequencies (MIDI)
window : float
Window size, in semitones
chroma : bool
If True, computes distances modulo n.
If True, ``ref_freqs`` and ``est_freqs`` should be wrapped modulo n.
Returns
-------
true_positives : np.ndarray
Array the same length as ref_freqs containing the number of true
positives.
"""
n_frames = len(ref_freqs)
true_positives = np.zeros((n_frames, ))
for i, (ref_frame, est_frame) in enumerate(zip(ref_freqs, est_freqs)):
if chroma:
# match chroma-wrapped frequency events
matching = util.match_events(
ref_frame, est_frame, window,
distance=util._outer_distance_mod_n)
else:
# match frequency events within tolerance window in semitones
matching = util.match_events(ref_frame, est_frame, window)
true_positives[i] = len(matching)
return true_positives
|
[
"def",
"compute_num_true_positives",
"(",
"ref_freqs",
",",
"est_freqs",
",",
"window",
"=",
"0.5",
",",
"chroma",
"=",
"False",
")",
":",
"n_frames",
"=",
"len",
"(",
"ref_freqs",
")",
"true_positives",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_frames",
",",
")",
")",
"for",
"i",
",",
"(",
"ref_frame",
",",
"est_frame",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"ref_freqs",
",",
"est_freqs",
")",
")",
":",
"if",
"chroma",
":",
"# match chroma-wrapped frequency events",
"matching",
"=",
"util",
".",
"match_events",
"(",
"ref_frame",
",",
"est_frame",
",",
"window",
",",
"distance",
"=",
"util",
".",
"_outer_distance_mod_n",
")",
"else",
":",
"# match frequency events within tolerance window in semitones",
"matching",
"=",
"util",
".",
"match_events",
"(",
"ref_frame",
",",
"est_frame",
",",
"window",
")",
"true_positives",
"[",
"i",
"]",
"=",
"len",
"(",
"matching",
")",
"return",
"true_positives"
] |
Compute the number of true positives in an estimate given a reference.
A frequency is correct if it is within a quartertone of the
correct frequency.
Parameters
----------
ref_freqs : list of np.ndarray
reference frequencies (MIDI)
est_freqs : list of np.ndarray
estimated frequencies (MIDI)
window : float
Window size, in semitones
chroma : bool
If True, computes distances modulo n.
If True, ``ref_freqs`` and ``est_freqs`` should be wrapped modulo n.
Returns
-------
true_positives : np.ndarray
Array the same length as ref_freqs containing the number of true
positives.
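A toy sketch, assuming the function is importable as mir_eval.multipitch.compute_num_true_positives; the values (already converted from Hz to fractional MIDI note numbers) are made up.
import numpy as np
import mir_eval.multipitch

ref_freqs = [np.array([60.0, 64.0]), np.array([60.0])]
est_freqs = [np.array([60.2, 70.0]), np.array([])]

tp = mir_eval.multipitch.compute_num_true_positives(ref_freqs, est_freqs)
# Frame 0: 60.2 falls within the 0.5-semitone window of 60.0 -> 1 true positive.
# Frame 1: empty estimate -> 0. So tp == array([1., 0.]).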
|
[
"Compute",
"the",
"number",
"of",
"true",
"positives",
"in",
"an",
"estimate",
"given",
"a",
"reference",
".",
"A",
"frequency",
"is",
"correct",
"if",
"it",
"is",
"within",
"a",
"quartertone",
"of",
"the",
"correct",
"frequency",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/multipitch.py#L204-L243
|
train
|
craffel/mir_eval
|
mir_eval/multipitch.py
|
compute_accuracy
|
def compute_accuracy(true_positives, n_ref, n_est):
"""Compute accuracy metrics.
Parameters
----------
true_positives : np.ndarray
Array containing the number of true positives at each time point.
n_ref : np.ndarray
Array containing the number of reference frequencies at each time
point.
n_est : np.ndarray
Array containing the number of estimate frequencies at each time point.
Returns
-------
precision : float
``sum(true_positives)/sum(n_est)``
recall : float
``sum(true_positives)/sum(n_ref)``
acc : float
``sum(true_positives)/sum(n_est + n_ref - true_positives)``
"""
true_positive_sum = float(true_positives.sum())
n_est_sum = n_est.sum()
if n_est_sum > 0:
precision = true_positive_sum/n_est.sum()
else:
warnings.warn("Estimate frequencies are all empty.")
precision = 0.0
n_ref_sum = n_ref.sum()
if n_ref_sum > 0:
recall = true_positive_sum/n_ref.sum()
else:
warnings.warn("Reference frequencies are all empty.")
recall = 0.0
acc_denom = (n_est + n_ref - true_positives).sum()
if acc_denom > 0:
acc = true_positive_sum/acc_denom
else:
acc = 0.0
return precision, recall, acc
|
python
|
def compute_accuracy(true_positives, n_ref, n_est):
"""Compute accuracy metrics.
Parameters
----------
true_positives : np.ndarray
Array containing the number of true positives at each time point.
n_ref : np.ndarray
Array containing the number of reference frequencies at each time
point.
n_est : np.ndarray
Array containing the number of estimate frequencies at each time point.
Returns
-------
precision : float
``sum(true_positives)/sum(n_est)``
recall : float
``sum(true_positives)/sum(n_ref)``
acc : float
``sum(true_positives)/sum(n_est + n_ref - true_positives)``
"""
true_positive_sum = float(true_positives.sum())
n_est_sum = n_est.sum()
if n_est_sum > 0:
precision = true_positive_sum/n_est.sum()
else:
warnings.warn("Estimate frequencies are all empty.")
precision = 0.0
n_ref_sum = n_ref.sum()
if n_ref_sum > 0:
recall = true_positive_sum/n_ref.sum()
else:
warnings.warn("Reference frequencies are all empty.")
recall = 0.0
acc_denom = (n_est + n_ref - true_positives).sum()
if acc_denom > 0:
acc = true_positive_sum/acc_denom
else:
acc = 0.0
return precision, recall, acc
|
[
"def",
"compute_accuracy",
"(",
"true_positives",
",",
"n_ref",
",",
"n_est",
")",
":",
"true_positive_sum",
"=",
"float",
"(",
"true_positives",
".",
"sum",
"(",
")",
")",
"n_est_sum",
"=",
"n_est",
".",
"sum",
"(",
")",
"if",
"n_est_sum",
">",
"0",
":",
"precision",
"=",
"true_positive_sum",
"/",
"n_est",
".",
"sum",
"(",
")",
"else",
":",
"warnings",
".",
"warn",
"(",
"\"Estimate frequencies are all empty.\"",
")",
"precision",
"=",
"0.0",
"n_ref_sum",
"=",
"n_ref",
".",
"sum",
"(",
")",
"if",
"n_ref_sum",
">",
"0",
":",
"recall",
"=",
"true_positive_sum",
"/",
"n_ref",
".",
"sum",
"(",
")",
"else",
":",
"warnings",
".",
"warn",
"(",
"\"Reference frequencies are all empty.\"",
")",
"recall",
"=",
"0.0",
"acc_denom",
"=",
"(",
"n_est",
"+",
"n_ref",
"-",
"true_positives",
")",
".",
"sum",
"(",
")",
"if",
"acc_denom",
">",
"0",
":",
"acc",
"=",
"true_positive_sum",
"/",
"acc_denom",
"else",
":",
"acc",
"=",
"0.0",
"return",
"precision",
",",
"recall",
",",
"acc"
] |
Compute accuracy metrics.
Parameters
----------
true_positives : np.ndarray
Array containing the number of true positives at each time point.
n_ref : np.ndarray
Array containing the number of reference frequencies at each time
point.
n_est : np.ndarray
Array containing the number of estimate frequencies at each time point.
Returns
-------
precision : float
``sum(true_positives)/sum(n_est)``
recall : float
``sum(true_positives)/sum(n_ref)``
acc : float
``sum(true_positives)/sum(n_est + n_ref - true_positives)``
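A worked toy example, assuming the function is importable as mir_eval.multipitch.compute_accuracy; the per-frame counts are made up.
import numpy as np
import mir_eval.multipitch

true_positives = np.array([1.0, 0.0])
n_ref = np.array([2.0, 1.0])    # reference pitches per frame
n_est = np.array([2.0, 0.0])    # estimated pitches per frame

precision, recall, acc = mir_eval.multipitch.compute_accuracy(
    true_positives, n_ref, n_est)
# precision = 1/2, recall = 1/3, acc = 1/((2+2-1) + (0+1-0)) = 1/4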
|
[
"Compute",
"accuracy",
"metrics",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/multipitch.py#L246-L291
|
train
|
craffel/mir_eval
|
mir_eval/multipitch.py
|
compute_err_score
|
def compute_err_score(true_positives, n_ref, n_est):
"""Compute error score metrics.
Parameters
----------
true_positives : np.ndarray
Array containing the number of true positives at each time point.
n_ref : np.ndarray
Array containing the number of reference frequencies at each time
point.
n_est : np.ndarray
Array containing the number of estimate frequencies at each time point.
Returns
-------
e_sub : float
Substitution error
e_miss : float
Miss error
e_fa : float
False alarm error
e_tot : float
Total error
"""
n_ref_sum = float(n_ref.sum())
if n_ref_sum == 0:
warnings.warn("Reference frequencies are all empty.")
return 0., 0., 0., 0.
# Substitution error
e_sub = (np.min([n_ref, n_est], axis=0) - true_positives).sum()/n_ref_sum
# compute the max of (n_ref - n_est) and 0
e_miss_numerator = n_ref - n_est
e_miss_numerator[e_miss_numerator < 0] = 0
# Miss error
e_miss = e_miss_numerator.sum()/n_ref_sum
# compute the max of (n_est - n_ref) and 0
e_fa_numerator = n_est - n_ref
e_fa_numerator[e_fa_numerator < 0] = 0
# False alarm error
e_fa = e_fa_numerator.sum()/n_ref_sum
# total error
e_tot = (np.max([n_ref, n_est], axis=0) - true_positives).sum()/n_ref_sum
return e_sub, e_miss, e_fa, e_tot
|
python
|
def compute_err_score(true_positives, n_ref, n_est):
"""Compute error score metrics.
Parameters
----------
true_positives : np.ndarray
Array containing the number of true positives at each time point.
n_ref : np.ndarray
Array containing the number of reference frequencies at each time
point.
n_est : np.ndarray
Array containing the number of estimate frequencies at each time point.
Returns
-------
e_sub : float
Substitution error
e_miss : float
Miss error
e_fa : float
False alarm error
e_tot : float
Total error
"""
n_ref_sum = float(n_ref.sum())
if n_ref_sum == 0:
warnings.warn("Reference frequencies are all empty.")
return 0., 0., 0., 0.
# Substitution error
e_sub = (np.min([n_ref, n_est], axis=0) - true_positives).sum()/n_ref_sum
# compute the max of (n_ref - n_est) and 0
e_miss_numerator = n_ref - n_est
e_miss_numerator[e_miss_numerator < 0] = 0
# Miss error
e_miss = e_miss_numerator.sum()/n_ref_sum
# compute the max of (n_est - n_ref) and 0
e_fa_numerator = n_est - n_ref
e_fa_numerator[e_fa_numerator < 0] = 0
# False alarm error
e_fa = e_fa_numerator.sum()/n_ref_sum
# total error
e_tot = (np.max([n_ref, n_est], axis=0) - true_positives).sum()/n_ref_sum
return e_sub, e_miss, e_fa, e_tot
|
[
"def",
"compute_err_score",
"(",
"true_positives",
",",
"n_ref",
",",
"n_est",
")",
":",
"n_ref_sum",
"=",
"float",
"(",
"n_ref",
".",
"sum",
"(",
")",
")",
"if",
"n_ref_sum",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Reference frequencies are all empty.\"",
")",
"return",
"0.",
",",
"0.",
",",
"0.",
",",
"0.",
"# Substitution error",
"e_sub",
"=",
"(",
"np",
".",
"min",
"(",
"[",
"n_ref",
",",
"n_est",
"]",
",",
"axis",
"=",
"0",
")",
"-",
"true_positives",
")",
".",
"sum",
"(",
")",
"/",
"n_ref_sum",
"# compute the max of (n_ref - n_est) and 0",
"e_miss_numerator",
"=",
"n_ref",
"-",
"n_est",
"e_miss_numerator",
"[",
"e_miss_numerator",
"<",
"0",
"]",
"=",
"0",
"# Miss error",
"e_miss",
"=",
"e_miss_numerator",
".",
"sum",
"(",
")",
"/",
"n_ref_sum",
"# compute the max of (n_est - n_ref) and 0",
"e_fa_numerator",
"=",
"n_est",
"-",
"n_ref",
"e_fa_numerator",
"[",
"e_fa_numerator",
"<",
"0",
"]",
"=",
"0",
"# False alarm error",
"e_fa",
"=",
"e_fa_numerator",
".",
"sum",
"(",
")",
"/",
"n_ref_sum",
"# total error",
"e_tot",
"=",
"(",
"np",
".",
"max",
"(",
"[",
"n_ref",
",",
"n_est",
"]",
",",
"axis",
"=",
"0",
")",
"-",
"true_positives",
")",
".",
"sum",
"(",
")",
"/",
"n_ref_sum",
"return",
"e_sub",
",",
"e_miss",
",",
"e_fa",
",",
"e_tot"
] |
Compute error score metrics.
Parameters
----------
true_positives : np.ndarray
Array containing the number of true positives at each time point.
n_ref : np.ndarray
Array containing the number of reference frequencies at each time
point.
n_est : np.ndarray
Array containing the number of estimate frequencies at each time point.
Returns
-------
e_sub : float
Substitution error
e_miss : float
Miss error
e_fa : float
False alarm error
e_tot : float
Total error
|
[
"Compute",
"error",
"score",
"metrics",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/multipitch.py#L294-L343
|
train
|
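The same toy counts can be fed to `compute_err_score` to see how the substitution, miss, false-alarm, and total errors decompose; this is a hedged sketch with invented values, not output from any recording:

import numpy as np
from mir_eval.multipitch import compute_err_score

true_positives = np.array([1, 2, 0, 1])
n_ref = np.array([2, 2, 1, 1])
n_est = np.array([1, 3, 0, 1])

e_sub, e_miss, e_fa, e_tot = compute_err_score(true_positives, n_ref, n_est)
# e_sub  = (min(n_ref, n_est) - tp).sum() / n_ref.sum()       = 0/6
# e_miss = clip(n_ref - n_est, 0, None).sum() / n_ref.sum()   = 2/6
# e_fa   = clip(n_est - n_ref, 0, None).sum() / n_ref.sum()   = 1/6
# e_tot  = (max(n_ref, n_est) - tp).sum() / n_ref.sum()       = 3/6
print(e_sub, e_miss, e_fa, e_tot)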
craffel/mir_eval
|
mir_eval/hierarchy.py
|
_hierarchy_bounds
|
def _hierarchy_bounds(intervals_hier):
'''Compute the covered time range of a hierarchical segmentation.
Parameters
----------
intervals_hier : list of ndarray
A hierarchical segmentation, encoded as a list of arrays of segment
intervals.
Returns
-------
t_min : float
t_max : float
The minimum and maximum times spanned by the annotation
'''
boundaries = list(itertools.chain(*list(itertools.chain(*intervals_hier))))
return min(boundaries), max(boundaries)
|
python
|
def _hierarchy_bounds(intervals_hier):
'''Compute the covered time range of a hierarchical segmentation.
Parameters
----------
intervals_hier : list of ndarray
A hierarchical segmentation, encoded as a list of arrays of segment
intervals.
Returns
-------
t_min : float
t_max : float
The minimum and maximum times spanned by the annotation
'''
boundaries = list(itertools.chain(*list(itertools.chain(*intervals_hier))))
return min(boundaries), max(boundaries)
|
[
"def",
"_hierarchy_bounds",
"(",
"intervals_hier",
")",
":",
"boundaries",
"=",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"intervals_hier",
")",
")",
")",
")",
"return",
"min",
"(",
"boundaries",
")",
",",
"max",
"(",
"boundaries",
")"
] |
Compute the covered time range of a hierarchical segmentation.
Parameters
----------
intervals_hier : list of ndarray
A hierarchical segmentation, encoded as a list of arrays of segment
intervals.
Returns
-------
t_min : float
t_max : float
The minimum and maximum times spanned by the annotation
|
[
"Compute",
"the",
"covered",
"time",
"range",
"of",
"a",
"hierarchical",
"segmentation",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/hierarchy.py#L83-L100
|
train
|
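To make the double `itertools.chain` in `_hierarchy_bounds` concrete, here is a self-contained sketch on a made-up two-level hierarchy (it repeats the helper's logic rather than importing the private function):

import itertools

intervals_hier = [
    [[0.0, 30.0], [30.0, 60.0]],                               # coarse level
    [[0.0, 15.0], [15.0, 30.0], [30.0, 45.0], [45.0, 60.0]],   # fine level
]

# The first chain flattens levels into intervals; the second flattens
# intervals into individual boundary times.
boundaries = list(itertools.chain(*list(itertools.chain(*intervals_hier))))
print(min(boundaries), max(boundaries))  # 0.0 60.0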
craffel/mir_eval
|
mir_eval/hierarchy.py
|
_align_intervals
|
def _align_intervals(int_hier, lab_hier, t_min=0.0, t_max=None):
'''Align a hierarchical annotation to span a fixed start and end time.
Parameters
----------
int_hier : list of list of intervals
lab_hier : list of list of str
Hierarchical segment annotations, encoded as a
list of list of intervals (int_hier) and list of
list of strings (lab_hier)
t_min : None or number >= 0
The minimum time value for the segmentation
t_max : None or number >= t_min
The maximum time value for the segmentation
Returns
-------
intervals_hier : list of list of intervals
labels_hier : list of list of str
        `int_hier` and `lab_hier` aligned to span `[t_min, t_max]`.
'''
return [list(_) for _ in zip(*[util.adjust_intervals(np.asarray(ival),
labels=lab,
t_min=t_min,
t_max=t_max)
for ival, lab in zip(int_hier, lab_hier)])]
|
python
|
def _align_intervals(int_hier, lab_hier, t_min=0.0, t_max=None):
'''Align a hierarchical annotation to span a fixed start and end time.
Parameters
----------
int_hier : list of list of intervals
lab_hier : list of list of str
Hierarchical segment annotations, encoded as a
list of list of intervals (int_hier) and list of
list of strings (lab_hier)
t_min : None or number >= 0
The minimum time value for the segmentation
t_max : None or number >= t_min
The maximum time value for the segmentation
Returns
-------
intervals_hier : list of list of intervals
labels_hier : list of list of str
        `int_hier` and `lab_hier` aligned to span `[t_min, t_max]`.
'''
return [list(_) for _ in zip(*[util.adjust_intervals(np.asarray(ival),
labels=lab,
t_min=t_min,
t_max=t_max)
for ival, lab in zip(int_hier, lab_hier)])]
|
[
"def",
"_align_intervals",
"(",
"int_hier",
",",
"lab_hier",
",",
"t_min",
"=",
"0.0",
",",
"t_max",
"=",
"None",
")",
":",
"return",
"[",
"list",
"(",
"_",
")",
"for",
"_",
"in",
"zip",
"(",
"*",
"[",
"util",
".",
"adjust_intervals",
"(",
"np",
".",
"asarray",
"(",
"ival",
")",
",",
"labels",
"=",
"lab",
",",
"t_min",
"=",
"t_min",
",",
"t_max",
"=",
"t_max",
")",
"for",
"ival",
",",
"lab",
"in",
"zip",
"(",
"int_hier",
",",
"lab_hier",
")",
"]",
")",
"]"
] |
Align a hierarchical annotation to span a fixed start and end time.
Parameters
----------
int_hier : list of list of intervals
lab_hier : list of list of str
Hierarchical segment annotations, encoded as a
list of list of intervals (int_hier) and list of
list of strings (lab_hier)
t_min : None or number >= 0
The minimum time value for the segmentation
t_max : None or number >= t_min
The maximum time value for the segmentation
Returns
-------
intervals_hier : list of list of intervals
labels_hier : list of list of str
    `int_hier` and `lab_hier` aligned to span `[t_min, t_max]`.
|
[
"Align",
"a",
"hierarchical",
"annotation",
"to",
"span",
"a",
"fixed",
"start",
"and",
"end",
"time",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/hierarchy.py#L103-L130
|
train
|
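A hedged sketch of calling `_align_intervals` on a toy annotation whose levels neither start at 0 nor reach the desired end time. Note that this is a private helper (leading underscore), so importing it directly is only for illustration, and its padding behaviour is inherited from `mir_eval.util.adjust_intervals`; the array values are invented:

import numpy as np
from mir_eval.hierarchy import _align_intervals  # private helper; subject to change

int_hier = [np.array([[1.0, 30.0], [30.0, 58.0]]),
            np.array([[1.0, 15.0], [15.0, 30.0], [30.0, 58.0]])]
lab_hier = [['A', 'B'], ['a', 'b', 'c']]

# Pad every level so it spans [0, 60]; padding segments get synthetic labels
aligned_ints, aligned_labs = _align_intervals(int_hier, lab_hier,
                                              t_min=0.0, t_max=60.0)
print(aligned_ints[0])  # first level now covers 0.0 through 60.0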
craffel/mir_eval
|
mir_eval/hierarchy.py
|
_compare_frame_rankings
|
def _compare_frame_rankings(ref, est, transitive=False):
'''Compute the number of ranking disagreements in two lists.
Parameters
----------
ref : np.ndarray, shape=(n,)
est : np.ndarray, shape=(n,)
Reference and estimate ranked lists.
`ref[i]` is the relevance score for point `i`.
transitive : bool
If true, all pairs of reference levels are compared.
If false, only adjacent pairs of reference levels are compared.
Returns
-------
inversions : int
The number of pairs of indices `i, j` where
`ref[i] < ref[j]` but `est[i] >= est[j]`.
normalizer : float
The total number of pairs (i, j) under consideration.
If transitive=True, then this is |{(i,j) : ref[i] < ref[j]}|
        If transitive=False, then this is |{(i,j) : ref[i] + 1 = ref[j]}|
'''
idx = np.argsort(ref)
ref_sorted = ref[idx]
est_sorted = est[idx]
# Find the break-points in ref_sorted
levels, positions, counts = np.unique(ref_sorted,
return_index=True,
return_counts=True)
positions = list(positions)
positions.append(len(ref_sorted))
index = collections.defaultdict(lambda: slice(0))
ref_map = collections.defaultdict(lambda: 0)
for level, cnt, start, end in zip(levels, counts,
positions[:-1], positions[1:]):
index[level] = slice(start, end)
ref_map[level] = cnt
# Now that we have values sorted, apply the inversion-counter to
# pairs of reference values
if transitive:
level_pairs = itertools.combinations(levels, 2)
else:
level_pairs = [(i, i+1) for i in levels]
level_pairs, lcounter = itertools.tee(level_pairs)
normalizer = float(sum([ref_map[i] * ref_map[j] for (i, j) in lcounter]))
if normalizer == 0:
return 0, 0.0
inversions = 0
for level_1, level_2 in level_pairs:
inversions += _count_inversions(est_sorted[index[level_1]],
est_sorted[index[level_2]])
return inversions, float(normalizer)
|
python
|
def _compare_frame_rankings(ref, est, transitive=False):
'''Compute the number of ranking disagreements in two lists.
Parameters
----------
ref : np.ndarray, shape=(n,)
est : np.ndarray, shape=(n,)
Reference and estimate ranked lists.
`ref[i]` is the relevance score for point `i`.
transitive : bool
If true, all pairs of reference levels are compared.
If false, only adjacent pairs of reference levels are compared.
Returns
-------
inversions : int
The number of pairs of indices `i, j` where
`ref[i] < ref[j]` but `est[i] >= est[j]`.
normalizer : float
The total number of pairs (i, j) under consideration.
If transitive=True, then this is |{(i,j) : ref[i] < ref[j]}|
        If transitive=False, then this is |{(i,j) : ref[i] + 1 = ref[j]}|
'''
idx = np.argsort(ref)
ref_sorted = ref[idx]
est_sorted = est[idx]
# Find the break-points in ref_sorted
levels, positions, counts = np.unique(ref_sorted,
return_index=True,
return_counts=True)
positions = list(positions)
positions.append(len(ref_sorted))
index = collections.defaultdict(lambda: slice(0))
ref_map = collections.defaultdict(lambda: 0)
for level, cnt, start, end in zip(levels, counts,
positions[:-1], positions[1:]):
index[level] = slice(start, end)
ref_map[level] = cnt
# Now that we have values sorted, apply the inversion-counter to
# pairs of reference values
if transitive:
level_pairs = itertools.combinations(levels, 2)
else:
level_pairs = [(i, i+1) for i in levels]
level_pairs, lcounter = itertools.tee(level_pairs)
normalizer = float(sum([ref_map[i] * ref_map[j] for (i, j) in lcounter]))
if normalizer == 0:
return 0, 0.0
inversions = 0
for level_1, level_2 in level_pairs:
inversions += _count_inversions(est_sorted[index[level_1]],
est_sorted[index[level_2]])
return inversions, float(normalizer)
|
[
"def",
"_compare_frame_rankings",
"(",
"ref",
",",
"est",
",",
"transitive",
"=",
"False",
")",
":",
"idx",
"=",
"np",
".",
"argsort",
"(",
"ref",
")",
"ref_sorted",
"=",
"ref",
"[",
"idx",
"]",
"est_sorted",
"=",
"est",
"[",
"idx",
"]",
"# Find the break-points in ref_sorted",
"levels",
",",
"positions",
",",
"counts",
"=",
"np",
".",
"unique",
"(",
"ref_sorted",
",",
"return_index",
"=",
"True",
",",
"return_counts",
"=",
"True",
")",
"positions",
"=",
"list",
"(",
"positions",
")",
"positions",
".",
"append",
"(",
"len",
"(",
"ref_sorted",
")",
")",
"index",
"=",
"collections",
".",
"defaultdict",
"(",
"lambda",
":",
"slice",
"(",
"0",
")",
")",
"ref_map",
"=",
"collections",
".",
"defaultdict",
"(",
"lambda",
":",
"0",
")",
"for",
"level",
",",
"cnt",
",",
"start",
",",
"end",
"in",
"zip",
"(",
"levels",
",",
"counts",
",",
"positions",
"[",
":",
"-",
"1",
"]",
",",
"positions",
"[",
"1",
":",
"]",
")",
":",
"index",
"[",
"level",
"]",
"=",
"slice",
"(",
"start",
",",
"end",
")",
"ref_map",
"[",
"level",
"]",
"=",
"cnt",
"# Now that we have values sorted, apply the inversion-counter to",
"# pairs of reference values",
"if",
"transitive",
":",
"level_pairs",
"=",
"itertools",
".",
"combinations",
"(",
"levels",
",",
"2",
")",
"else",
":",
"level_pairs",
"=",
"[",
"(",
"i",
",",
"i",
"+",
"1",
")",
"for",
"i",
"in",
"levels",
"]",
"level_pairs",
",",
"lcounter",
"=",
"itertools",
".",
"tee",
"(",
"level_pairs",
")",
"normalizer",
"=",
"float",
"(",
"sum",
"(",
"[",
"ref_map",
"[",
"i",
"]",
"*",
"ref_map",
"[",
"j",
"]",
"for",
"(",
"i",
",",
"j",
")",
"in",
"lcounter",
"]",
")",
")",
"if",
"normalizer",
"==",
"0",
":",
"return",
"0",
",",
"0.0",
"inversions",
"=",
"0",
"for",
"level_1",
",",
"level_2",
"in",
"level_pairs",
":",
"inversions",
"+=",
"_count_inversions",
"(",
"est_sorted",
"[",
"index",
"[",
"level_1",
"]",
"]",
",",
"est_sorted",
"[",
"index",
"[",
"level_2",
"]",
"]",
")",
"return",
"inversions",
",",
"float",
"(",
"normalizer",
")"
] |
Compute the number of ranking disagreements in two lists.
Parameters
----------
ref : np.ndarray, shape=(n,)
est : np.ndarray, shape=(n,)
Reference and estimate ranked lists.
`ref[i]` is the relevance score for point `i`.
transitive : bool
If true, all pairs of reference levels are compared.
If false, only adjacent pairs of reference levels are compared.
Returns
-------
inversions : int
The number of pairs of indices `i, j` where
`ref[i] < ref[j]` but `est[i] >= est[j]`.
normalizer : float
The total number of pairs (i, j) under consideration.
If transitive=True, then this is |{(i,j) : ref[i] < ref[j]}|
    If transitive=False, then this is |{(i,j) : ref[i] + 1 = ref[j]}|
|
[
"Compute",
"the",
"number",
"of",
"ranking",
"disagreements",
"in",
"two",
"lists",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/hierarchy.py#L370-L436
|
train
|
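A small worked example for `_compare_frame_rankings`: the estimate below ranks frames consistently with the reference levels, so there are no inversions, while the normalizer counts the 2 * 2 cross-level pairs that were compared (again a private helper, imported only to illustrate; the values are invented):

import numpy as np
from mir_eval.hierarchy import _compare_frame_rankings  # private helper

ref = np.array([0, 0, 1, 1])          # two reference levels, two frames each
est = np.array([0.1, 0.2, 0.7, 0.9])  # estimate agrees with the reference order

inversions, normalizer = _compare_frame_rankings(ref, est, transitive=False)
print(inversions, normalizer)  # 0 4.0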
craffel/mir_eval
|
mir_eval/hierarchy.py
|
validate_hier_intervals
|
def validate_hier_intervals(intervals_hier):
'''Validate a hierarchical segment annotation.
Parameters
----------
intervals_hier : ordered list of segmentations
Raises
------
ValueError
If any segmentation does not span the full duration of the top-level
segmentation.
If any segmentation does not start at 0.
'''
# Synthesize a label array for the top layer.
label_top = util.generate_labels(intervals_hier[0])
boundaries = set(util.intervals_to_boundaries(intervals_hier[0]))
for level, intervals in enumerate(intervals_hier[1:], 1):
# Make sure this level is consistent with the root
label_current = util.generate_labels(intervals)
validate_structure(intervals_hier[0], label_top,
intervals, label_current)
# Make sure all previous boundaries are accounted for
new_bounds = set(util.intervals_to_boundaries(intervals))
if boundaries - new_bounds:
warnings.warn('Segment hierarchy is inconsistent '
'at level {:d}'.format(level))
boundaries |= new_bounds
|
python
|
def validate_hier_intervals(intervals_hier):
'''Validate a hierarchical segment annotation.
Parameters
----------
intervals_hier : ordered list of segmentations
Raises
------
ValueError
If any segmentation does not span the full duration of the top-level
segmentation.
If any segmentation does not start at 0.
'''
# Synthesize a label array for the top layer.
label_top = util.generate_labels(intervals_hier[0])
boundaries = set(util.intervals_to_boundaries(intervals_hier[0]))
for level, intervals in enumerate(intervals_hier[1:], 1):
# Make sure this level is consistent with the root
label_current = util.generate_labels(intervals)
validate_structure(intervals_hier[0], label_top,
intervals, label_current)
# Make sure all previous boundaries are accounted for
new_bounds = set(util.intervals_to_boundaries(intervals))
if boundaries - new_bounds:
warnings.warn('Segment hierarchy is inconsistent '
'at level {:d}'.format(level))
boundaries |= new_bounds
|
[
"def",
"validate_hier_intervals",
"(",
"intervals_hier",
")",
":",
"# Synthesize a label array for the top layer.",
"label_top",
"=",
"util",
".",
"generate_labels",
"(",
"intervals_hier",
"[",
"0",
"]",
")",
"boundaries",
"=",
"set",
"(",
"util",
".",
"intervals_to_boundaries",
"(",
"intervals_hier",
"[",
"0",
"]",
")",
")",
"for",
"level",
",",
"intervals",
"in",
"enumerate",
"(",
"intervals_hier",
"[",
"1",
":",
"]",
",",
"1",
")",
":",
"# Make sure this level is consistent with the root",
"label_current",
"=",
"util",
".",
"generate_labels",
"(",
"intervals",
")",
"validate_structure",
"(",
"intervals_hier",
"[",
"0",
"]",
",",
"label_top",
",",
"intervals",
",",
"label_current",
")",
"# Make sure all previous boundaries are accounted for",
"new_bounds",
"=",
"set",
"(",
"util",
".",
"intervals_to_boundaries",
"(",
"intervals",
")",
")",
"if",
"boundaries",
"-",
"new_bounds",
":",
"warnings",
".",
"warn",
"(",
"'Segment hierarchy is inconsistent '",
"'at level {:d}'",
".",
"format",
"(",
"level",
")",
")",
"boundaries",
"|=",
"new_bounds"
] |
Validate a hierarchical segment annotation.
Parameters
----------
intervals_hier : ordered list of segmentations
Raises
------
ValueError
If any segmentation does not span the full duration of the top-level
segmentation.
If any segmentation does not start at 0.
|
[
"Validate",
"a",
"hierarchical",
"segment",
"annotation",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/hierarchy.py#L439-L472
|
train
|
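A quick sketch of the validator on an invented, internally consistent two-level hierarchy; it returns silently here, whereas dropping the 30.0 boundary from the fine level would trigger the inconsistency warning:

import numpy as np
from mir_eval.hierarchy import validate_hier_intervals

intervals_hier = [
    np.array([[0.0, 30.0], [30.0, 60.0]]),
    np.array([[0.0, 15.0], [15.0, 30.0], [30.0, 45.0], [45.0, 60.0]]),
]

validate_hier_intervals(intervals_hier)  # no exception, no warning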
craffel/mir_eval
|
mir_eval/hierarchy.py
|
evaluate
|
def evaluate(ref_intervals_hier, ref_labels_hier,
est_intervals_hier, est_labels_hier, **kwargs):
'''Compute all hierarchical structure metrics for the given reference and
estimated annotations.
Examples
--------
A toy example with two two-layer annotations
>>> ref_i = [[[0, 30], [30, 60]], [[0, 15], [15, 30], [30, 45], [45, 60]]]
>>> est_i = [[[0, 45], [45, 60]], [[0, 15], [15, 30], [30, 45], [45, 60]]]
>>> ref_l = [ ['A', 'B'], ['a', 'b', 'a', 'c'] ]
>>> est_l = [ ['A', 'B'], ['a', 'a', 'b', 'b'] ]
>>> scores = mir_eval.hierarchy.evaluate(ref_i, ref_l, est_i, est_l)
>>> dict(scores)
{'T-Measure full': 0.94822745804853459,
'T-Measure reduced': 0.8732458222764804,
'T-Precision full': 0.96569179094693058,
'T-Precision reduced': 0.89939075137018787,
'T-Recall full': 0.93138358189386117,
'T-Recall reduced': 0.84857799953694923}
A more realistic example, using SALAMI pre-parsed annotations
>>> def load_salami(filename):
... "load SALAMI event format as labeled intervals"
... events, labels = mir_eval.io.load_labeled_events(filename)
... intervals = mir_eval.util.boundaries_to_intervals(events)[0]
... return intervals, labels[:len(intervals)]
>>> ref_files = ['data/10/parsed/textfile1_uppercase.txt',
... 'data/10/parsed/textfile1_lowercase.txt']
>>> est_files = ['data/10/parsed/textfile2_uppercase.txt',
... 'data/10/parsed/textfile2_lowercase.txt']
>>> ref = [load_salami(fname) for fname in ref_files]
>>> ref_int = [seg[0] for seg in ref]
>>> ref_lab = [seg[1] for seg in ref]
>>> est = [load_salami(fname) for fname in est_files]
>>> est_int = [seg[0] for seg in est]
>>> est_lab = [seg[1] for seg in est]
>>> scores = mir_eval.hierarchy.evaluate(ref_int, ref_lab,
    ...                                       est_int, est_lab)
>>> dict(scores)
{'T-Measure full': 0.66029225561405358,
'T-Measure reduced': 0.62001868041578034,
'T-Precision full': 0.66844764668949885,
'T-Precision reduced': 0.63252297209957919,
'T-Recall full': 0.6523334654992341,
'T-Recall reduced': 0.60799919710921635}
Parameters
----------
ref_intervals_hier : list of list-like
ref_labels_hier : list of list of str
est_intervals_hier : list of list-like
est_labels_hier : list of list of str
Hierarchical annotations are encoded as an ordered list
of segmentations. Each segmentation itself is a list (or list-like)
of intervals (\*_intervals_hier) and a list of lists of labels
(\*_labels_hier).
kwargs
additional keyword arguments to the evaluation metrics.
Returns
-------
scores : OrderedDict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
T-measures are computed in both the "full" (``transitive=True``) and
"reduced" (``transitive=False``) modes.
Raises
------
ValueError
Thrown when the provided annotations are not valid.
'''
# First, find the maximum length of the reference
_, t_end = _hierarchy_bounds(ref_intervals_hier)
# Pre-process the intervals to match the range of the reference,
# and start at 0
ref_intervals_hier, ref_labels_hier = _align_intervals(ref_intervals_hier,
ref_labels_hier,
t_min=0.0,
t_max=None)
est_intervals_hier, est_labels_hier = _align_intervals(est_intervals_hier,
est_labels_hier,
t_min=0.0,
t_max=t_end)
scores = collections.OrderedDict()
# Force the transitivity setting
kwargs['transitive'] = False
(scores['T-Precision reduced'],
scores['T-Recall reduced'],
scores['T-Measure reduced']) = util.filter_kwargs(tmeasure,
ref_intervals_hier,
est_intervals_hier,
**kwargs)
kwargs['transitive'] = True
(scores['T-Precision full'],
scores['T-Recall full'],
scores['T-Measure full']) = util.filter_kwargs(tmeasure,
ref_intervals_hier,
est_intervals_hier,
**kwargs)
(scores['L-Precision'],
scores['L-Recall'],
scores['L-Measure']) = util.filter_kwargs(lmeasure,
ref_intervals_hier,
ref_labels_hier,
est_intervals_hier,
est_labels_hier,
**kwargs)
return scores
|
python
|
def evaluate(ref_intervals_hier, ref_labels_hier,
est_intervals_hier, est_labels_hier, **kwargs):
'''Compute all hierarchical structure metrics for the given reference and
estimated annotations.
Examples
--------
A toy example with two two-layer annotations
>>> ref_i = [[[0, 30], [30, 60]], [[0, 15], [15, 30], [30, 45], [45, 60]]]
>>> est_i = [[[0, 45], [45, 60]], [[0, 15], [15, 30], [30, 45], [45, 60]]]
>>> ref_l = [ ['A', 'B'], ['a', 'b', 'a', 'c'] ]
>>> est_l = [ ['A', 'B'], ['a', 'a', 'b', 'b'] ]
>>> scores = mir_eval.hierarchy.evaluate(ref_i, ref_l, est_i, est_l)
>>> dict(scores)
{'T-Measure full': 0.94822745804853459,
'T-Measure reduced': 0.8732458222764804,
'T-Precision full': 0.96569179094693058,
'T-Precision reduced': 0.89939075137018787,
'T-Recall full': 0.93138358189386117,
'T-Recall reduced': 0.84857799953694923}
A more realistic example, using SALAMI pre-parsed annotations
>>> def load_salami(filename):
... "load SALAMI event format as labeled intervals"
... events, labels = mir_eval.io.load_labeled_events(filename)
... intervals = mir_eval.util.boundaries_to_intervals(events)[0]
... return intervals, labels[:len(intervals)]
>>> ref_files = ['data/10/parsed/textfile1_uppercase.txt',
... 'data/10/parsed/textfile1_lowercase.txt']
>>> est_files = ['data/10/parsed/textfile2_uppercase.txt',
... 'data/10/parsed/textfile2_lowercase.txt']
>>> ref = [load_salami(fname) for fname in ref_files]
>>> ref_int = [seg[0] for seg in ref]
>>> ref_lab = [seg[1] for seg in ref]
>>> est = [load_salami(fname) for fname in est_files]
>>> est_int = [seg[0] for seg in est]
>>> est_lab = [seg[1] for seg in est]
>>> scores = mir_eval.hierarchy.evaluate(ref_int, ref_lab,
    ...                                       est_int, est_lab)
>>> dict(scores)
{'T-Measure full': 0.66029225561405358,
'T-Measure reduced': 0.62001868041578034,
'T-Precision full': 0.66844764668949885,
'T-Precision reduced': 0.63252297209957919,
'T-Recall full': 0.6523334654992341,
'T-Recall reduced': 0.60799919710921635}
Parameters
----------
ref_intervals_hier : list of list-like
ref_labels_hier : list of list of str
est_intervals_hier : list of list-like
est_labels_hier : list of list of str
Hierarchical annotations are encoded as an ordered list
of segmentations. Each segmentation itself is a list (or list-like)
of intervals (\*_intervals_hier) and a list of lists of labels
(\*_labels_hier).
kwargs
additional keyword arguments to the evaluation metrics.
Returns
-------
scores : OrderedDict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
T-measures are computed in both the "full" (``transitive=True``) and
"reduced" (``transitive=False``) modes.
Raises
------
ValueError
Thrown when the provided annotations are not valid.
'''
# First, find the maximum length of the reference
_, t_end = _hierarchy_bounds(ref_intervals_hier)
# Pre-process the intervals to match the range of the reference,
# and start at 0
ref_intervals_hier, ref_labels_hier = _align_intervals(ref_intervals_hier,
ref_labels_hier,
t_min=0.0,
t_max=None)
est_intervals_hier, est_labels_hier = _align_intervals(est_intervals_hier,
est_labels_hier,
t_min=0.0,
t_max=t_end)
scores = collections.OrderedDict()
# Force the transitivity setting
kwargs['transitive'] = False
(scores['T-Precision reduced'],
scores['T-Recall reduced'],
scores['T-Measure reduced']) = util.filter_kwargs(tmeasure,
ref_intervals_hier,
est_intervals_hier,
**kwargs)
kwargs['transitive'] = True
(scores['T-Precision full'],
scores['T-Recall full'],
scores['T-Measure full']) = util.filter_kwargs(tmeasure,
ref_intervals_hier,
est_intervals_hier,
**kwargs)
(scores['L-Precision'],
scores['L-Recall'],
scores['L-Measure']) = util.filter_kwargs(lmeasure,
ref_intervals_hier,
ref_labels_hier,
est_intervals_hier,
est_labels_hier,
**kwargs)
return scores
|
[
"def",
"evaluate",
"(",
"ref_intervals_hier",
",",
"ref_labels_hier",
",",
"est_intervals_hier",
",",
"est_labels_hier",
",",
"*",
"*",
"kwargs",
")",
":",
"# First, find the maximum length of the reference",
"_",
",",
"t_end",
"=",
"_hierarchy_bounds",
"(",
"ref_intervals_hier",
")",
"# Pre-process the intervals to match the range of the reference,",
"# and start at 0",
"ref_intervals_hier",
",",
"ref_labels_hier",
"=",
"_align_intervals",
"(",
"ref_intervals_hier",
",",
"ref_labels_hier",
",",
"t_min",
"=",
"0.0",
",",
"t_max",
"=",
"None",
")",
"est_intervals_hier",
",",
"est_labels_hier",
"=",
"_align_intervals",
"(",
"est_intervals_hier",
",",
"est_labels_hier",
",",
"t_min",
"=",
"0.0",
",",
"t_max",
"=",
"t_end",
")",
"scores",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"# Force the transitivity setting",
"kwargs",
"[",
"'transitive'",
"]",
"=",
"False",
"(",
"scores",
"[",
"'T-Precision reduced'",
"]",
",",
"scores",
"[",
"'T-Recall reduced'",
"]",
",",
"scores",
"[",
"'T-Measure reduced'",
"]",
")",
"=",
"util",
".",
"filter_kwargs",
"(",
"tmeasure",
",",
"ref_intervals_hier",
",",
"est_intervals_hier",
",",
"*",
"*",
"kwargs",
")",
"kwargs",
"[",
"'transitive'",
"]",
"=",
"True",
"(",
"scores",
"[",
"'T-Precision full'",
"]",
",",
"scores",
"[",
"'T-Recall full'",
"]",
",",
"scores",
"[",
"'T-Measure full'",
"]",
")",
"=",
"util",
".",
"filter_kwargs",
"(",
"tmeasure",
",",
"ref_intervals_hier",
",",
"est_intervals_hier",
",",
"*",
"*",
"kwargs",
")",
"(",
"scores",
"[",
"'L-Precision'",
"]",
",",
"scores",
"[",
"'L-Recall'",
"]",
",",
"scores",
"[",
"'L-Measure'",
"]",
")",
"=",
"util",
".",
"filter_kwargs",
"(",
"lmeasure",
",",
"ref_intervals_hier",
",",
"ref_labels_hier",
",",
"est_intervals_hier",
",",
"est_labels_hier",
",",
"*",
"*",
"kwargs",
")",
"return",
"scores"
] |
Compute all hierarchical structure metrics for the given reference and
estimated annotations.
Examples
--------
A toy example with two two-layer annotations
>>> ref_i = [[[0, 30], [30, 60]], [[0, 15], [15, 30], [30, 45], [45, 60]]]
>>> est_i = [[[0, 45], [45, 60]], [[0, 15], [15, 30], [30, 45], [45, 60]]]
>>> ref_l = [ ['A', 'B'], ['a', 'b', 'a', 'c'] ]
>>> est_l = [ ['A', 'B'], ['a', 'a', 'b', 'b'] ]
>>> scores = mir_eval.hierarchy.evaluate(ref_i, ref_l, est_i, est_l)
>>> dict(scores)
{'T-Measure full': 0.94822745804853459,
'T-Measure reduced': 0.8732458222764804,
'T-Precision full': 0.96569179094693058,
'T-Precision reduced': 0.89939075137018787,
'T-Recall full': 0.93138358189386117,
'T-Recall reduced': 0.84857799953694923}
A more realistic example, using SALAMI pre-parsed annotations
>>> def load_salami(filename):
... "load SALAMI event format as labeled intervals"
... events, labels = mir_eval.io.load_labeled_events(filename)
... intervals = mir_eval.util.boundaries_to_intervals(events)[0]
... return intervals, labels[:len(intervals)]
>>> ref_files = ['data/10/parsed/textfile1_uppercase.txt',
... 'data/10/parsed/textfile1_lowercase.txt']
>>> est_files = ['data/10/parsed/textfile2_uppercase.txt',
... 'data/10/parsed/textfile2_lowercase.txt']
>>> ref = [load_salami(fname) for fname in ref_files]
>>> ref_int = [seg[0] for seg in ref]
>>> ref_lab = [seg[1] for seg in ref]
>>> est = [load_salami(fname) for fname in est_files]
>>> est_int = [seg[0] for seg in est]
>>> est_lab = [seg[1] for seg in est]
>>> scores = mir_eval.hierarchy.evaluate(ref_int, ref_lab,
...                                       est_int, est_lab)
>>> dict(scores)
{'T-Measure full': 0.66029225561405358,
'T-Measure reduced': 0.62001868041578034,
'T-Precision full': 0.66844764668949885,
'T-Precision reduced': 0.63252297209957919,
'T-Recall full': 0.6523334654992341,
'T-Recall reduced': 0.60799919710921635}
Parameters
----------
ref_intervals_hier : list of list-like
ref_labels_hier : list of list of str
est_intervals_hier : list of list-like
est_labels_hier : list of list of str
Hierarchical annotations are encoded as an ordered list
of segmentations. Each segmentation itself is a list (or list-like)
of intervals (\*_intervals_hier) and a list of lists of labels
(\*_labels_hier).
kwargs
additional keyword arguments to the evaluation metrics.
Returns
-------
scores : OrderedDict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
T-measures are computed in both the "full" (``transitive=True``) and
"reduced" (``transitive=False``) modes.
Raises
------
ValueError
Thrown when the provided annotations are not valid.
|
[
"Compute",
"all",
"hierarchical",
"structure",
"metrics",
"for",
"the",
"given",
"reference",
"and",
"estimated",
"annotations",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/hierarchy.py#L630-L751
|
train
|
craffel/mir_eval
|
mir_eval/display.py
|
__expand_limits
|
def __expand_limits(ax, limits, which='x'):
'''Helper function to expand axis limits'''
if which == 'x':
getter, setter = ax.get_xlim, ax.set_xlim
elif which == 'y':
getter, setter = ax.get_ylim, ax.set_ylim
else:
raise ValueError('invalid axis: {}'.format(which))
old_lims = getter()
new_lims = list(limits)
# infinite limits occur on new axis objects with no data
if np.isfinite(old_lims[0]):
new_lims[0] = min(old_lims[0], limits[0])
if np.isfinite(old_lims[1]):
new_lims[1] = max(old_lims[1], limits[1])
setter(new_lims)
|
python
|
def __expand_limits(ax, limits, which='x'):
'''Helper function to expand axis limits'''
if which == 'x':
getter, setter = ax.get_xlim, ax.set_xlim
elif which == 'y':
getter, setter = ax.get_ylim, ax.set_ylim
else:
raise ValueError('invalid axis: {}'.format(which))
old_lims = getter()
new_lims = list(limits)
# infinite limits occur on new axis objects with no data
if np.isfinite(old_lims[0]):
new_lims[0] = min(old_lims[0], limits[0])
if np.isfinite(old_lims[1]):
new_lims[1] = max(old_lims[1], limits[1])
setter(new_lims)
|
[
"def",
"__expand_limits",
"(",
"ax",
",",
"limits",
",",
"which",
"=",
"'x'",
")",
":",
"if",
"which",
"==",
"'x'",
":",
"getter",
",",
"setter",
"=",
"ax",
".",
"get_xlim",
",",
"ax",
".",
"set_xlim",
"elif",
"which",
"==",
"'y'",
":",
"getter",
",",
"setter",
"=",
"ax",
".",
"get_ylim",
",",
"ax",
".",
"set_ylim",
"else",
":",
"raise",
"ValueError",
"(",
"'invalid axis: {}'",
".",
"format",
"(",
"which",
")",
")",
"old_lims",
"=",
"getter",
"(",
")",
"new_lims",
"=",
"list",
"(",
"limits",
")",
"# infinite limits occur on new axis objects with no data",
"if",
"np",
".",
"isfinite",
"(",
"old_lims",
"[",
"0",
"]",
")",
":",
"new_lims",
"[",
"0",
"]",
"=",
"min",
"(",
"old_lims",
"[",
"0",
"]",
",",
"limits",
"[",
"0",
"]",
")",
"if",
"np",
".",
"isfinite",
"(",
"old_lims",
"[",
"1",
"]",
")",
":",
"new_lims",
"[",
"1",
"]",
"=",
"max",
"(",
"old_lims",
"[",
"1",
"]",
",",
"limits",
"[",
"1",
"]",
")",
"setter",
"(",
"new_lims",
")"
] |
Helper function to expand axis limits
|
[
"Helper",
"function",
"to",
"expand",
"axis",
"limits"
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/display.py#L19-L39
|
train
|
craffel/mir_eval
|
mir_eval/display.py
|
__get_axes
|
def __get_axes(ax=None, fig=None):
'''Get or construct the target axes object for a new plot.
Parameters
----------
ax : matplotlib.pyplot.axes, optional
If provided, return this axes object directly.
fig : matplotlib.figure.Figure, optional
The figure to query for axes.
By default, uses the current figure `plt.gcf()`.
Returns
-------
ax : matplotlib.pyplot.axes
An axis handle on which to draw the segmentation.
If none is provided, a new set of axes is created.
new_axes : bool
If `True`, the axis object was newly constructed.
If `False`, the axis object already existed.
'''
new_axes = False
if ax is not None:
return ax, new_axes
if fig is None:
import matplotlib.pyplot as plt
fig = plt.gcf()
if not fig.get_axes():
new_axes = True
return fig.gca(), new_axes
|
python
|
def __get_axes(ax=None, fig=None):
'''Get or construct the target axes object for a new plot.
Parameters
----------
ax : matplotlib.pyplot.axes, optional
If provided, return this axes object directly.
fig : matplotlib.figure.Figure, optional
The figure to query for axes.
By default, uses the current figure `plt.gcf()`.
Returns
-------
ax : matplotlib.pyplot.axes
An axis handle on which to draw the segmentation.
If none is provided, a new set of axes is created.
new_axes : bool
If `True`, the axis object was newly constructed.
If `False`, the axis object already existed.
'''
new_axes = False
if ax is not None:
return ax, new_axes
if fig is None:
import matplotlib.pyplot as plt
fig = plt.gcf()
if not fig.get_axes():
new_axes = True
return fig.gca(), new_axes
|
[
"def",
"__get_axes",
"(",
"ax",
"=",
"None",
",",
"fig",
"=",
"None",
")",
":",
"new_axes",
"=",
"False",
"if",
"ax",
"is",
"not",
"None",
":",
"return",
"ax",
",",
"new_axes",
"if",
"fig",
"is",
"None",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"fig",
"=",
"plt",
".",
"gcf",
"(",
")",
"if",
"not",
"fig",
".",
"get_axes",
"(",
")",
":",
"new_axes",
"=",
"True",
"return",
"fig",
".",
"gca",
"(",
")",
",",
"new_axes"
] |
Get or construct the target axes object for a new plot.
Parameters
----------
ax : matplotlib.pyplot.axes, optional
If provided, return this axes object directly.
fig : matplotlib.figure.Figure, optional
The figure to query for axes.
By default, uses the current figure `plt.gcf()`.
Returns
-------
ax : matplotlib.pyplot.axes
An axis handle on which to draw the segmentation.
If none is provided, a new set of axes is created.
new_axes : bool
If `True`, the axis object was newly constructed.
If `False`, the axis object already existed.
|
[
"Get",
"or",
"construct",
"the",
"target",
"axes",
"object",
"for",
"a",
"new",
"plot",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/display.py#L42-L79
|
train
|
craffel/mir_eval
|
mir_eval/display.py
|
segments
|
def segments(intervals, labels, base=None, height=None, text=False,
text_kw=None, ax=None, **kwargs):
'''Plot a segmentation as a set of disjoint rectangles.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
base : number
The vertical position of the base of the rectangles.
By default, this will be the bottom of the plot.
height : number
The height of the rectangles.
By default, this will be the top of the plot (minus ``base``).
text : bool
If true, each segment's label is displayed in its
upper-left corner
text_kw : dict
If ``text == True``, the properties of the text
object can be specified here.
See ``matplotlib.pyplot.Text`` for valid parameters
ax : matplotlib.pyplot.axes
An axis handle on which to draw the segmentation.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to pass to
``matplotlib.patches.Rectangle``.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
if text_kw is None:
text_kw = dict()
text_kw.setdefault('va', 'top')
text_kw.setdefault('clip_on', True)
text_kw.setdefault('bbox', dict(boxstyle='round', facecolor='white'))
# Make sure we have a numpy array
intervals = np.atleast_2d(intervals)
seg_def_style = dict(linewidth=1)
ax, new_axes = __get_axes(ax=ax)
if new_axes:
ax.set_ylim([0, 1])
# Infer height
if base is None:
base = ax.get_ylim()[0]
if height is None:
height = ax.get_ylim()[1]
cycler = ax._get_patches_for_fill.prop_cycler
seg_map = dict()
for lab in labels:
if lab in seg_map:
continue
style = next(cycler)
seg_map[lab] = seg_def_style.copy()
seg_map[lab].update(style)
# Swap color -> facecolor here so we preserve edgecolor on rects
seg_map[lab]['facecolor'] = seg_map[lab].pop('color')
seg_map[lab].update(kwargs)
seg_map[lab]['label'] = lab
for ival, lab in zip(intervals, labels):
rect = Rectangle((ival[0], base), ival[1] - ival[0], height,
**seg_map[lab])
ax.add_patch(rect)
seg_map[lab].pop('label', None)
if text:
ann = ax.annotate(lab,
xy=(ival[0], height), xycoords='data',
xytext=(8, -10), textcoords='offset points',
**text_kw)
ann.set_clip_path(rect)
if new_axes:
ax.set_yticks([])
# Only expand if we have data
if intervals.size:
__expand_limits(ax, [intervals.min(), intervals.max()], which='x')
return ax
|
python
|
def segments(intervals, labels, base=None, height=None, text=False,
text_kw=None, ax=None, **kwargs):
'''Plot a segmentation as a set of disjoint rectangles.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
base : number
The vertical position of the base of the rectangles.
By default, this will be the bottom of the plot.
height : number
The height of the rectangles.
By default, this will be the top of the plot (minus ``base``).
text : bool
If true, each segment's label is displayed in its
upper-left corner
text_kw : dict
If ``text == True``, the properties of the text
object can be specified here.
See ``matplotlib.pyplot.Text`` for valid parameters
ax : matplotlib.pyplot.axes
An axis handle on which to draw the segmentation.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to pass to
``matplotlib.patches.Rectangle``.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
if text_kw is None:
text_kw = dict()
text_kw.setdefault('va', 'top')
text_kw.setdefault('clip_on', True)
text_kw.setdefault('bbox', dict(boxstyle='round', facecolor='white'))
# Make sure we have a numpy array
intervals = np.atleast_2d(intervals)
seg_def_style = dict(linewidth=1)
ax, new_axes = __get_axes(ax=ax)
if new_axes:
ax.set_ylim([0, 1])
# Infer height
if base is None:
base = ax.get_ylim()[0]
if height is None:
height = ax.get_ylim()[1]
cycler = ax._get_patches_for_fill.prop_cycler
seg_map = dict()
for lab in labels:
if lab in seg_map:
continue
style = next(cycler)
seg_map[lab] = seg_def_style.copy()
seg_map[lab].update(style)
# Swap color -> facecolor here so we preserve edgecolor on rects
seg_map[lab]['facecolor'] = seg_map[lab].pop('color')
seg_map[lab].update(kwargs)
seg_map[lab]['label'] = lab
for ival, lab in zip(intervals, labels):
rect = Rectangle((ival[0], base), ival[1] - ival[0], height,
**seg_map[lab])
ax.add_patch(rect)
seg_map[lab].pop('label', None)
if text:
ann = ax.annotate(lab,
xy=(ival[0], height), xycoords='data',
xytext=(8, -10), textcoords='offset points',
**text_kw)
ann.set_clip_path(rect)
if new_axes:
ax.set_yticks([])
# Only expand if we have data
if intervals.size:
__expand_limits(ax, [intervals.min(), intervals.max()], which='x')
return ax
|
[
"def",
"segments",
"(",
"intervals",
",",
"labels",
",",
"base",
"=",
"None",
",",
"height",
"=",
"None",
",",
"text",
"=",
"False",
",",
"text_kw",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"text_kw",
"is",
"None",
":",
"text_kw",
"=",
"dict",
"(",
")",
"text_kw",
".",
"setdefault",
"(",
"'va'",
",",
"'top'",
")",
"text_kw",
".",
"setdefault",
"(",
"'clip_on'",
",",
"True",
")",
"text_kw",
".",
"setdefault",
"(",
"'bbox'",
",",
"dict",
"(",
"boxstyle",
"=",
"'round'",
",",
"facecolor",
"=",
"'white'",
")",
")",
"# Make sure we have a numpy array",
"intervals",
"=",
"np",
".",
"atleast_2d",
"(",
"intervals",
")",
"seg_def_style",
"=",
"dict",
"(",
"linewidth",
"=",
"1",
")",
"ax",
",",
"new_axes",
"=",
"__get_axes",
"(",
"ax",
"=",
"ax",
")",
"if",
"new_axes",
":",
"ax",
".",
"set_ylim",
"(",
"[",
"0",
",",
"1",
"]",
")",
"# Infer height",
"if",
"base",
"is",
"None",
":",
"base",
"=",
"ax",
".",
"get_ylim",
"(",
")",
"[",
"0",
"]",
"if",
"height",
"is",
"None",
":",
"height",
"=",
"ax",
".",
"get_ylim",
"(",
")",
"[",
"1",
"]",
"cycler",
"=",
"ax",
".",
"_get_patches_for_fill",
".",
"prop_cycler",
"seg_map",
"=",
"dict",
"(",
")",
"for",
"lab",
"in",
"labels",
":",
"if",
"lab",
"in",
"seg_map",
":",
"continue",
"style",
"=",
"next",
"(",
"cycler",
")",
"seg_map",
"[",
"lab",
"]",
"=",
"seg_def_style",
".",
"copy",
"(",
")",
"seg_map",
"[",
"lab",
"]",
".",
"update",
"(",
"style",
")",
"# Swap color -> facecolor here so we preserve edgecolor on rects",
"seg_map",
"[",
"lab",
"]",
"[",
"'facecolor'",
"]",
"=",
"seg_map",
"[",
"lab",
"]",
".",
"pop",
"(",
"'color'",
")",
"seg_map",
"[",
"lab",
"]",
".",
"update",
"(",
"kwargs",
")",
"seg_map",
"[",
"lab",
"]",
"[",
"'label'",
"]",
"=",
"lab",
"for",
"ival",
",",
"lab",
"in",
"zip",
"(",
"intervals",
",",
"labels",
")",
":",
"rect",
"=",
"Rectangle",
"(",
"(",
"ival",
"[",
"0",
"]",
",",
"base",
")",
",",
"ival",
"[",
"1",
"]",
"-",
"ival",
"[",
"0",
"]",
",",
"height",
",",
"*",
"*",
"seg_map",
"[",
"lab",
"]",
")",
"ax",
".",
"add_patch",
"(",
"rect",
")",
"seg_map",
"[",
"lab",
"]",
".",
"pop",
"(",
"'label'",
",",
"None",
")",
"if",
"text",
":",
"ann",
"=",
"ax",
".",
"annotate",
"(",
"lab",
",",
"xy",
"=",
"(",
"ival",
"[",
"0",
"]",
",",
"height",
")",
",",
"xycoords",
"=",
"'data'",
",",
"xytext",
"=",
"(",
"8",
",",
"-",
"10",
")",
",",
"textcoords",
"=",
"'offset points'",
",",
"*",
"*",
"text_kw",
")",
"ann",
".",
"set_clip_path",
"(",
"rect",
")",
"if",
"new_axes",
":",
"ax",
".",
"set_yticks",
"(",
"[",
"]",
")",
"# Only expand if we have data",
"if",
"intervals",
".",
"size",
":",
"__expand_limits",
"(",
"ax",
",",
"[",
"intervals",
".",
"min",
"(",
")",
",",
"intervals",
".",
"max",
"(",
")",
"]",
",",
"which",
"=",
"'x'",
")",
"return",
"ax"
] |
Plot a segmentation as a set of disjoint rectangles.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
base : number
The vertical position of the base of the rectangles.
By default, this will be the bottom of the plot.
height : number
The height of the rectangles.
By default, this will be the top of the plot (minus ``base``).
text : bool
If true, each segment's label is displayed in its
upper-left corner
text_kw : dict
If ``text == True``, the properties of the text
object can be specified here.
See ``matplotlib.pyplot.Text`` for valid parameters
ax : matplotlib.pyplot.axes
An axis handle on which to draw the segmentation.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to pass to
``matplotlib.patches.Rectangle``.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
|
[
"Plot",
"a",
"segmentation",
"as",
"a",
"set",
"of",
"disjoint",
"rectangles",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/display.py#L82-L186
|
train
|
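A minimal plotting sketch for `segments`, assuming a matplotlib version compatible with this release of mir_eval; the intervals, labels, and output filename are invented:

import numpy as np
import matplotlib
matplotlib.use('Agg')  # headless backend, chosen here only for the sketch
import matplotlib.pyplot as plt
import mir_eval.display

intervals = np.array([[0.0, 10.0], [10.0, 25.0], [25.0, 40.0]])
labels = ['verse', 'chorus', 'verse']

ax = mir_eval.display.segments(intervals, labels, text=True)
ax.legend()
plt.savefig('segments.png')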
craffel/mir_eval
|
mir_eval/display.py
|
labeled_intervals
|
def labeled_intervals(intervals, labels, label_set=None,
base=None, height=None, extend_labels=True,
ax=None, tick=True, **kwargs):
'''Plot labeled intervals with each label on its own row.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
label_set : list
An (ordered) list of labels to determine the plotting order.
If not provided, the labels will be inferred from
``ax.get_yticklabels()``.
If no ``yticklabels`` exist, then the sorted set of unique values
in ``labels`` is taken as the label set.
base : np.ndarray, shape=(n,), optional
Vertical positions of each label.
By default, labels are positioned at integers
``np.arange(len(labels))``.
height : scalar or np.ndarray, shape=(n,), optional
Height for each label.
If scalar, the same value is applied to all labels.
By default, each label has ``height=1``.
extend_labels : bool
If ``False``, only values of ``labels`` that also exist in
``label_set`` will be shown.
If ``True``, all labels are shown, with those in `labels` but
not in `label_set` appended to the top of the plot.
A horizontal line is drawn to indicate the separation between
values in or out of ``label_set``.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the intervals.
If none is provided, a new set of axes is created.
tick : bool
If ``True``, sets tick positions and labels on the y-axis.
kwargs
Additional keyword arguments to pass to
`matplotlib.collection.BrokenBarHCollection`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
# Get the axes handle
ax, _ = __get_axes(ax=ax)
# Make sure we have a numpy array
intervals = np.atleast_2d(intervals)
if label_set is None:
# If we have non-empty pre-existing tick labels, use them
label_set = [_.get_text() for _ in ax.get_yticklabels()]
# If none of the label strings have content, treat it as empty
if not any(label_set):
label_set = []
else:
label_set = list(label_set)
# Put additional labels at the end, in order
if extend_labels:
ticks = label_set + sorted(set(labels) - set(label_set))
elif label_set:
ticks = label_set
else:
ticks = sorted(set(labels))
style = dict(linewidth=1)
style.update(next(ax._get_patches_for_fill.prop_cycler))
# Swap color -> facecolor here so we preserve edgecolor on rects
style['facecolor'] = style.pop('color')
style.update(kwargs)
if base is None:
base = np.arange(len(ticks))
if height is None:
height = 1
if np.isscalar(height):
height = height * np.ones_like(base)
seg_y = dict()
for ybase, yheight, lab in zip(base, height, ticks):
seg_y[lab] = (ybase, yheight)
xvals = defaultdict(list)
for ival, lab in zip(intervals, labels):
if lab not in seg_y:
continue
xvals[lab].append((ival[0], ival[1] - ival[0]))
for lab in seg_y:
ax.add_collection(BrokenBarHCollection(xvals[lab], seg_y[lab],
**style))
# Pop the label after the first time we see it, so we only get
# one legend entry
style.pop('label', None)
# Draw a line separating the new labels from pre-existing labels
if label_set != ticks:
ax.axhline(len(label_set), color='k', alpha=0.5)
if tick:
ax.grid(True, axis='y')
ax.set_yticks([])
ax.set_yticks(base)
ax.set_yticklabels(ticks, va='bottom')
ax.yaxis.set_major_formatter(IntervalFormatter(base, ticks))
if base.size:
__expand_limits(ax, [base.min(), (base + height).max()], which='y')
if intervals.size:
__expand_limits(ax, [intervals.min(), intervals.max()], which='x')
return ax
|
python
|
def labeled_intervals(intervals, labels, label_set=None,
base=None, height=None, extend_labels=True,
ax=None, tick=True, **kwargs):
'''Plot labeled intervals with each label on its own row.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
label_set : list
An (ordered) list of labels to determine the plotting order.
If not provided, the labels will be inferred from
``ax.get_yticklabels()``.
If no ``yticklabels`` exist, then the sorted set of unique values
in ``labels`` is taken as the label set.
base : np.ndarray, shape=(n,), optional
Vertical positions of each label.
By default, labels are positioned at integers
``np.arange(len(labels))``.
height : scalar or np.ndarray, shape=(n,), optional
Height for each label.
If scalar, the same value is applied to all labels.
By default, each label has ``height=1``.
extend_labels : bool
If ``False``, only values of ``labels`` that also exist in
``label_set`` will be shown.
If ``True``, all labels are shown, with those in `labels` but
not in `label_set` appended to the top of the plot.
A horizontal line is drawn to indicate the separation between
values in or out of ``label_set``.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the intervals.
If none is provided, a new set of axes is created.
tick : bool
If ``True``, sets tick positions and labels on the y-axis.
kwargs
Additional keyword arguments to pass to
`matplotlib.collection.BrokenBarHCollection`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
# Get the axes handle
ax, _ = __get_axes(ax=ax)
# Make sure we have a numpy array
intervals = np.atleast_2d(intervals)
if label_set is None:
# If we have non-empty pre-existing tick labels, use them
label_set = [_.get_text() for _ in ax.get_yticklabels()]
# If none of the label strings have content, treat it as empty
if not any(label_set):
label_set = []
else:
label_set = list(label_set)
# Put additional labels at the end, in order
if extend_labels:
ticks = label_set + sorted(set(labels) - set(label_set))
elif label_set:
ticks = label_set
else:
ticks = sorted(set(labels))
style = dict(linewidth=1)
style.update(next(ax._get_patches_for_fill.prop_cycler))
# Swap color -> facecolor here so we preserve edgecolor on rects
style['facecolor'] = style.pop('color')
style.update(kwargs)
if base is None:
base = np.arange(len(ticks))
if height is None:
height = 1
if np.isscalar(height):
height = height * np.ones_like(base)
seg_y = dict()
for ybase, yheight, lab in zip(base, height, ticks):
seg_y[lab] = (ybase, yheight)
xvals = defaultdict(list)
for ival, lab in zip(intervals, labels):
if lab not in seg_y:
continue
xvals[lab].append((ival[0], ival[1] - ival[0]))
for lab in seg_y:
ax.add_collection(BrokenBarHCollection(xvals[lab], seg_y[lab],
**style))
# Pop the label after the first time we see it, so we only get
# one legend entry
style.pop('label', None)
# Draw a line separating the new labels from pre-existing labels
if label_set != ticks:
ax.axhline(len(label_set), color='k', alpha=0.5)
if tick:
ax.grid(True, axis='y')
ax.set_yticks([])
ax.set_yticks(base)
ax.set_yticklabels(ticks, va='bottom')
ax.yaxis.set_major_formatter(IntervalFormatter(base, ticks))
if base.size:
__expand_limits(ax, [base.min(), (base + height).max()], which='y')
if intervals.size:
__expand_limits(ax, [intervals.min(), intervals.max()], which='x')
return ax
|
[
"def",
"labeled_intervals",
"(",
"intervals",
",",
"labels",
",",
"label_set",
"=",
"None",
",",
"base",
"=",
"None",
",",
"height",
"=",
"None",
",",
"extend_labels",
"=",
"True",
",",
"ax",
"=",
"None",
",",
"tick",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"# Get the axes handle",
"ax",
",",
"_",
"=",
"__get_axes",
"(",
"ax",
"=",
"ax",
")",
"# Make sure we have a numpy array",
"intervals",
"=",
"np",
".",
"atleast_2d",
"(",
"intervals",
")",
"if",
"label_set",
"is",
"None",
":",
"# If we have non-empty pre-existing tick labels, use them",
"label_set",
"=",
"[",
"_",
".",
"get_text",
"(",
")",
"for",
"_",
"in",
"ax",
".",
"get_yticklabels",
"(",
")",
"]",
"# If none of the label strings have content, treat it as empty",
"if",
"not",
"any",
"(",
"label_set",
")",
":",
"label_set",
"=",
"[",
"]",
"else",
":",
"label_set",
"=",
"list",
"(",
"label_set",
")",
"# Put additional labels at the end, in order",
"if",
"extend_labels",
":",
"ticks",
"=",
"label_set",
"+",
"sorted",
"(",
"set",
"(",
"labels",
")",
"-",
"set",
"(",
"label_set",
")",
")",
"elif",
"label_set",
":",
"ticks",
"=",
"label_set",
"else",
":",
"ticks",
"=",
"sorted",
"(",
"set",
"(",
"labels",
")",
")",
"style",
"=",
"dict",
"(",
"linewidth",
"=",
"1",
")",
"style",
".",
"update",
"(",
"next",
"(",
"ax",
".",
"_get_patches_for_fill",
".",
"prop_cycler",
")",
")",
"# Swap color -> facecolor here so we preserve edgecolor on rects",
"style",
"[",
"'facecolor'",
"]",
"=",
"style",
".",
"pop",
"(",
"'color'",
")",
"style",
".",
"update",
"(",
"kwargs",
")",
"if",
"base",
"is",
"None",
":",
"base",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"ticks",
")",
")",
"if",
"height",
"is",
"None",
":",
"height",
"=",
"1",
"if",
"np",
".",
"isscalar",
"(",
"height",
")",
":",
"height",
"=",
"height",
"*",
"np",
".",
"ones_like",
"(",
"base",
")",
"seg_y",
"=",
"dict",
"(",
")",
"for",
"ybase",
",",
"yheight",
",",
"lab",
"in",
"zip",
"(",
"base",
",",
"height",
",",
"ticks",
")",
":",
"seg_y",
"[",
"lab",
"]",
"=",
"(",
"ybase",
",",
"yheight",
")",
"xvals",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"ival",
",",
"lab",
"in",
"zip",
"(",
"intervals",
",",
"labels",
")",
":",
"if",
"lab",
"not",
"in",
"seg_y",
":",
"continue",
"xvals",
"[",
"lab",
"]",
".",
"append",
"(",
"(",
"ival",
"[",
"0",
"]",
",",
"ival",
"[",
"1",
"]",
"-",
"ival",
"[",
"0",
"]",
")",
")",
"for",
"lab",
"in",
"seg_y",
":",
"ax",
".",
"add_collection",
"(",
"BrokenBarHCollection",
"(",
"xvals",
"[",
"lab",
"]",
",",
"seg_y",
"[",
"lab",
"]",
",",
"*",
"*",
"style",
")",
")",
"# Pop the label after the first time we see it, so we only get",
"# one legend entry",
"style",
".",
"pop",
"(",
"'label'",
",",
"None",
")",
"# Draw a line separating the new labels from pre-existing labels",
"if",
"label_set",
"!=",
"ticks",
":",
"ax",
".",
"axhline",
"(",
"len",
"(",
"label_set",
")",
",",
"color",
"=",
"'k'",
",",
"alpha",
"=",
"0.5",
")",
"if",
"tick",
":",
"ax",
".",
"grid",
"(",
"True",
",",
"axis",
"=",
"'y'",
")",
"ax",
".",
"set_yticks",
"(",
"[",
"]",
")",
"ax",
".",
"set_yticks",
"(",
"base",
")",
"ax",
".",
"set_yticklabels",
"(",
"ticks",
",",
"va",
"=",
"'bottom'",
")",
"ax",
".",
"yaxis",
".",
"set_major_formatter",
"(",
"IntervalFormatter",
"(",
"base",
",",
"ticks",
")",
")",
"if",
"base",
".",
"size",
":",
"__expand_limits",
"(",
"ax",
",",
"[",
"base",
".",
"min",
"(",
")",
",",
"(",
"base",
"+",
"height",
")",
".",
"max",
"(",
")",
"]",
",",
"which",
"=",
"'y'",
")",
"if",
"intervals",
".",
"size",
":",
"__expand_limits",
"(",
"ax",
",",
"[",
"intervals",
".",
"min",
"(",
")",
",",
"intervals",
".",
"max",
"(",
")",
"]",
",",
"which",
"=",
"'x'",
")",
"return",
"ax"
] |
Plot labeled intervals with each label on its own row.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
label_set : list
An (ordered) list of labels to determine the plotting order.
If not provided, the labels will be inferred from
``ax.get_yticklabels()``.
If no ``yticklabels`` exist, then the sorted set of unique values
in ``labels`` is taken as the label set.
base : np.ndarray, shape=(n,), optional
Vertical positions of each label.
By default, labels are positioned at integers
``np.arange(len(labels))``.
height : scalar or np.ndarray, shape=(n,), optional
Height for each label.
If scalar, the same value is applied to all labels.
By default, each label has ``height=1``.
extend_labels : bool
If ``False``, only values of ``labels`` that also exist in
``label_set`` will be shown.
If ``True``, all labels are shown, with those in `labels` but
not in `label_set` appended to the top of the plot.
A horizontal line is drawn to indicate the separation between
values in or out of ``label_set``.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the intervals.
If none is provided, a new set of axes is created.
tick : bool
If ``True``, sets tick positions and labels on the y-axis.
kwargs
Additional keyword arguments to pass to
`matplotlib.collections.BrokenBarHCollection`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
|
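A minimal usage sketch for ``labeled_intervals`` follows; the interval and label values are invented for illustration, and it assumes ``mir_eval`` and ``matplotlib`` are installed.

import numpy as np
import matplotlib.pyplot as plt
import mir_eval.display

# Three made-up segments, labeled by section name
intervals = np.array([[0.0, 2.5], [2.5, 5.0], [5.0, 7.0]])
labels = ['verse', 'chorus', 'verse']

ax = mir_eval.display.labeled_intervals(intervals, labels)
ax.set_xlabel('Time (s)')
plt.show()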
[
"Plot",
"labeled",
"intervals",
"with",
"each",
"label",
"on",
"its",
"own",
"row",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/display.py#L189-L320
|
train
|
craffel/mir_eval
|
mir_eval/display.py
|
hierarchy
|
def hierarchy(intervals_hier, labels_hier, levels=None, ax=None, **kwargs):
'''Plot a hierarchical segmentation
Parameters
----------
intervals_hier : list of np.ndarray
A list of segmentation intervals. Each element should be
an n-by-2 array of segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
Segmentations should be ordered by increasing specificity.
labels_hier : list of list-like
A list of segmentation labels. Each element should
be a list of labels for the corresponding element in
`intervals_hier`.
levels : list of string
Each element ``levels[i]`` is a label for the ``i`` th segmentation.
This is used in the legend to denote the levels in a segment hierarchy.
kwargs
Additional keyword arguments to `labeled_intervals`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
# This will break if a segment label exists in multiple levels
if levels is None:
levels = list(range(len(intervals_hier)))
# Get the axes handle
ax, _ = __get_axes(ax=ax)
# Count the pre-existing patches
n_patches = len(ax.patches)
for ints, labs, key in zip(intervals_hier[::-1],
labels_hier[::-1],
levels[::-1]):
labeled_intervals(ints, labs, label=key, ax=ax, **kwargs)
# Reverse the patch ordering for anything we've added.
# This way, intervals are listed in the legend from top to bottom
ax.patches[n_patches:] = ax.patches[n_patches:][::-1]
return ax
|
python
|
def hierarchy(intervals_hier, labels_hier, levels=None, ax=None, **kwargs):
'''Plot a hierarchical segmentation
Parameters
----------
intervals_hier : list of np.ndarray
A list of segmentation intervals. Each element should be
an n-by-2 array of segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
Segmentations should be ordered by increasing specificity.
labels_hier : list of list-like
A list of segmentation labels. Each element should
be a list of labels for the corresponding element in
`intervals_hier`.
levels : list of string
Each element ``levels[i]`` is a label for the ``i`` th segmentation.
This is used in the legend to denote the levels in a segment hierarchy.
kwargs
Additional keyword arguments to `labeled_intervals`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
# This will break if a segment label exists in multiple levels
if levels is None:
levels = list(range(len(intervals_hier)))
# Get the axes handle
ax, _ = __get_axes(ax=ax)
# Count the pre-existing patches
n_patches = len(ax.patches)
for ints, labs, key in zip(intervals_hier[::-1],
labels_hier[::-1],
levels[::-1]):
labeled_intervals(ints, labs, label=key, ax=ax, **kwargs)
# Reverse the patch ordering for anything we've added.
# This way, intervals are listed in the legend from top to bottom
ax.patches[n_patches:] = ax.patches[n_patches:][::-1]
return ax
|
[
"def",
"hierarchy",
"(",
"intervals_hier",
",",
"labels_hier",
",",
"levels",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# This will break if a segment label exists in multiple levels",
"if",
"levels",
"is",
"None",
":",
"levels",
"=",
"list",
"(",
"range",
"(",
"len",
"(",
"intervals_hier",
")",
")",
")",
"# Get the axes handle",
"ax",
",",
"_",
"=",
"__get_axes",
"(",
"ax",
"=",
"ax",
")",
"# Count the pre-existing patches",
"n_patches",
"=",
"len",
"(",
"ax",
".",
"patches",
")",
"for",
"ints",
",",
"labs",
",",
"key",
"in",
"zip",
"(",
"intervals_hier",
"[",
":",
":",
"-",
"1",
"]",
",",
"labels_hier",
"[",
":",
":",
"-",
"1",
"]",
",",
"levels",
"[",
":",
":",
"-",
"1",
"]",
")",
":",
"labeled_intervals",
"(",
"ints",
",",
"labs",
",",
"label",
"=",
"key",
",",
"ax",
"=",
"ax",
",",
"*",
"*",
"kwargs",
")",
"# Reverse the patch ordering for anything we've added.",
"# This way, intervals are listed in the legend from top to bottom",
"ax",
".",
"patches",
"[",
"n_patches",
":",
"]",
"=",
"ax",
".",
"patches",
"[",
"n_patches",
":",
"]",
"[",
":",
":",
"-",
"1",
"]",
"return",
"ax"
] |
Plot a hierarchical segmentation
Parameters
----------
intervals_hier : list of np.ndarray
A list of segmentation intervals. Each element should be
an n-by-2 array of segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
Segmentations should be ordered by increasing specificity.
labels_hier : list of list-like
A list of segmentation labels. Each element should
be a list of labels for the corresponding element in
`intervals_hier`.
levels : list of string
Each element ``levels[i]`` is a label for the ``i`` th segmentation.
This is used in the legend to denote the levels in a segment hierarchy.
kwargs
Additional keyword arguments to `labeled_intervals`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
|
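To make the level ordering concrete, here is a hedged usage sketch of ``hierarchy`` with two invented annotation levels (coarse and fine); it assumes ``mir_eval`` is installed.

import numpy as np
import mir_eval.display

# Two made-up segmentation levels, ordered coarse -> fine
intervals_hier = [np.array([[0.0, 5.0], [5.0, 10.0]]),
                  np.array([[0.0, 2.5], [2.5, 5.0], [5.0, 7.5], [7.5, 10.0]])]
labels_hier = [['A', 'B'], ['a1', 'a2', 'b1', 'b2']]

ax = mir_eval.display.hierarchy(intervals_hier, labels_hier,
                                levels=['coarse', 'fine'])
ax.legend()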
[
"Plot",
"a",
"hierarchical",
"segmentation"
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/display.py#L343-L391
|
train
|
craffel/mir_eval
|
mir_eval/display.py
|
events
|
def events(times, labels=None, base=None, height=None, ax=None, text_kw=None,
**kwargs):
'''Plot event times as a set of vertical lines
Parameters
----------
times : np.ndarray, shape=(n,)
event times, in the format returned by
:func:`mir_eval.io.load_events` or
:func:`mir_eval.io.load_labeled_events`.
labels : list, shape=(n,), optional
event labels, in the format returned by
:func:`mir_eval.io.load_labeled_events`.
base : number
The vertical position of the base of the line.
By default, this will be the bottom of the plot.
height : number
The height of the lines.
By default, this will be the top of the plot (minus `base`).
ax : matplotlib.pyplot.axes
An axis handle on which to draw the events.
If none is provided, a new set of axes is created.
text_kw : dict
If `labels` is provided, the properties of the text
objects can be specified here.
See `matplotlib.pyplot.Text` for valid parameters
kwargs
Additional keyword arguments to pass to
`matplotlib.pyplot.vlines`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
if text_kw is None:
text_kw = dict()
text_kw.setdefault('va', 'top')
text_kw.setdefault('clip_on', True)
text_kw.setdefault('bbox', dict(boxstyle='round', facecolor='white'))
# make sure we have an array for times
times = np.asarray(times)
# Get the axes handle
ax, new_axes = __get_axes(ax=ax)
# If we have fresh axes, set the limits
if new_axes:
# Infer base and height
if base is None:
base = 0
if height is None:
height = 1
ax.set_ylim([base, height])
else:
if base is None:
base = ax.get_ylim()[0]
if height is None:
height = ax.get_ylim()[1]
cycler = ax._get_patches_for_fill.prop_cycler
style = next(cycler).copy()
style.update(kwargs)
# If the user provided 'colors', don't override it with 'color'
if 'colors' in style:
style.pop('color', None)
lines = ax.vlines(times, base, base + height, **style)
if labels:
for path, lab in zip(lines.get_paths(), labels):
ax.annotate(lab,
xy=(path.vertices[0][0], height),
xycoords='data',
xytext=(8, -10), textcoords='offset points',
**text_kw)
if new_axes:
ax.set_yticks([])
__expand_limits(ax, [base, base + height], which='y')
if times.size:
__expand_limits(ax, [times.min(), times.max()], which='x')
return ax
|
python
|
def events(times, labels=None, base=None, height=None, ax=None, text_kw=None,
**kwargs):
'''Plot event times as a set of vertical lines
Parameters
----------
times : np.ndarray, shape=(n,)
event times, in the format returned by
:func:`mir_eval.io.load_events` or
:func:`mir_eval.io.load_labeled_events`.
labels : list, shape=(n,), optional
event labels, in the format returned by
:func:`mir_eval.io.load_labeled_events`.
base : number
The vertical position of the base of the line.
By default, this will be the bottom of the plot.
height : number
The height of the lines.
By default, this will be the top of the plot (minus `base`).
ax : matplotlib.pyplot.axes
An axis handle on which to draw the events.
If none is provided, a new set of axes is created.
text_kw : dict
If `labels` is provided, the properties of the text
objects can be specified here.
See `matplotlib.pyplot.Text` for valid parameters
kwargs
Additional keyword arguments to pass to
`matplotlib.pyplot.vlines`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
if text_kw is None:
text_kw = dict()
text_kw.setdefault('va', 'top')
text_kw.setdefault('clip_on', True)
text_kw.setdefault('bbox', dict(boxstyle='round', facecolor='white'))
# make sure we have an array for times
times = np.asarray(times)
# Get the axes handle
ax, new_axes = __get_axes(ax=ax)
# If we have fresh axes, set the limits
if new_axes:
# Infer base and height
if base is None:
base = 0
if height is None:
height = 1
ax.set_ylim([base, height])
else:
if base is None:
base = ax.get_ylim()[0]
if height is None:
height = ax.get_ylim()[1]
cycler = ax._get_patches_for_fill.prop_cycler
style = next(cycler).copy()
style.update(kwargs)
# If the user provided 'colors', don't override it with 'color'
if 'colors' in style:
style.pop('color', None)
lines = ax.vlines(times, base, base + height, **style)
if labels:
for path, lab in zip(lines.get_paths(), labels):
ax.annotate(lab,
xy=(path.vertices[0][0], height),
xycoords='data',
xytext=(8, -10), textcoords='offset points',
**text_kw)
if new_axes:
ax.set_yticks([])
__expand_limits(ax, [base, base + height], which='y')
if times.size:
__expand_limits(ax, [times.min(), times.max()], which='x')
return ax
|
[
"def",
"events",
"(",
"times",
",",
"labels",
"=",
"None",
",",
"base",
"=",
"None",
",",
"height",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"text_kw",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"text_kw",
"is",
"None",
":",
"text_kw",
"=",
"dict",
"(",
")",
"text_kw",
".",
"setdefault",
"(",
"'va'",
",",
"'top'",
")",
"text_kw",
".",
"setdefault",
"(",
"'clip_on'",
",",
"True",
")",
"text_kw",
".",
"setdefault",
"(",
"'bbox'",
",",
"dict",
"(",
"boxstyle",
"=",
"'round'",
",",
"facecolor",
"=",
"'white'",
")",
")",
"# make sure we have an array for times",
"times",
"=",
"np",
".",
"asarray",
"(",
"times",
")",
"# Get the axes handle",
"ax",
",",
"new_axes",
"=",
"__get_axes",
"(",
"ax",
"=",
"ax",
")",
"# If we have fresh axes, set the limits",
"if",
"new_axes",
":",
"# Infer base and height",
"if",
"base",
"is",
"None",
":",
"base",
"=",
"0",
"if",
"height",
"is",
"None",
":",
"height",
"=",
"1",
"ax",
".",
"set_ylim",
"(",
"[",
"base",
",",
"height",
"]",
")",
"else",
":",
"if",
"base",
"is",
"None",
":",
"base",
"=",
"ax",
".",
"get_ylim",
"(",
")",
"[",
"0",
"]",
"if",
"height",
"is",
"None",
":",
"height",
"=",
"ax",
".",
"get_ylim",
"(",
")",
"[",
"1",
"]",
"cycler",
"=",
"ax",
".",
"_get_patches_for_fill",
".",
"prop_cycler",
"style",
"=",
"next",
"(",
"cycler",
")",
".",
"copy",
"(",
")",
"style",
".",
"update",
"(",
"kwargs",
")",
"# If the user provided 'colors', don't override it with 'color'",
"if",
"'colors'",
"in",
"style",
":",
"style",
".",
"pop",
"(",
"'color'",
",",
"None",
")",
"lines",
"=",
"ax",
".",
"vlines",
"(",
"times",
",",
"base",
",",
"base",
"+",
"height",
",",
"*",
"*",
"style",
")",
"if",
"labels",
":",
"for",
"path",
",",
"lab",
"in",
"zip",
"(",
"lines",
".",
"get_paths",
"(",
")",
",",
"labels",
")",
":",
"ax",
".",
"annotate",
"(",
"lab",
",",
"xy",
"=",
"(",
"path",
".",
"vertices",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"height",
")",
",",
"xycoords",
"=",
"'data'",
",",
"xytext",
"=",
"(",
"8",
",",
"-",
"10",
")",
",",
"textcoords",
"=",
"'offset points'",
",",
"*",
"*",
"text_kw",
")",
"if",
"new_axes",
":",
"ax",
".",
"set_yticks",
"(",
"[",
"]",
")",
"__expand_limits",
"(",
"ax",
",",
"[",
"base",
",",
"base",
"+",
"height",
"]",
",",
"which",
"=",
"'y'",
")",
"if",
"times",
".",
"size",
":",
"__expand_limits",
"(",
"ax",
",",
"[",
"times",
".",
"min",
"(",
")",
",",
"times",
".",
"max",
"(",
")",
"]",
",",
"which",
"=",
"'x'",
")",
"return",
"ax"
] |
Plot event times as a set of vertical lines
Parameters
----------
times : np.ndarray, shape=(n,)
event times, in the format returned by
:func:`mir_eval.io.load_events` or
:func:`mir_eval.io.load_labeled_events`.
labels : list, shape=(n,), optional
event labels, in the format returned by
:func:`mir_eval.io.load_labeled_events`.
base : number
The vertical position of the base of the line.
By default, this will be the bottom of the plot.
height : number
The height of the lines.
By default, this will be the top of the plot (minus `base`).
ax : matplotlib.pyplot.axes
An axis handle on which to draw the events.
If none is provided, a new set of axes is created.
text_kw : dict
If `labels` is provided, the properties of the text
objects can be specified here.
See `matplotlib.pyplot.Text` for valid parameters
kwargs
Additional keyword arguments to pass to
`matplotlib.pyplot.vlines`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
|
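A small, hedged example of ``events`` plotting beat-like markers; the times and labels are invented, and ``mir_eval`` is assumed to be installed.

import numpy as np
import mir_eval.display

beat_times = np.array([0.5, 1.0, 1.5, 2.0])
ax = mir_eval.display.events(beat_times, labels=['1', '2', '3', '4'])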
[
"Plot",
"event",
"times",
"as",
"a",
"set",
"of",
"vertical",
"lines"
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/display.py#L394-L490
|
train
|
craffel/mir_eval
|
mir_eval/display.py
|
pitch
|
def pitch(times, frequencies, midi=False, unvoiced=False, ax=None, **kwargs):
'''Visualize pitch contours
Parameters
----------
times : np.ndarray, shape=(n,)
Sample times of frequencies
frequencies : np.ndarray, shape=(n,)
frequencies (in Hz) of the pitch contours.
Voicing is indicated by sign (positive for voiced,
non-positive for non-voiced).
midi : bool
If `True`, plot on a MIDI-numbered vertical axis.
Otherwise, plot on a linear frequency axis.
unvoiced : bool
If `True`, unvoiced pitch contours are plotted and indicated
by transparency.
Otherwise, unvoiced pitch contours are omitted from the display.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the pitch contours.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to `matplotlib.pyplot.plot`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
ax, _ = __get_axes(ax=ax)
times = np.asarray(times)
# First, segment into contiguously voiced contours
frequencies, voicings = freq_to_voicing(np.asarray(frequencies,
dtype=np.float))
# Here are all the change-points
v_changes = 1 + np.flatnonzero(voicings[1:] != voicings[:-1])
v_changes = np.unique(np.concatenate([[0], v_changes, [len(voicings)]]))
# Set up arrays of slices for voiced and unvoiced regions
v_slices, u_slices = [], []
for start, end in zip(v_changes, v_changes[1:]):
idx = slice(start, end)
# A region is voiced if its starting sample is voiced
# It's unvoiced if none of the samples in the region are voiced.
if voicings[start]:
v_slices.append(idx)
elif frequencies[idx].all():
u_slices.append(idx)
# Now we just need to plot the contour
style = dict()
style.update(next(ax._get_lines.prop_cycler))
style.update(kwargs)
if midi:
idx = frequencies > 0
frequencies[idx] = hz_to_midi(frequencies[idx])
# Tick at integer midi notes
ax.yaxis.set_minor_locator(MultipleLocator(1))
for idx in v_slices:
ax.plot(times[idx], frequencies[idx], **style)
style.pop('label', None)
# Plot the unvoiced portions
if unvoiced:
style['alpha'] = style.get('alpha', 1.0) * 0.5
for idx in u_slices:
ax.plot(times[idx], frequencies[idx], **style)
return ax
|
python
|
def pitch(times, frequencies, midi=False, unvoiced=False, ax=None, **kwargs):
'''Visualize pitch contours
Parameters
----------
times : np.ndarray, shape=(n,)
Sample times of frequencies
frequencies : np.ndarray, shape=(n,)
frequencies (in Hz) of the pitch contours.
Voicing is indicated by sign (positive for voiced,
non-positive for non-voiced).
midi : bool
If `True`, plot on a MIDI-numbered vertical axis.
Otherwise, plot on a linear frequency axis.
unvoiced : bool
If `True`, unvoiced pitch contours are plotted and indicated
by transparency.
Otherwise, unvoiced pitch contours are omitted from the display.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the pitch contours.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to `matplotlib.pyplot.plot`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
ax, _ = __get_axes(ax=ax)
times = np.asarray(times)
# First, segment into contiguously voiced contours
frequencies, voicings = freq_to_voicing(np.asarray(frequencies,
dtype=np.float))
# Here are all the change-points
v_changes = 1 + np.flatnonzero(voicings[1:] != voicings[:-1])
v_changes = np.unique(np.concatenate([[0], v_changes, [len(voicings)]]))
# Set up arrays of slices for voiced and unvoiced regions
v_slices, u_slices = [], []
for start, end in zip(v_changes, v_changes[1:]):
idx = slice(start, end)
# A region is voiced if its starting sample is voiced
# It's unvoiced if none of the samples in the region are voiced.
if voicings[start]:
v_slices.append(idx)
elif frequencies[idx].all():
u_slices.append(idx)
# Now we just need to plot the contour
style = dict()
style.update(next(ax._get_lines.prop_cycler))
style.update(kwargs)
if midi:
idx = frequencies > 0
frequencies[idx] = hz_to_midi(frequencies[idx])
# Tick at integer midi notes
ax.yaxis.set_minor_locator(MultipleLocator(1))
for idx in v_slices:
ax.plot(times[idx], frequencies[idx], **style)
style.pop('label', None)
# Plot the unvoiced portions
if unvoiced:
style['alpha'] = style.get('alpha', 1.0) * 0.5
for idx in u_slices:
ax.plot(times[idx], frequencies[idx], **style)
return ax
|
[
"def",
"pitch",
"(",
"times",
",",
"frequencies",
",",
"midi",
"=",
"False",
",",
"unvoiced",
"=",
"False",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"ax",
",",
"_",
"=",
"__get_axes",
"(",
"ax",
"=",
"ax",
")",
"times",
"=",
"np",
".",
"asarray",
"(",
"times",
")",
"# First, segment into contiguously voiced contours",
"frequencies",
",",
"voicings",
"=",
"freq_to_voicing",
"(",
"np",
".",
"asarray",
"(",
"frequencies",
",",
"dtype",
"=",
"np",
".",
"float",
")",
")",
"# Here are all the change-points",
"v_changes",
"=",
"1",
"+",
"np",
".",
"flatnonzero",
"(",
"voicings",
"[",
"1",
":",
"]",
"!=",
"voicings",
"[",
":",
"-",
"1",
"]",
")",
"v_changes",
"=",
"np",
".",
"unique",
"(",
"np",
".",
"concatenate",
"(",
"[",
"[",
"0",
"]",
",",
"v_changes",
",",
"[",
"len",
"(",
"voicings",
")",
"]",
"]",
")",
")",
"# Set up arrays of slices for voiced and unvoiced regions",
"v_slices",
",",
"u_slices",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"start",
",",
"end",
"in",
"zip",
"(",
"v_changes",
",",
"v_changes",
"[",
"1",
":",
"]",
")",
":",
"idx",
"=",
"slice",
"(",
"start",
",",
"end",
")",
"# A region is voiced if its starting sample is voiced",
"# It's unvoiced if none of the samples in the region are voiced.",
"if",
"voicings",
"[",
"start",
"]",
":",
"v_slices",
".",
"append",
"(",
"idx",
")",
"elif",
"frequencies",
"[",
"idx",
"]",
".",
"all",
"(",
")",
":",
"u_slices",
".",
"append",
"(",
"idx",
")",
"# Now we just need to plot the contour",
"style",
"=",
"dict",
"(",
")",
"style",
".",
"update",
"(",
"next",
"(",
"ax",
".",
"_get_lines",
".",
"prop_cycler",
")",
")",
"style",
".",
"update",
"(",
"kwargs",
")",
"if",
"midi",
":",
"idx",
"=",
"frequencies",
">",
"0",
"frequencies",
"[",
"idx",
"]",
"=",
"hz_to_midi",
"(",
"frequencies",
"[",
"idx",
"]",
")",
"# Tick at integer midi notes",
"ax",
".",
"yaxis",
".",
"set_minor_locator",
"(",
"MultipleLocator",
"(",
"1",
")",
")",
"for",
"idx",
"in",
"v_slices",
":",
"ax",
".",
"plot",
"(",
"times",
"[",
"idx",
"]",
",",
"frequencies",
"[",
"idx",
"]",
",",
"*",
"*",
"style",
")",
"style",
".",
"pop",
"(",
"'label'",
",",
"None",
")",
"# Plot the unvoiced portions",
"if",
"unvoiced",
":",
"style",
"[",
"'alpha'",
"]",
"=",
"style",
".",
"get",
"(",
"'alpha'",
",",
"1.0",
")",
"*",
"0.5",
"for",
"idx",
"in",
"u_slices",
":",
"ax",
".",
"plot",
"(",
"times",
"[",
"idx",
"]",
",",
"frequencies",
"[",
"idx",
"]",
",",
"*",
"*",
"style",
")",
"return",
"ax"
] |
Visualize pitch contours
Parameters
----------
times : np.ndarray, shape=(n,)
Sample times of frequencies
frequencies : np.ndarray, shape=(n,)
frequencies (in Hz) of the pitch contours.
Voicing is indicated by sign (positive for voiced,
non-positive for non-voiced).
midi : bool
If `True`, plot on a MIDI-numbered vertical axis.
Otherwise, plot on a linear frequency axis.
unvoiced : bool
If `True`, unvoiced pitch contours are plotted and indicated
by transparency.
Otherwise, unvoiced pitch contours are omitted from the display.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the pitch contours.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to `matplotlib.pyplot.plot`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
|
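A hedged sketch of ``pitch`` on a synthetic contour; negative frequencies mark unvoiced frames per the sign convention above, the values are invented, and ``mir_eval`` is assumed to be installed.

import numpy as np
import mir_eval.display

times = np.linspace(0, 1, 100)
freqs = np.full_like(times, 220.0)   # a flat 220 Hz contour
freqs[40:60] *= -1                   # mark a span as unvoiced via the sign

ax = mir_eval.display.pitch(times, freqs, midi=True, unvoiced=True)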
[
"Visualize",
"pitch",
"contours"
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/display.py#L493-L574
|
train
|
craffel/mir_eval
|
mir_eval/display.py
|
multipitch
|
def multipitch(times, frequencies, midi=False, unvoiced=False, ax=None,
**kwargs):
'''Visualize multiple f0 measurements
Parameters
----------
times : np.ndarray, shape=(n,)
Sample times of frequencies
frequencies : list of np.ndarray
frequencies (in Hz) of the pitch measurements.
Voicing is indicated by sign (positive for voiced,
non-positive for non-voiced).
`times` and `frequencies` should be in the format produced by
:func:`mir_eval.io.load_ragged_time_series`
midi : bool
If `True`, plot on a MIDI-numbered vertical axis.
Otherwise, plot on a linear frequency axis.
unvoiced : bool
If `True`, unvoiced pitches are plotted and indicated
by transparency.
Otherwise, unvoiced pitches are omitted from the display.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the pitch contours.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to `plt.scatter`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
# Get the axes handle
ax, _ = __get_axes(ax=ax)
# Set up a style for the plot
style_voiced = dict()
style_voiced.update(next(ax._get_lines.prop_cycler))
style_voiced.update(kwargs)
style_unvoiced = style_voiced.copy()
style_unvoiced.pop('label', None)
style_unvoiced['alpha'] = style_unvoiced.get('alpha', 1.0) * 0.5
# We'll collect all times and frequencies first, then plot them
voiced_times = []
voiced_freqs = []
unvoiced_times = []
unvoiced_freqs = []
for t, freqs in zip(times, frequencies):
if not len(freqs):
continue
freqs, voicings = freq_to_voicing(np.asarray(freqs, dtype=np.float))
# Discard all 0-frequency measurements
idx = freqs > 0
freqs = freqs[idx]
voicings = voicings[idx]
if midi:
freqs = hz_to_midi(freqs)
n_voiced = sum(voicings)
voiced_times.extend([t] * n_voiced)
voiced_freqs.extend(freqs[voicings])
unvoiced_times.extend([t] * (len(freqs) - n_voiced))
unvoiced_freqs.extend(freqs[~voicings])
# Plot the voiced frequencies
ax.scatter(voiced_times, voiced_freqs, **style_voiced)
# Plot the unvoiced frequencies
if unvoiced:
ax.scatter(unvoiced_times, unvoiced_freqs, **style_unvoiced)
# Tick at integer midi notes
if midi:
ax.yaxis.set_minor_locator(MultipleLocator(1))
return ax
|
python
|
def multipitch(times, frequencies, midi=False, unvoiced=False, ax=None,
**kwargs):
'''Visualize multiple f0 measurements
Parameters
----------
times : np.ndarray, shape=(n,)
Sample times of frequencies
frequencies : list of np.ndarray
frequencies (in Hz) of the pitch measurements.
Voicing is indicated by sign (positive for voiced,
non-positive for non-voiced).
`times` and `frequencies` should be in the format produced by
:func:`mir_eval.io.load_ragged_time_series`
midi : bool
If `True`, plot on a MIDI-numbered vertical axis.
Otherwise, plot on a linear frequency axis.
unvoiced : bool
If `True`, unvoiced pitches are plotted and indicated
by transparency.
Otherwise, unvoiced pitches are omitted from the display.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the pitch contours.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to `plt.scatter`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
# Get the axes handle
ax, _ = __get_axes(ax=ax)
# Set up a style for the plot
style_voiced = dict()
style_voiced.update(next(ax._get_lines.prop_cycler))
style_voiced.update(kwargs)
style_unvoiced = style_voiced.copy()
style_unvoiced.pop('label', None)
style_unvoiced['alpha'] = style_unvoiced.get('alpha', 1.0) * 0.5
# We'll collect all times and frequencies first, then plot them
voiced_times = []
voiced_freqs = []
unvoiced_times = []
unvoiced_freqs = []
for t, freqs in zip(times, frequencies):
if not len(freqs):
continue
freqs, voicings = freq_to_voicing(np.asarray(freqs, dtype=np.float))
# Discard all 0-frequency measurements
idx = freqs > 0
freqs = freqs[idx]
voicings = voicings[idx]
if midi:
freqs = hz_to_midi(freqs)
n_voiced = sum(voicings)
voiced_times.extend([t] * n_voiced)
voiced_freqs.extend(freqs[voicings])
unvoiced_times.extend([t] * (len(freqs) - n_voiced))
unvoiced_freqs.extend(freqs[~voicings])
# Plot the voiced frequencies
ax.scatter(voiced_times, voiced_freqs, **style_voiced)
# Plot the unvoiced frequencies
if unvoiced:
ax.scatter(unvoiced_times, unvoiced_freqs, **style_unvoiced)
# Tick at integer midi notes
if midi:
ax.yaxis.set_minor_locator(MultipleLocator(1))
return ax
|
[
"def",
"multipitch",
"(",
"times",
",",
"frequencies",
",",
"midi",
"=",
"False",
",",
"unvoiced",
"=",
"False",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Get the axes handle",
"ax",
",",
"_",
"=",
"__get_axes",
"(",
"ax",
"=",
"ax",
")",
"# Set up a style for the plot",
"style_voiced",
"=",
"dict",
"(",
")",
"style_voiced",
".",
"update",
"(",
"next",
"(",
"ax",
".",
"_get_lines",
".",
"prop_cycler",
")",
")",
"style_voiced",
".",
"update",
"(",
"kwargs",
")",
"style_unvoiced",
"=",
"style_voiced",
".",
"copy",
"(",
")",
"style_unvoiced",
".",
"pop",
"(",
"'label'",
",",
"None",
")",
"style_unvoiced",
"[",
"'alpha'",
"]",
"=",
"style_unvoiced",
".",
"get",
"(",
"'alpha'",
",",
"1.0",
")",
"*",
"0.5",
"# We'll collect all times and frequencies first, then plot them",
"voiced_times",
"=",
"[",
"]",
"voiced_freqs",
"=",
"[",
"]",
"unvoiced_times",
"=",
"[",
"]",
"unvoiced_freqs",
"=",
"[",
"]",
"for",
"t",
",",
"freqs",
"in",
"zip",
"(",
"times",
",",
"frequencies",
")",
":",
"if",
"not",
"len",
"(",
"freqs",
")",
":",
"continue",
"freqs",
",",
"voicings",
"=",
"freq_to_voicing",
"(",
"np",
".",
"asarray",
"(",
"freqs",
",",
"dtype",
"=",
"np",
".",
"float",
")",
")",
"# Discard all 0-frequency measurements",
"idx",
"=",
"freqs",
">",
"0",
"freqs",
"=",
"freqs",
"[",
"idx",
"]",
"voicings",
"=",
"voicings",
"[",
"idx",
"]",
"if",
"midi",
":",
"freqs",
"=",
"hz_to_midi",
"(",
"freqs",
")",
"n_voiced",
"=",
"sum",
"(",
"voicings",
")",
"voiced_times",
".",
"extend",
"(",
"[",
"t",
"]",
"*",
"n_voiced",
")",
"voiced_freqs",
".",
"extend",
"(",
"freqs",
"[",
"voicings",
"]",
")",
"unvoiced_times",
".",
"extend",
"(",
"[",
"t",
"]",
"*",
"(",
"len",
"(",
"freqs",
")",
"-",
"n_voiced",
")",
")",
"unvoiced_freqs",
".",
"extend",
"(",
"freqs",
"[",
"~",
"voicings",
"]",
")",
"# Plot the voiced frequencies",
"ax",
".",
"scatter",
"(",
"voiced_times",
",",
"voiced_freqs",
",",
"*",
"*",
"style_voiced",
")",
"# Plot the unvoiced frequencies",
"if",
"unvoiced",
":",
"ax",
".",
"scatter",
"(",
"unvoiced_times",
",",
"unvoiced_freqs",
",",
"*",
"*",
"style_unvoiced",
")",
"# Tick at integer midi notes",
"if",
"midi",
":",
"ax",
".",
"yaxis",
".",
"set_minor_locator",
"(",
"MultipleLocator",
"(",
"1",
")",
")",
"return",
"ax"
] |
Visualize multiple f0 measurements
Parameters
----------
times : np.ndarray, shape=(n,)
Sample times of frequencies
frequencies : list of np.ndarray
frequencies (in Hz) of the pitch measurements.
Voicing is indicated by sign (positive for voiced,
non-positive for non-voiced).
`times` and `frequencies` should be in the format produced by
:func:`mir_eval.io.load_ragged_time_series`
midi : bool
If `True`, plot on a MIDI-numbered vertical axis.
Otherwise, plot on a linear frequency axis.
unvoiced : bool
If `True`, unvoiced pitches are plotted and indicated
by transparency.
Otherwise, unvoiced pitches are omitted from the display.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the pitch contours.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to `plt.scatter`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
|
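A hedged sketch of ``multipitch`` on a tiny ragged time series (each frame may carry a different number of f0 values); the numbers are invented and ``mir_eval`` is assumed to be installed.

import numpy as np
import mir_eval.display

times = np.array([0.0, 0.1, 0.2])
frequencies = [np.array([220.0, 330.0]),   # two pitches in the first frame
               np.array([221.0]),          # one in the second
               np.array([])]               # none in the third

ax = mir_eval.display.multipitch(times, frequencies)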
[
"Visualize",
"multiple",
"f0",
"measurements"
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/display.py#L577-L666
|
train
|
craffel/mir_eval
|
mir_eval/display.py
|
piano_roll
|
def piano_roll(intervals, pitches=None, midi=None, ax=None, **kwargs):
'''Plot a quantized piano roll as intervals
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
timing intervals for notes
pitches : np.ndarray, shape=(n,), optional
pitches of notes (in Hz).
midi : np.ndarray, shape=(n,), optional
pitches of notes (in MIDI numbers).
At least one of ``pitches`` or ``midi`` must be provided.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the intervals.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to :func:`labeled_intervals`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
if midi is None:
if pitches is None:
raise ValueError('At least one of `midi` or `pitches` '
'must be provided.')
midi = hz_to_midi(pitches)
scale = np.arange(128)
ax = labeled_intervals(intervals, np.round(midi).astype(int),
label_set=scale,
tick=False,
ax=ax,
**kwargs)
# Minor tick at each semitone
ax.yaxis.set_minor_locator(MultipleLocator(1))
ax.axis('auto')
return ax
|
python
|
def piano_roll(intervals, pitches=None, midi=None, ax=None, **kwargs):
'''Plot a quantized piano roll as intervals
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
timing intervals for notes
pitches : np.ndarray, shape=(n,), optional
pitches of notes (in Hz).
midi : np.ndarray, shape=(n,), optional
pitches of notes (in MIDI numbers).
At least one of ``pitches`` or ``midi`` must be provided.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the intervals.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to :func:`labeled_intervals`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
if midi is None:
if pitches is None:
raise ValueError('At least one of `midi` or `pitches` '
'must be provided.')
midi = hz_to_midi(pitches)
scale = np.arange(128)
ax = labeled_intervals(intervals, np.round(midi).astype(int),
label_set=scale,
tick=False,
ax=ax,
**kwargs)
# Minor tick at each semitone
ax.yaxis.set_minor_locator(MultipleLocator(1))
ax.axis('auto')
return ax
|
[
"def",
"piano_roll",
"(",
"intervals",
",",
"pitches",
"=",
"None",
",",
"midi",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"midi",
"is",
"None",
":",
"if",
"pitches",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'At least one of `midi` or `pitches` '",
"'must be provided.'",
")",
"midi",
"=",
"hz_to_midi",
"(",
"pitches",
")",
"scale",
"=",
"np",
".",
"arange",
"(",
"128",
")",
"ax",
"=",
"labeled_intervals",
"(",
"intervals",
",",
"np",
".",
"round",
"(",
"midi",
")",
".",
"astype",
"(",
"int",
")",
",",
"label_set",
"=",
"scale",
",",
"tick",
"=",
"False",
",",
"ax",
"=",
"ax",
",",
"*",
"*",
"kwargs",
")",
"# Minor tick at each semitone",
"ax",
".",
"yaxis",
".",
"set_minor_locator",
"(",
"MultipleLocator",
"(",
"1",
")",
")",
"ax",
".",
"axis",
"(",
"'auto'",
")",
"return",
"ax"
] |
Plot a quantized piano roll as intervals
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
timing intervals for notes
pitches : np.ndarray, shape=(n,), optional
pitches of notes (in Hz).
midi : np.ndarray, shape=(n,), optional
pitches of notes (in MIDI numbers).
At least one of ``pitches`` or ``midi`` must be provided.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the intervals.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to :func:`labeled_intervals`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
|
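A hedged sketch of ``piano_roll`` with pitches given in Hz; the note intervals and pitches are invented, and ``ticker_notes`` (defined later in this module) is used to label the y-axis with note names.

import numpy as np
import mir_eval.display

intervals = np.array([[0.0, 0.5], [0.5, 1.0], [1.0, 2.0]])
pitches = np.array([440.0, 493.9, 523.3])   # roughly A4, B4, C5

ax = mir_eval.display.piano_roll(intervals, pitches=pitches)
mir_eval.display.ticker_notes(ax)   # show note names instead of MIDI numbers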
[
"Plot",
"a",
"quantized",
"piano",
"roll",
"as",
"intervals"
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/display.py#L669-L716
|
train
|
craffel/mir_eval
|
mir_eval/display.py
|
separation
|
def separation(sources, fs=22050, labels=None, alpha=0.75, ax=None, **kwargs):
'''Source-separation visualization
Parameters
----------
sources : np.ndarray, shape=(nsrc, nsampl)
A list of waveform buffers corresponding to each source
fs : number > 0
The sampling rate
labels : list of strings
An optional list of descriptors corresponding to each source
alpha : float in [0, 1]
Maximum alpha (opacity) of spectrogram values.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the spectrograms.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to ``scipy.signal.spectrogram``
Returns
-------
ax
The axis handle for this plot
'''
# Get the axes handle
ax, new_axes = __get_axes(ax=ax)
# Make sure we have at least two dimensions
sources = np.atleast_2d(sources)
if labels is None:
labels = ['Source {:d}'.format(_) for _ in range(len(sources))]
kwargs.setdefault('scaling', 'spectrum')
# The cumulative spectrogram across sources
# is used to establish the reference power
# for each individual source
cumspec = None
specs = []
for i, src in enumerate(sources):
freqs, times, spec = spectrogram(src, fs=fs, **kwargs)
specs.append(spec)
if cumspec is None:
cumspec = spec.copy()
else:
cumspec += spec
ref_max = cumspec.max()
ref_min = ref_max * 1e-6
color_conv = ColorConverter()
for i, spec in enumerate(specs):
# For each source, grab a new color from the cycler
# Then construct a colormap that interpolates from
# [transparent white -> new color]
color = next(ax._get_lines.prop_cycler)['color']
color = color_conv.to_rgba(color, alpha=alpha)
cmap = LinearSegmentedColormap.from_list(labels[i],
[(1.0, 1.0, 1.0, 0.0),
color])
ax.pcolormesh(times, freqs, spec,
cmap=cmap,
norm=LogNorm(vmin=ref_min, vmax=ref_max),
shading='gouraud',
label=labels[i])
# Attach a 0x0 rect to the axis with the corresponding label
# This way, it will show up in the legend
ax.add_patch(Rectangle((0, 0), 0, 0, color=color, label=labels[i]))
if new_axes:
ax.axis('tight')
return ax
|
python
|
def separation(sources, fs=22050, labels=None, alpha=0.75, ax=None, **kwargs):
'''Source-separation visualization
Parameters
----------
sources : np.ndarray, shape=(nsrc, nsampl)
A list of waveform buffers corresponding to each source
fs : number > 0
The sampling rate
labels : list of strings
An optional list of descriptors corresponding to each source
alpha : float in [0, 1]
Maximum alpha (opacity) of spectrogram values.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the spectrograms.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to ``scipy.signal.spectrogram``
Returns
-------
ax
The axis handle for this plot
'''
# Get the axes handle
ax, new_axes = __get_axes(ax=ax)
# Make sure we have at least two dimensions
sources = np.atleast_2d(sources)
if labels is None:
labels = ['Source {:d}'.format(_) for _ in range(len(sources))]
kwargs.setdefault('scaling', 'spectrum')
# The cumulative spectrogram across sources
# is used to establish the reference power
# for each individual source
cumspec = None
specs = []
for i, src in enumerate(sources):
freqs, times, spec = spectrogram(src, fs=fs, **kwargs)
specs.append(spec)
if cumspec is None:
cumspec = spec.copy()
else:
cumspec += spec
ref_max = cumspec.max()
ref_min = ref_max * 1e-6
color_conv = ColorConverter()
for i, spec in enumerate(specs):
# For each source, grab a new color from the cycler
# Then construct a colormap that interpolates from
# [transparent white -> new color]
color = next(ax._get_lines.prop_cycler)['color']
color = color_conv.to_rgba(color, alpha=alpha)
cmap = LinearSegmentedColormap.from_list(labels[i],
[(1.0, 1.0, 1.0, 0.0),
color])
ax.pcolormesh(times, freqs, spec,
cmap=cmap,
norm=LogNorm(vmin=ref_min, vmax=ref_max),
shading='gouraud',
label=labels[i])
# Attach a 0x0 rect to the axis with the corresponding label
# This way, it will show up in the legend
ax.add_patch(Rectangle((0, 0), 0, 0, color=color, label=labels[i]))
if new_axes:
ax.axis('tight')
return ax
|
[
"def",
"separation",
"(",
"sources",
",",
"fs",
"=",
"22050",
",",
"labels",
"=",
"None",
",",
"alpha",
"=",
"0.75",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Get the axes handle",
"ax",
",",
"new_axes",
"=",
"__get_axes",
"(",
"ax",
"=",
"ax",
")",
"# Make sure we have at least two dimensions",
"sources",
"=",
"np",
".",
"atleast_2d",
"(",
"sources",
")",
"if",
"labels",
"is",
"None",
":",
"labels",
"=",
"[",
"'Source {:d}'",
".",
"format",
"(",
"_",
")",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"sources",
")",
")",
"]",
"kwargs",
".",
"setdefault",
"(",
"'scaling'",
",",
"'spectrum'",
")",
"# The cumulative spectrogram across sources",
"# is used to establish the reference power",
"# for each individual source",
"cumspec",
"=",
"None",
"specs",
"=",
"[",
"]",
"for",
"i",
",",
"src",
"in",
"enumerate",
"(",
"sources",
")",
":",
"freqs",
",",
"times",
",",
"spec",
"=",
"spectrogram",
"(",
"src",
",",
"fs",
"=",
"fs",
",",
"*",
"*",
"kwargs",
")",
"specs",
".",
"append",
"(",
"spec",
")",
"if",
"cumspec",
"is",
"None",
":",
"cumspec",
"=",
"spec",
".",
"copy",
"(",
")",
"else",
":",
"cumspec",
"+=",
"spec",
"ref_max",
"=",
"cumspec",
".",
"max",
"(",
")",
"ref_min",
"=",
"ref_max",
"*",
"1e-6",
"color_conv",
"=",
"ColorConverter",
"(",
")",
"for",
"i",
",",
"spec",
"in",
"enumerate",
"(",
"specs",
")",
":",
"# For each source, grab a new color from the cycler",
"# Then construct a colormap that interpolates from",
"# [transparent white -> new color]",
"color",
"=",
"next",
"(",
"ax",
".",
"_get_lines",
".",
"prop_cycler",
")",
"[",
"'color'",
"]",
"color",
"=",
"color_conv",
".",
"to_rgba",
"(",
"color",
",",
"alpha",
"=",
"alpha",
")",
"cmap",
"=",
"LinearSegmentedColormap",
".",
"from_list",
"(",
"labels",
"[",
"i",
"]",
",",
"[",
"(",
"1.0",
",",
"1.0",
",",
"1.0",
",",
"0.0",
")",
",",
"color",
"]",
")",
"ax",
".",
"pcolormesh",
"(",
"times",
",",
"freqs",
",",
"spec",
",",
"cmap",
"=",
"cmap",
",",
"norm",
"=",
"LogNorm",
"(",
"vmin",
"=",
"ref_min",
",",
"vmax",
"=",
"ref_max",
")",
",",
"shading",
"=",
"'gouraud'",
",",
"label",
"=",
"labels",
"[",
"i",
"]",
")",
"# Attach a 0x0 rect to the axis with the corresponding label",
"# This way, it will show up in the legend",
"ax",
".",
"add_patch",
"(",
"Rectangle",
"(",
"(",
"0",
",",
"0",
")",
",",
"0",
",",
"0",
",",
"color",
"=",
"color",
",",
"label",
"=",
"labels",
"[",
"i",
"]",
")",
")",
"if",
"new_axes",
":",
"ax",
".",
"axis",
"(",
"'tight'",
")",
"return",
"ax"
] |
Source-separation visualization
Parameters
----------
sources : np.ndarray, shape=(nsrc, nsampl)
A list of waveform buffers corresponding to each source
fs : number > 0
The sampling rate
labels : list of strings
An optional list of descriptors corresponding to each source
alpha : float in [0, 1]
Maximum alpha (opacity) of spectrogram values.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the spectrograms.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to ``scipy.signal.spectrogram``
Returns
-------
ax
The axis handle for this plot
|
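A hedged sketch of ``separation`` with two synthetic one-second sources (a tone and low-level noise); the signals are invented, and ``mir_eval`` plus ``scipy`` are assumed to be installed.

import numpy as np
import mir_eval.display

fs = 22050
t = np.arange(fs) / float(fs)
sources = np.vstack([np.sin(2 * np.pi * 440 * t),      # a 440 Hz tone
                     0.1 * np.random.randn(len(t))])   # low-level noise

ax = mir_eval.display.separation(sources, fs=fs, labels=['tone', 'noise'])
ax.legend()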
[
"Source",
"-",
"separation",
"visualization"
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/display.py#L719-L803
|
train
|
craffel/mir_eval
|
mir_eval/display.py
|
__ticker_midi_note
|
def __ticker_midi_note(x, pos):
'''A ticker function for midi notes.
Inputs x are interpreted as midi numbers, and converted
to [NOTE][OCTAVE]+[cents].
'''
NOTES = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
cents = float(np.mod(x, 1.0))
if cents >= 0.5:
cents = cents - 1.0
x = x + 0.5
idx = int(x % 12)
octave = int(x / 12) - 1
if cents == 0:
return '{:s}{:2d}'.format(NOTES[idx], octave)
return '{:s}{:2d}{:+02d}'.format(NOTES[idx], octave, int(cents * 100))
|
python
|
def __ticker_midi_note(x, pos):
'''A ticker function for midi notes.
Inputs x are interpreted as midi numbers, and converted
to [NOTE][OCTAVE]+[cents].
'''
NOTES = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
cents = float(np.mod(x, 1.0))
if cents >= 0.5:
cents = cents - 1.0
x = x + 0.5
idx = int(x % 12)
octave = int(x / 12) - 1
if cents == 0:
return '{:s}{:2d}'.format(NOTES[idx], octave)
return '{:s}{:2d}{:+02d}'.format(NOTES[idx], octave, int(cents * 100))
|
[
"def",
"__ticker_midi_note",
"(",
"x",
",",
"pos",
")",
":",
"NOTES",
"=",
"[",
"'C'",
",",
"'C#'",
",",
"'D'",
",",
"'D#'",
",",
"'E'",
",",
"'F'",
",",
"'F#'",
",",
"'G'",
",",
"'G#'",
",",
"'A'",
",",
"'A#'",
",",
"'B'",
"]",
"cents",
"=",
"float",
"(",
"np",
".",
"mod",
"(",
"x",
",",
"1.0",
")",
")",
"if",
"cents",
">=",
"0.5",
":",
"cents",
"=",
"cents",
"-",
"1.0",
"x",
"=",
"x",
"+",
"0.5",
"idx",
"=",
"int",
"(",
"x",
"%",
"12",
")",
"octave",
"=",
"int",
"(",
"x",
"/",
"12",
")",
"-",
"1",
"if",
"cents",
"==",
"0",
":",
"return",
"'{:s}{:2d}'",
".",
"format",
"(",
"NOTES",
"[",
"idx",
"]",
",",
"octave",
")",
"return",
"'{:s}{:2d}{:+02d}'",
".",
"format",
"(",
"NOTES",
"[",
"idx",
"]",
",",
"octave",
",",
"int",
"(",
"cents",
"*",
"100",
")",
")"
] |
A ticker function for midi notes.
Inputs x are interpreted as midi numbers, and converted
to [NOTE][OCTAVE]+[cents].
|
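To illustrate the rounding logic described above without touching the private formatter, here is a hedged stand-alone sketch that mirrors it; the sample inputs are chosen to be exactly representable so the cent offsets come out clean.

NOTES = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']

def midi_to_name(x):
    # Fold cents >= 50 onto the next note, keeping deviations within +/- 50 cents
    cents = x % 1.0
    if cents >= 0.5:
        cents -= 1.0
        x += 0.5
    name = '{}{}'.format(NOTES[int(x % 12)], int(x / 12) - 1)
    return name if cents == 0 else '{}{:+d}'.format(name, int(cents * 100))

print(midi_to_name(60.0))    # C4
print(midi_to_name(60.25))   # C4+25
print(midi_to_name(59.75))   # C4-25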
[
"A",
"ticker",
"function",
"for",
"midi",
"notes",
"."
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/display.py#L806-L826
|
train
|
craffel/mir_eval
|
mir_eval/display.py
|
ticker_notes
|
def ticker_notes(ax=None):
'''Set the y-axis of the given axes to MIDI notes
Parameters
----------
ax : matplotlib.pyplot.axes
The axes handle to apply the ticker.
By default, uses the current axes handle.
'''
ax, _ = __get_axes(ax=ax)
ax.yaxis.set_major_formatter(FMT_MIDI_NOTE)
# Get the tick labels and reset the vertical alignment
for tick in ax.yaxis.get_ticklabels():
tick.set_verticalalignment('baseline')
|
python
|
def ticker_notes(ax=None):
'''Set the y-axis of the given axes to MIDI notes
Parameters
----------
ax : matplotlib.pyplot.axes
The axes handle to apply the ticker.
By default, uses the current axes handle.
'''
ax, _ = __get_axes(ax=ax)
ax.yaxis.set_major_formatter(FMT_MIDI_NOTE)
# Get the tick labels and reset the vertical alignment
for tick in ax.yaxis.get_ticklabels():
tick.set_verticalalignment('baseline')
|
[
"def",
"ticker_notes",
"(",
"ax",
"=",
"None",
")",
":",
"ax",
",",
"_",
"=",
"__get_axes",
"(",
"ax",
"=",
"ax",
")",
"ax",
".",
"yaxis",
".",
"set_major_formatter",
"(",
"FMT_MIDI_NOTE",
")",
"# Get the tick labels and reset the vertical alignment",
"for",
"tick",
"in",
"ax",
".",
"yaxis",
".",
"get_ticklabels",
"(",
")",
":",
"tick",
".",
"set_verticalalignment",
"(",
"'baseline'",
")"
] |
Set the y-axis of the given axes to MIDI notes
Parameters
----------
ax : matplotlib.pyplot.axes
The axes handle to apply the ticker.
By default, uses the current axes handle.
|
[
"Set",
"the",
"y",
"-",
"axis",
"of",
"the",
"given",
"axes",
"to",
"MIDI",
"notes"
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/display.py#L839-L854
|
train
|
craffel/mir_eval
|
mir_eval/display.py
|
ticker_pitch
|
def ticker_pitch(ax=None):
'''Set the y-axis of the given axes to MIDI frequencies
Parameters
----------
ax : matplotlib.pyplot.axes
The axes handle to apply the ticker.
By default, uses the current axes handle.
'''
ax, _ = __get_axes(ax=ax)
ax.yaxis.set_major_formatter(FMT_MIDI_HZ)
|
python
|
def ticker_pitch(ax=None):
'''Set the y-axis of the given axes to MIDI frequencies
Parameters
----------
ax : matplotlib.pyplot.axes
The axes handle to apply the ticker.
By default, uses the current axes handle.
'''
ax, _ = __get_axes(ax=ax)
ax.yaxis.set_major_formatter(FMT_MIDI_HZ)
|
[
"def",
"ticker_pitch",
"(",
"ax",
"=",
"None",
")",
":",
"ax",
",",
"_",
"=",
"__get_axes",
"(",
"ax",
"=",
"ax",
")",
"ax",
".",
"yaxis",
".",
"set_major_formatter",
"(",
"FMT_MIDI_HZ",
")"
] |
Set the y-axis of the given axes to MIDI frequencies
Parameters
----------
ax : matplotlib.pyplot.axes
The axes handle to apply the ticker.
By default, uses the current axes handle.
|
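A hedged sketch combining the two tickers on a MIDI-scaled pitch plot (values invented): ``ticker_notes`` labels the axis with note names, while ``ticker_pitch`` labels it with the corresponding frequencies.

import numpy as np
import mir_eval.display

times = np.linspace(0, 1, 50)
freqs = np.full_like(times, 440.0)
ax = mir_eval.display.pitch(times, freqs, midi=True)

mir_eval.display.ticker_notes(ax)     # y-axis ticks as note names (e.g. A4)
# mir_eval.display.ticker_pitch(ax)   # ...or as frequencies instead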
[
"Set",
"the",
"y",
"-",
"axis",
"of",
"the",
"given",
"axes",
"to",
"MIDI",
"frequencies"
] |
f41c8dafaea04b411252a516d1965af43c7d531b
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/display.py#L857-L868
|
train
|
CartoDB/carto-python
|
carto/file_import.py
|
FileImportJob.run
|
def run(self, **import_params):
"""
Actually creates the import job on the CARTO server
:param import_params: To be sent to the Import API; see CARTO's docs
on Import API for an updated list of accepted
params
:type import_params: kwargs
:return:
.. note:: The import job is asynchronous, so you should track its progress by calling the :func:`carto.resources.AsyncResource.refresh` method and checking the import job's :py:attr:`~state` attribute. See :func:`carto.datasets.DatasetManager.create` for a unified method to import files into CARTO
"""
if self.file:
import_params["url"] = self.file
self.id_field = "id"
if "connection" in import_params:
self.fields.append("connector")
self.update_from_dict(import_params["connection"])
self.save(force_create=True)
else:
super(FileImportJob, self).run(params=import_params,
files=self.files)
|
python
|
def run(self, **import_params):
"""
Actually creates the import job on the CARTO server
:param import_params: To be sent to the Import API; see CARTO's docs
on Import API for an updated list of accepted
params
:type import_params: kwargs
:return:
.. note:: The import job is asynchronous, so you should track its progress by calling the :func:`carto.resources.AsyncResource.refresh` method and checking the import job's :py:attr:`~state` attribute. See :func:`carto.datasets.DatasetManager.create` for a unified method to import files into CARTO
"""
if self.file:
import_params["url"] = self.file
self.id_field = "id"
if "connection" in import_params:
self.fields.append("connector")
self.update_from_dict(import_params["connection"])
self.save(force_create=True)
else:
super(FileImportJob, self).run(params=import_params,
files=self.files)
|
[
"def",
"run",
"(",
"self",
",",
"*",
"*",
"import_params",
")",
":",
"if",
"self",
".",
"file",
":",
"import_params",
"[",
"\"url\"",
"]",
"=",
"self",
".",
"file",
"self",
".",
"id_field",
"=",
"\"id\"",
"if",
"\"connection\"",
"in",
"import_params",
":",
"self",
".",
"fields",
".",
"append",
"(",
"\"connector\"",
")",
"self",
".",
"update_from_dict",
"(",
"import_params",
"[",
"\"connection\"",
"]",
")",
"self",
".",
"save",
"(",
"force_create",
"=",
"True",
")",
"else",
":",
"super",
"(",
"FileImportJob",
",",
"self",
")",
".",
"run",
"(",
"params",
"=",
"import_params",
",",
"files",
"=",
"self",
".",
"files",
")"
] |
Actually creates the import job on the CARTO server
:param import_params: To be sent to the Import API; see CARTO's docs
on Import API for an updated list of accepted
params
:type import_params: kwargs
:return:
.. note:: The import job is asynchronous, so you should track its progress by calling the :func:`carto.resources.AsyncResource.refresh` method and checking the import job's :py:attr:`~state` attribute. See :func:`carto.datasets.DatasetManager.create` for a unified method to import files into CARTO
|
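A hedged end-to-end sketch of running an import job; the base URL, API key, and file URL are placeholders, and the constructor argument order for ``APIKeyAuthClient`` and ``FileImportJob`` is an assumption inferred from the attributes referenced above rather than a confirmed signature.

from carto.auth import APIKeyAuthClient
from carto.file_import import FileImportJob

auth_client = APIKeyAuthClient(base_url='https://YOUR_USER.carto.com/',
                               api_key='YOUR_API_KEY')   # placeholder credentials

job = FileImportJob('https://example.com/data.csv', auth_client)   # assumed argument order
job.run()
job.refresh()      # poll the asynchronous job (AsyncResource.refresh)
print(job.state)   # the Import API reports progress through this attribute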
[
"Actually",
"creates",
"the",
"import",
"job",
"on",
"the",
"CARTO",
"server"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/file_import.py#L79-L103
|
train
|
CartoDB/carto-python
|
carto/file_import.py
|
FileImportJobManager.filter
|
def filter(self):
"""
Get a filtered list of file imports
:return: A list of file imports, with only the id set (you need to
refresh them if you want all the attributes to be filled in)
:rtype: list of :class:`carto.file_import.FileImportJob`
:raise: CartoException
"""
try:
response = self.send(self.get_collection_endpoint(), "get")
if self.json_collection_attribute is not None:
resource_ids = self.client.get_response_data(
response,
self.Meta.parse_json)[self.json_collection_attribute]
else:
resource_ids = self.client.get_response_data(
response, self.Meta.parse_json)
except Exception as e:
raise CartoException(e)
resources = []
for resource_id in resource_ids:
try:
resource = self.resource_class(self.client)
except (ValueError, TypeError):
continue
else:
setattr(resource, resource.Meta.id_field, resource_id)
resources.append(resource)
return resources
|
python
|
def filter(self):
"""
Get a filtered list of file imports
:return: A list of file imports, with only the id set (you need to
refresh them if you want all the attributes to be filled in)
:rtype: list of :class:`carto.file_import.FileImportJob`
:raise: CartoException
"""
try:
response = self.send(self.get_collection_endpoint(), "get")
if self.json_collection_attribute is not None:
resource_ids = self.client.get_response_data(
response,
self.Meta.parse_json)[self.json_collection_attribute]
else:
resource_ids = self.client.get_response_data(
response, self.Meta.parse_json)
except Exception as e:
raise CartoException(e)
resources = []
for resource_id in resource_ids:
try:
resource = self.resource_class(self.client)
except (ValueError, TypeError):
continue
else:
setattr(resource, resource.Meta.id_field, resource_id)
resources.append(resource)
return resources
|
[
"def",
"filter",
"(",
"self",
")",
":",
"try",
":",
"response",
"=",
"self",
".",
"send",
"(",
"self",
".",
"get_collection_endpoint",
"(",
")",
",",
"\"get\"",
")",
"if",
"self",
".",
"json_collection_attribute",
"is",
"not",
"None",
":",
"resource_ids",
"=",
"self",
".",
"client",
".",
"get_response_data",
"(",
"response",
",",
"self",
".",
"Meta",
".",
"parse_json",
")",
"[",
"self",
".",
"json_collection_attribute",
"]",
"else",
":",
"resource_ids",
"=",
"self",
".",
"client",
".",
"get_response_data",
"(",
"response",
",",
"self",
".",
"Meta",
".",
"parse_json",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"CartoException",
"(",
"e",
")",
"resources",
"=",
"[",
"]",
"for",
"resource_id",
"in",
"resource_ids",
":",
"try",
":",
"resource",
"=",
"self",
".",
"resource_class",
"(",
"self",
".",
"client",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"continue",
"else",
":",
"setattr",
"(",
"resource",
",",
"resource",
".",
"Meta",
".",
"id_field",
",",
"resource_id",
")",
"resources",
".",
"append",
"(",
"resource",
")",
"return",
"resources"
] |
Get a filtered list of file imports
:return: A list of file imports, with only the id set (you need to
refresh them if you want all the attributes to be filled in)
:rtype: list of :class:`carto.file_import.FileImportJob`
:raise: CartoException
|
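A hedged sketch of listing existing import jobs with ``FileImportJobManager.filter``; the manager is assumed to take an auth client in its constructor (consistent with the ``self.client`` usage above), and the credentials are placeholders.

from carto.auth import APIKeyAuthClient
from carto.file_import import FileImportJobManager

auth_client = APIKeyAuthClient(base_url='https://YOUR_USER.carto.com/',
                               api_key='YOUR_API_KEY')   # placeholder credentials

manager = FileImportJobManager(auth_client)   # assumed constructor
for job in manager.filter():                  # only the id field is populated
    job.refresh()                             # fetch the remaining attributes
    print(job.id, job.state)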
[
"Get",
"a",
"filtered",
"list",
"of",
"file",
"imports"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/file_import.py#L117-L150
|
train
|
CartoDB/carto-python
|
carto/auth.py
|
APIKeyAuthClient.send
|
def send(self, relative_path, http_method, **requests_args):
"""
Makes an API-key-authorized request
:param relative_path: URL path relative to self.base_url
:param http_method: HTTP method
:param requests_args: kwargs to be sent to requests
:type relative_path: str
:type http_method: str
:type requests_args: kwargs
:return:
A request response object
:raise:
CartoException
"""
try:
http_method, requests_args = self.prepare_send(http_method, **requests_args)
response = super(APIKeyAuthClient, self).send(relative_path, http_method, **requests_args)
except Exception as e:
raise CartoException(e)
if CartoRateLimitException.is_rate_limited(response):
raise CartoRateLimitException(response)
return response
|
python
|
def send(self, relative_path, http_method, **requests_args):
"""
Makes an API-key-authorized request
:param relative_path: URL path relative to self.base_url
:param http_method: HTTP method
:param requests_args: kwargs to be sent to requests
:type relative_path: str
:type http_method: str
:type requests_args: kwargs
:return:
A request response object
:raise:
CartoException
"""
try:
http_method, requests_args = self.prepare_send(http_method, **requests_args)
response = super(APIKeyAuthClient, self).send(relative_path, http_method, **requests_args)
except Exception as e:
raise CartoException(e)
if CartoRateLimitException.is_rate_limited(response):
raise CartoRateLimitException(response)
return response
|
[
"def",
"send",
"(",
"self",
",",
"relative_path",
",",
"http_method",
",",
"*",
"*",
"requests_args",
")",
":",
"try",
":",
"http_method",
",",
"requests_args",
"=",
"self",
".",
"prepare_send",
"(",
"http_method",
",",
"*",
"*",
"requests_args",
")",
"response",
"=",
"super",
"(",
"APIKeyAuthClient",
",",
"self",
")",
".",
"send",
"(",
"relative_path",
",",
"http_method",
",",
"*",
"*",
"requests_args",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"CartoException",
"(",
"e",
")",
"if",
"CartoRateLimitException",
".",
"is_rate_limited",
"(",
"response",
")",
":",
"raise",
"CartoRateLimitException",
"(",
"response",
")",
"return",
"response"
] |
Makes an API-key-authorized request
:param relative_path: URL path relative to self.base_url
:param http_method: HTTP method
:param requests_args: kwargs to be sent to requests
:type relative_path: str
:type http_method: str
:type requests_args: kwargs
:return:
A request response object
:raise:
CartoException
|
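A hedged sketch of issuing a raw, API-key-authorized request through ``send``; the endpoint path and credentials are placeholders, and the ``carto.exceptions`` import path is an assumption.

from carto.auth import APIKeyAuthClient
from carto.exceptions import CartoException   # assumed module path

auth_client = APIKeyAuthClient(base_url='https://YOUR_USER.carto.com/',
                               api_key='YOUR_API_KEY')   # placeholder credentials
try:
    response = auth_client.send('api/v2/sql', 'get', params={'q': 'SELECT 1'})
    print(response.json())
except CartoException as e:
    print('Request failed:', e)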
[
"Makes",
"an",
"API",
"-",
"key",
"-",
"authorized",
"request"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/auth.py#L128-L154
|
train
|
CartoDB/carto-python
|
carto/auth.py
|
AuthAPIClient.is_valid_api_key
|
def is_valid_api_key(self):
"""
Checks validity. Right now, an API key is considered valid if it
can list user API keys and the result contains that API key.
This might change in the future.
:return: True if the API key is considered valid for current user.
"""
res = self.send('api/v3/api_keys', 'get')
return \
res.ok and \
self.api_key in (ak['token'] for ak in res.json()['result'])
|
python
|
def is_valid_api_key(self):
"""
Checks validity. Right now, an API key is considered valid if it
can list user API keys and the result contains that API key.
This might change in the future.
:return: True if the API key is considered valid for current user.
"""
res = self.send('api/v3/api_keys', 'get')
return \
res.ok and \
self.api_key in (ak['token'] for ak in res.json()['result'])
|
[
"def",
"is_valid_api_key",
"(",
"self",
")",
":",
"res",
"=",
"self",
".",
"send",
"(",
"'api/v3/api_keys'",
",",
"'get'",
")",
"return",
"res",
".",
"ok",
"and",
"self",
".",
"api_key",
"in",
"(",
"ak",
"[",
"'token'",
"]",
"for",
"ak",
"in",
"res",
".",
"json",
"(",
")",
"[",
"'result'",
"]",
")"
] |
Checks validity. Right now, an API key is considered valid if it
can list user API keys and the result contains that API key.
This might change in the future.
:return: True if the API key is considered valid for current user.
|
[
"Checks",
"validity",
".",
"Right",
"now",
"an",
"API",
"key",
"is",
"considered",
"valid",
"if",
"it",
"can",
"list",
"user",
"API",
"keys",
"and",
"the",
"result",
"contains",
"that",
"API",
"key",
".",
"This",
"might",
"change",
"in",
"the",
"future",
"."
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/auth.py#L262-L273
|
train
|
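A sketch of is_valid_api_key with placeholder credentials; AuthAPIClient is assumed to accept the same constructor arguments as APIKeyAuthClient.

from carto.auth import AuthAPIClient

# Placeholder values (assumptions).
auth_api_client = AuthAPIClient(base_url='https://example.carto.com/api/',
                                api_key='YOUR_API_KEY')

if auth_api_client.is_valid_api_key():
    print('API key accepted for this account')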
CartoDB/carto-python
|
carto/datasets.py
|
DatasetManager.send
|
def send(self, url, http_method, **client_args):
"""
Sends an API request, taking into account that datasets are part of
the visualization endpoint.
:param url: Endpoint URL
:param http_method: The method used to make the request to the API
:param client_args: Arguments to be sent to the auth client
:type url: str
:type http_method: str
:type client_args: kwargs
:return: A request response object
:raise: CartoException
"""
try:
client_args = client_args or {}
if "params" not in client_args:
client_args["params"] = {}
client_args["params"].update({"type": "table",
"exclude_shared": "true"})
return super(DatasetManager, self).send(url,
http_method,
**client_args)
except Exception as e:
raise CartoException(e)
|
python
|
def send(self, url, http_method, **client_args):
"""
Sends an API request, taking into account that datasets are part of
the visualization endpoint.
:param url: Endpoint URL
:param http_method: The method used to make the request to the API
:param client_args: Arguments to be sent to the auth client
:type url: str
:type http_method: str
:type client_args: kwargs
:return: A request response object
:raise: CartoException
"""
try:
client_args = client_args or {}
if "params" not in client_args:
client_args["params"] = {}
client_args["params"].update({"type": "table",
"exclude_shared": "true"})
return super(DatasetManager, self).send(url,
http_method,
**client_args)
except Exception as e:
raise CartoException(e)
|
[
"def",
"send",
"(",
"self",
",",
"url",
",",
"http_method",
",",
"*",
"*",
"client_args",
")",
":",
"try",
":",
"client_args",
"=",
"client_args",
"or",
"{",
"}",
"if",
"\"params\"",
"not",
"in",
"client_args",
":",
"client_args",
"[",
"\"params\"",
"]",
"=",
"{",
"}",
"client_args",
"[",
"\"params\"",
"]",
".",
"update",
"(",
"{",
"\"type\"",
":",
"\"table\"",
",",
"\"exclude_shared\"",
":",
"\"true\"",
"}",
")",
"return",
"super",
"(",
"DatasetManager",
",",
"self",
")",
".",
"send",
"(",
"url",
",",
"http_method",
",",
"*",
"*",
"client_args",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"CartoException",
"(",
"e",
")"
] |
Sends an API request, taking into account that datasets are part of
the visualization endpoint.
:param url: Endpoint URL
:param http_method: The method used to make the request to the API
:param client_args: Arguments to be sent to the auth client
:type url: str
:type http_method: str
:type client_args: kwargs
:return: A request response object
:raise: CartoException
|
[
"Sends",
"an",
"API",
"request",
"taking",
"into",
"account",
"that",
"datasets",
"are",
"part",
"of",
"the",
"visualization",
"endpoint",
"."
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/datasets.py#L96-L124
|
train
|
CartoDB/carto-python
|
carto/datasets.py
|
DatasetManager.is_sync_table
|
def is_sync_table(self, archive, interval, **import_args):
"""
Checks if this is a request for a sync dataset.
The condition for creating a sync dataset is to provide a URL or a
connection to an external database and an interval in seconds
:param archive: URL to the file (both remote URLs or local paths are
supported) or StringIO object
:param interval: Interval in seconds.
:param import_args: Connection parameters for an external database
:type url: str
:type interval: int
:type import_args: kwargs
:return: True if it is a sync dataset
"""
return (hasattr(archive, "startswith") and archive.startswith("http")
or "connection" in import_args) \
and interval is not None
|
python
|
def is_sync_table(self, archive, interval, **import_args):
"""
Checks if this is a request for a sync dataset.
The condition for creating a sync dataset is to provide a URL or a
connection to an external database and an interval in seconds
:param archive: URL to the file (both remote URLs or local paths are
supported) or StringIO object
:param interval: Interval in seconds.
:param import_args: Connection parameters for an external database
:type url: str
:type interval: int
:type import_args: kwargs
:return: True if it is a sync dataset
"""
return (hasattr(archive, "startswith") and archive.startswith("http")
or "connection" in import_args) \
and interval is not None
|
[
"def",
"is_sync_table",
"(",
"self",
",",
"archive",
",",
"interval",
",",
"*",
"*",
"import_args",
")",
":",
"return",
"(",
"hasattr",
"(",
"archive",
",",
"\"startswith\"",
")",
"and",
"archive",
".",
"startswith",
"(",
"\"http\"",
")",
"or",
"\"connection\"",
"in",
"import_args",
")",
"and",
"interval",
"is",
"not",
"None"
] |
Checks if this is a request for a sync dataset.
The condition for creating a sync dataset is to provide a URL or a
connection to an external database and an interval in seconds
:param archive: URL to the file (both remote URLs or local paths are
supported) or StringIO object
:param interval: Interval in seconds.
:param import_args: Connection parameters for an external database
:type url: str
:type interval: int
:type import_args: kwargs
:return: True if it is a sync dataset
|
[
"Checks",
"if",
"this",
"is",
"a",
"request",
"for",
"a",
"sync",
"dataset",
"."
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/datasets.py#L126-L146
|
train
|
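A sketch of how is_sync_table evaluates its arguments, with a placeholder client; only a remote URL (or an external connection) combined with a non-None interval counts as a sync table, and the check itself does not hit the network.

from carto.auth import APIKeyAuthClient
from carto.datasets import DatasetManager

# Placeholder credentials (assumptions).
auth_client = APIKeyAuthClient(base_url='https://example.carto.com/api/',
                               api_key='YOUR_API_KEY')
manager = DatasetManager(auth_client)

print(manager.is_sync_table('http://example.com/data.csv', 3600))  # True: remote URL + interval
print(manager.is_sync_table('/tmp/data.csv', 3600))                # False: local path
print(manager.is_sync_table('http://example.com/data.csv', None))  # False: no interval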
CartoDB/carto-python
|
carto/datasets.py
|
DatasetManager.create
|
def create(self, archive, interval=None, **import_args):
"""
Creating a table means uploading a file or setting up a sync table
:param archive: URL to the file (both remote URLs or local paths are
supported) or StringIO object
:param interval: Interval in seconds.
If not None, CARTO will try to set up a sync table
against the (remote) URL
:param import_args: Arguments to be sent to the import job when run
:type archive: str
:type interval: int
:type import_args: kwargs
:return: New dataset object
:rtype: Dataset
:raise: CartoException
"""
archive = archive.lower() if hasattr(archive, "lower") else archive
if self.is_sync_table(archive, interval, **import_args):
manager = SyncTableJobManager(self.client)
else:
manager = FileImportJobManager(self.client)
import_job = manager.create(archive) if interval is None \
else manager.create(archive, interval)
import_job.run(**import_args)
if import_job.get_id() is None:
raise CartoException(_("Import API returned corrupt job details \
when creating dataset"))
import_job.refresh()
count = 0
while import_job.state in ("enqueued", "queued", "pending", "uploading",
"unpacking", "importing", "guessing") \
or (isinstance(manager, SyncTableJobManager)
and import_job.state == "created"):
if count >= MAX_NUMBER_OF_RETRIES:
raise CartoException(_("Maximum number of retries exceeded \
when polling the import API for \
dataset creation"))
time.sleep(INTERVAL_BETWEEN_RETRIES_S)
import_job.refresh()
count += 1
if import_job.state == "failure":
raise CartoException(_("Dataset creation was not successful \
because of failed import (error: {error}")
.format(error=json.dumps(
import_job.get_error_text)))
if (import_job.state != "complete" and import_job.state != "created"
and import_job.state != "success") \
or import_job.success is False:
raise CartoException(_("Dataset creation was not successful \
because of unknown import error"))
if hasattr(import_job, "visualization_id") \
and import_job.visualization_id is not None:
visualization_id = import_job.visualization_id
else:
table = TableManager(self.client).get(import_job.table_id)
visualization_id = table.table_visualization.get_id() \
if table is not None else None
try:
return self.get(visualization_id) if visualization_id is not None \
else None
except AttributeError:
raise CartoException(_("Dataset creation was not successful \
because of unknown error"))
|
python
|
def create(self, archive, interval=None, **import_args):
"""
Creating a table means uploading a file or setting up a sync table
:param archive: URL to the file (both remote URLs or local paths are
supported) or StringIO object
:param interval: Interval in seconds.
If not None, CARTO will try to set up a sync table
against the (remote) URL
:param import_args: Arguments to be sent to the import job when run
:type archive: str
:type interval: int
:type import_args: kwargs
:return: New dataset object
:rtype: Dataset
:raise: CartoException
"""
archive = archive.lower() if hasattr(archive, "lower") else archive
if self.is_sync_table(archive, interval, **import_args):
manager = SyncTableJobManager(self.client)
else:
manager = FileImportJobManager(self.client)
import_job = manager.create(archive) if interval is None \
else manager.create(archive, interval)
import_job.run(**import_args)
if import_job.get_id() is None:
raise CartoException(_("Import API returned corrupt job details \
when creating dataset"))
import_job.refresh()
count = 0
while import_job.state in ("enqueued", "queued", "pending", "uploading",
"unpacking", "importing", "guessing") \
or (isinstance(manager, SyncTableJobManager)
and import_job.state == "created"):
if count >= MAX_NUMBER_OF_RETRIES:
raise CartoException(_("Maximum number of retries exceeded \
when polling the import API for \
dataset creation"))
time.sleep(INTERVAL_BETWEEN_RETRIES_S)
import_job.refresh()
count += 1
if import_job.state == "failure":
raise CartoException(_("Dataset creation was not successful \
because of failed import (error: {error}")
.format(error=json.dumps(
import_job.get_error_text)))
if (import_job.state != "complete" and import_job.state != "created"
and import_job.state != "success") \
or import_job.success is False:
raise CartoException(_("Dataset creation was not successful \
because of unknown import error"))
if hasattr(import_job, "visualization_id") \
and import_job.visualization_id is not None:
visualization_id = import_job.visualization_id
else:
table = TableManager(self.client).get(import_job.table_id)
visualization_id = table.table_visualization.get_id() \
if table is not None else None
try:
return self.get(visualization_id) if visualization_id is not None \
else None
except AttributeError:
raise CartoException(_("Dataset creation was not successful \
because of unknown error"))
|
[
"def",
"create",
"(",
"self",
",",
"archive",
",",
"interval",
"=",
"None",
",",
"*",
"*",
"import_args",
")",
":",
"archive",
"=",
"archive",
".",
"lower",
"(",
")",
"if",
"hasattr",
"(",
"archive",
",",
"\"lower\"",
")",
"else",
"archive",
"if",
"self",
".",
"is_sync_table",
"(",
"archive",
",",
"interval",
",",
"*",
"*",
"import_args",
")",
":",
"manager",
"=",
"SyncTableJobManager",
"(",
"self",
".",
"client",
")",
"else",
":",
"manager",
"=",
"FileImportJobManager",
"(",
"self",
".",
"client",
")",
"import_job",
"=",
"manager",
".",
"create",
"(",
"archive",
")",
"if",
"interval",
"is",
"None",
"else",
"manager",
".",
"create",
"(",
"archive",
",",
"interval",
")",
"import_job",
".",
"run",
"(",
"*",
"*",
"import_args",
")",
"if",
"import_job",
".",
"get_id",
"(",
")",
"is",
"None",
":",
"raise",
"CartoException",
"(",
"_",
"(",
"\"Import API returned corrupt job details \\\n when creating dataset\"",
")",
")",
"import_job",
".",
"refresh",
"(",
")",
"count",
"=",
"0",
"while",
"import_job",
".",
"state",
"in",
"(",
"\"enqueued\"",
",",
"\"queued\"",
",",
"\"pending\"",
",",
"\"uploading\"",
",",
"\"unpacking\"",
",",
"\"importing\"",
",",
"\"guessing\"",
")",
"or",
"(",
"isinstance",
"(",
"manager",
",",
"SyncTableJobManager",
")",
"and",
"import_job",
".",
"state",
"==",
"\"created\"",
")",
":",
"if",
"count",
">=",
"MAX_NUMBER_OF_RETRIES",
":",
"raise",
"CartoException",
"(",
"_",
"(",
"\"Maximum number of retries exceeded \\\n when polling the import API for \\\n dataset creation\"",
")",
")",
"time",
".",
"sleep",
"(",
"INTERVAL_BETWEEN_RETRIES_S",
")",
"import_job",
".",
"refresh",
"(",
")",
"count",
"+=",
"1",
"if",
"import_job",
".",
"state",
"==",
"\"failure\"",
":",
"raise",
"CartoException",
"(",
"_",
"(",
"\"Dataset creation was not successful \\\n because of failed import (error: {error}\"",
")",
".",
"format",
"(",
"error",
"=",
"json",
".",
"dumps",
"(",
"import_job",
".",
"get_error_text",
")",
")",
")",
"if",
"(",
"import_job",
".",
"state",
"!=",
"\"complete\"",
"and",
"import_job",
".",
"state",
"!=",
"\"created\"",
"and",
"import_job",
".",
"state",
"!=",
"\"success\"",
")",
"or",
"import_job",
".",
"success",
"is",
"False",
":",
"raise",
"CartoException",
"(",
"_",
"(",
"\"Dataset creation was not successful \\\n because of unknown import error\"",
")",
")",
"if",
"hasattr",
"(",
"import_job",
",",
"\"visualization_id\"",
")",
"and",
"import_job",
".",
"visualization_id",
"is",
"not",
"None",
":",
"visualization_id",
"=",
"import_job",
".",
"visualization_id",
"else",
":",
"table",
"=",
"TableManager",
"(",
"self",
".",
"client",
")",
".",
"get",
"(",
"import_job",
".",
"table_id",
")",
"visualization_id",
"=",
"table",
".",
"table_visualization",
".",
"get_id",
"(",
")",
"if",
"table",
"is",
"not",
"None",
"else",
"None",
"try",
":",
"return",
"self",
".",
"get",
"(",
"visualization_id",
")",
"if",
"visualization_id",
"is",
"not",
"None",
"else",
"None",
"except",
"AttributeError",
":",
"raise",
"CartoException",
"(",
"_",
"(",
"\"Dataset creation was not successful \\\n because of unknown error\"",
")",
")"
] |
Creating a table means uploading a file or setting up a sync table
:param archive: URL to the file (both remote URLs or local paths are
supported) or StringIO object
:param interval: Interval in seconds.
If not None, CARTO will try to set up a sync table
against the (remote) URL
:param import_args: Arguments to be sent to the import job when run
:type archive: str
:type interval: int
:type import_args: kwargs
:return: New dataset object
:rtype: Dataset
:raise: CartoException
|
[
"Creating",
"a",
"table",
"means",
"uploading",
"a",
"file",
"or",
"setting",
"up",
"a",
"sync",
"table"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/datasets.py#L148-L222
|
train
|
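A usage sketch for DatasetManager.create with placeholder credentials, paths and URLs; a plain call runs a one-off import, while an interval against a remote URL requests a sync table.

from carto.auth import APIKeyAuthClient
from carto.datasets import DatasetManager

# Placeholder credentials (assumptions).
auth_client = APIKeyAuthClient(base_url='https://example.carto.com/api/',
                               api_key='YOUR_API_KEY')
dataset_manager = DatasetManager(auth_client)

# One-off import of a local file (the path is a placeholder).
dataset = dataset_manager.create('/tmp/points.csv')

# Sync table refreshed every hour from a remote URL (the URL is a placeholder).
synced = dataset_manager.create('http://example.com/points.csv', interval=3600)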
CartoDB/carto-python
|
carto/visualizations.py
|
VisualizationManager.send
|
def send(self, url, http_method, **client_args):
"""
Sends API request, taking into account that visualizations are only a
subset of the resources available at the visualization endpoint
:param url: Endpoint URL
:param http_method: The method used to make the request to the API
:param client_args: Arguments to be sent to the auth client
:type url: str
:type http_method: str
:type client_args: kwargs
:return:
:raise: CartoException
"""
try:
client_args.setdefault('params', {})
client_args["params"].update({"type": "derived",
"exclude_shared": "true"})
return super(VisualizationManager, self).send(url,
http_method,
**client_args)
except Exception as e:
raise CartoException(e)
|
python
|
def send(self, url, http_method, **client_args):
"""
Sends API request, taking into account that visualizations are only a
subset of the resources available at the visualization endpoint
:param url: Endpoint URL
:param http_method: The method used to make the request to the API
:param client_args: Arguments to be sent to the auth client
:type url: str
:type http_method: str
:type client_args: kwargs
:return:
:raise: CartoException
"""
try:
client_args.setdefault('params', {})
client_args["params"].update({"type": "derived",
"exclude_shared": "true"})
return super(VisualizationManager, self).send(url,
http_method,
**client_args)
except Exception as e:
raise CartoException(e)
|
[
"def",
"send",
"(",
"self",
",",
"url",
",",
"http_method",
",",
"*",
"*",
"client_args",
")",
":",
"try",
":",
"client_args",
".",
"setdefault",
"(",
"'params'",
",",
"{",
"}",
")",
"client_args",
"[",
"\"params\"",
"]",
".",
"update",
"(",
"{",
"\"type\"",
":",
"\"derived\"",
",",
"\"exclude_shared\"",
":",
"\"true\"",
"}",
")",
"return",
"super",
"(",
"VisualizationManager",
",",
"self",
")",
".",
"send",
"(",
"url",
",",
"http_method",
",",
"*",
"*",
"client_args",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"CartoException",
"(",
"e",
")"
] |
Sends API request, taking into account that visualizations are only a
subset of the resources available at the visualization endpoint
:param url: Endpoint URL
:param http_method: The method used to make the request to the API
:param client_args: Arguments to be sent to the auth client
:type url: str
:type http_method: str
:type client_args: kwargs
:return:
:raise: CartoException
|
[
"Sends",
"API",
"request",
"taking",
"into",
"account",
"that",
"visualizations",
"are",
"only",
"a",
"subset",
"of",
"the",
"resources",
"available",
"at",
"the",
"visualization",
"endpoint"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/visualizations.py#L130-L155
|
train
|
CartoDB/carto-python
|
carto/exceptions.py
|
CartoRateLimitException.is_rate_limited
|
def is_rate_limited(response):
"""
Checks if the response has been rate limited by CARTO APIs
:param response: The response rate limited by CARTO APIs
:type response: requests.models.Response class
:return: Boolean
"""
if (response.status_code == codes.too_many_requests and 'Retry-After' in response.headers and
int(response.headers['Retry-After']) >= 0):
return True
return False
|
python
|
def is_rate_limited(response):
"""
Checks if the response has been rate limited by CARTO APIs
:param response: The response rate limited by CARTO APIs
:type response: requests.models.Response class
:return: Boolean
"""
if (response.status_code == codes.too_many_requests and 'Retry-After' in response.headers and
int(response.headers['Retry-After']) >= 0):
return True
return False
|
[
"def",
"is_rate_limited",
"(",
"response",
")",
":",
"if",
"(",
"response",
".",
"status_code",
"==",
"codes",
".",
"too_many_requests",
"and",
"'Retry-After'",
"in",
"response",
".",
"headers",
"and",
"int",
"(",
"response",
".",
"headers",
"[",
"'Retry-After'",
"]",
")",
">=",
"0",
")",
":",
"return",
"True",
"return",
"False"
] |
Checks if the response has been rate limited by CARTO APIs
:param response: The response rate limited by CARTO APIs
:type response: requests.models.Response class
:return: Boolean
|
[
"Checks",
"if",
"the",
"response",
"has",
"been",
"rate",
"limited",
"by",
"CARTO",
"APIs"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/exceptions.py#L46-L59
|
train
|
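A sketch showing how is_rate_limited is typically applied to a raw requests response, mirroring the check performed in APIKeyAuthClient.send above; the URL and key are placeholders.

import requests
from carto.exceptions import CartoRateLimitException

# Placeholder URL and key (assumptions).
resp = requests.get('https://example.carto.com/api/v2/sql',
                    params={'q': 'SELECT 1', 'api_key': 'YOUR_API_KEY'})

# Raise the dedicated exception when the APIs answer 429 with a Retry-After header.
if CartoRateLimitException.is_rate_limited(resp):
    raise CartoRateLimitException(resp)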
CartoDB/carto-python
|
carto/maps.py
|
NamedMap.update_from_dict
|
def update_from_dict(self, attribute_dict):
"""
        Method overridden from the base class
"""
if 'template' in attribute_dict:
self.update_from_dict(attribute_dict['template'])
setattr(self,
self.Meta.id_field, attribute_dict['template']['name'])
return
try:
for k, v in attribute_dict.items():
setattr(self, k, v)
except Exception:
setattr(self, self.Meta.id_field, attribute_dict)
|
python
|
def update_from_dict(self, attribute_dict):
"""
        Method overridden from the base class
"""
if 'template' in attribute_dict:
self.update_from_dict(attribute_dict['template'])
setattr(self,
self.Meta.id_field, attribute_dict['template']['name'])
return
try:
for k, v in attribute_dict.items():
setattr(self, k, v)
except Exception:
setattr(self, self.Meta.id_field, attribute_dict)
|
[
"def",
"update_from_dict",
"(",
"self",
",",
"attribute_dict",
")",
":",
"if",
"'template'",
"in",
"attribute_dict",
":",
"self",
".",
"update_from_dict",
"(",
"attribute_dict",
"[",
"'template'",
"]",
")",
"setattr",
"(",
"self",
",",
"self",
".",
"Meta",
".",
"id_field",
",",
"attribute_dict",
"[",
"'template'",
"]",
"[",
"'name'",
"]",
")",
"return",
"try",
":",
"for",
"k",
",",
"v",
"in",
"attribute_dict",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"self",
",",
"k",
",",
"v",
")",
"except",
"Exception",
":",
"setattr",
"(",
"self",
",",
"self",
".",
"Meta",
".",
"id_field",
",",
"attribute_dict",
")"
] |
Method overridden from the base class
|
[
"Method",
"overriden",
"from",
"the",
"base",
"class"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/maps.py#L165-L179
|
train
|
CartoDB/carto-python
|
carto/resources.py
|
AsyncResource.run
|
def run(self, **client_params):
"""
Actually creates the async job on the CARTO server
        :param client_params: To be sent to the CARTO API. See CARTO's
documentation depending on the subclass
you are using
:type client_params: kwargs
:return:
:raise: CartoException
"""
try:
self.send(self.get_collection_endpoint(),
http_method="POST",
**client_params)
except Exception as e:
raise CartoException(e)
|
python
|
def run(self, **client_params):
"""
Actually creates the async job on the CARTO server
        :param client_params: To be sent to the CARTO API. See CARTO's
documentation depending on the subclass
you are using
:type client_params: kwargs
:return:
:raise: CartoException
"""
try:
self.send(self.get_collection_endpoint(),
http_method="POST",
**client_params)
except Exception as e:
raise CartoException(e)
|
[
"def",
"run",
"(",
"self",
",",
"*",
"*",
"client_params",
")",
":",
"try",
":",
"self",
".",
"send",
"(",
"self",
".",
"get_collection_endpoint",
"(",
")",
",",
"http_method",
"=",
"\"POST\"",
",",
"*",
"*",
"client_params",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"CartoException",
"(",
"e",
")"
] |
Actually creates the async job on the CARTO server
:param client_params: To be sent to the CARTO API. See CARTO's
documentation depending on the subclass
you are using
:type client_params: kwargs
:return:
:raise: CartoException
|
[
"Actually",
"creates",
"the",
"async",
"job",
"on",
"the",
"CARTO",
"server"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/resources.py#L22-L41
|
train
|
CartoDB/carto-python
|
carto/sql.py
|
SQLClient.send
|
def send(self, sql, parse_json=True, do_post=True, format=None, **request_args):
"""
Executes SQL query in a CARTO server
:param sql: The SQL
        :param parse_json: Set it to False if you want the raw response
:param do_post: Set it to True to force post request
:param format: Any of the data export formats allowed by CARTO's
SQL API
:param request_args: Additional parameters to send with the request
:type sql: str
:type parse_json: boolean
:type do_post: boolean
:type format: str
:type request_args: dictionary
:return: response data, either as json or as a regular
response.content object
:rtype: object
:raise: CartoException
"""
try:
params = {'q': sql}
if format:
params['format'] = format
if format not in ['json', 'geojson']:
parse_json = False
if request_args is not None:
for attr in request_args:
params[attr] = request_args[attr]
if len(sql) < MAX_GET_QUERY_LEN and do_post is False:
resp = self.auth_client.send(self.api_url,
'GET',
params=params)
else:
resp = self.auth_client.send(self.api_url, 'POST', data=params)
return self.auth_client.get_response_data(resp, parse_json)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e)
|
python
|
def send(self, sql, parse_json=True, do_post=True, format=None, **request_args):
"""
Executes SQL query in a CARTO server
:param sql: The SQL
        :param parse_json: Set it to False if you want the raw response
:param do_post: Set it to True to force post request
:param format: Any of the data export formats allowed by CARTO's
SQL API
:param request_args: Additional parameters to send with the request
:type sql: str
:type parse_json: boolean
:type do_post: boolean
:type format: str
:type request_args: dictionary
:return: response data, either as json or as a regular
response.content object
:rtype: object
:raise: CartoException
"""
try:
params = {'q': sql}
if format:
params['format'] = format
if format not in ['json', 'geojson']:
parse_json = False
if request_args is not None:
for attr in request_args:
params[attr] = request_args[attr]
if len(sql) < MAX_GET_QUERY_LEN and do_post is False:
resp = self.auth_client.send(self.api_url,
'GET',
params=params)
else:
resp = self.auth_client.send(self.api_url, 'POST', data=params)
return self.auth_client.get_response_data(resp, parse_json)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e)
|
[
"def",
"send",
"(",
"self",
",",
"sql",
",",
"parse_json",
"=",
"True",
",",
"do_post",
"=",
"True",
",",
"format",
"=",
"None",
",",
"*",
"*",
"request_args",
")",
":",
"try",
":",
"params",
"=",
"{",
"'q'",
":",
"sql",
"}",
"if",
"format",
":",
"params",
"[",
"'format'",
"]",
"=",
"format",
"if",
"format",
"not",
"in",
"[",
"'json'",
",",
"'geojson'",
"]",
":",
"parse_json",
"=",
"False",
"if",
"request_args",
"is",
"not",
"None",
":",
"for",
"attr",
"in",
"request_args",
":",
"params",
"[",
"attr",
"]",
"=",
"request_args",
"[",
"attr",
"]",
"if",
"len",
"(",
"sql",
")",
"<",
"MAX_GET_QUERY_LEN",
"and",
"do_post",
"is",
"False",
":",
"resp",
"=",
"self",
".",
"auth_client",
".",
"send",
"(",
"self",
".",
"api_url",
",",
"'GET'",
",",
"params",
"=",
"params",
")",
"else",
":",
"resp",
"=",
"self",
".",
"auth_client",
".",
"send",
"(",
"self",
".",
"api_url",
",",
"'POST'",
",",
"data",
"=",
"params",
")",
"return",
"self",
".",
"auth_client",
".",
"get_response_data",
"(",
"resp",
",",
"parse_json",
")",
"except",
"CartoRateLimitException",
"as",
"e",
":",
"raise",
"e",
"except",
"Exception",
"as",
"e",
":",
"raise",
"CartoException",
"(",
"e",
")"
] |
Executes SQL query in a CARTO server
:param sql: The SQL
:param parse_json: Set it to False if you want the raw response
:param do_post: Set it to True to force post request
:param format: Any of the data export formats allowed by CARTO's
SQL API
:param request_args: Additional parameters to send with the request
:type sql: str
:type parse_json: boolean
:type do_post: boolean
:type format: str
:type request_args: dictionary
:return: response data, either as json or as a regular
response.content object
:rtype: object
:raise: CartoException
|
[
"Executes",
"SQL",
"query",
"in",
"a",
"CARTO",
"server"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/sql.py#L70-L114
|
train
|
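A usage sketch for SQLClient.send with placeholder credentials and table names; JSON answers are parsed by default, while other export formats come back unparsed.

from carto.auth import APIKeyAuthClient
from carto.sql import SQLClient

# Placeholder credentials (assumptions).
auth_client = APIKeyAuthClient(base_url='https://example.carto.com/api/',
                               api_key='YOUR_API_KEY')
sql = SQLClient(auth_client)

# Default: the JSON answer is parsed into a dict with a 'rows' key.
data = sql.send('SELECT count(*) AS n FROM mytable')
print(data['rows'])

# Export formats other than json/geojson are returned unparsed.
csv_data = sql.send('SELECT * FROM mytable LIMIT 10', format='csv')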
CartoDB/carto-python
|
carto/sql.py
|
BatchSQLClient.send
|
def send(self, url, http_method, json_body=None, http_header=None):
"""
Executes Batch SQL query in a CARTO server
:param url: Endpoint url
:param http_method: The method used to make the request to the API
:param json_body: The information that needs to be sent, by default
is set to None
:param http_header: The header used to make write requests to the API,
by default is none
:type url: str
:type http_method: str
:type json_body: dict
:type http_header: str
:return: Response data, either as json or as a regular response.content
object
:rtype: object
:raise: CartoException
"""
try:
data = self.client.send(url,
http_method=http_method,
headers=http_header,
json=json_body)
data_json = self.client.get_response_data(data)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e)
return data_json
|
python
|
def send(self, url, http_method, json_body=None, http_header=None):
"""
Executes Batch SQL query in a CARTO server
:param url: Endpoint url
:param http_method: The method used to make the request to the API
:param json_body: The information that needs to be sent, by default
is set to None
:param http_header: The header used to make write requests to the API,
by default is none
:type url: str
:type http_method: str
:type json_body: dict
:type http_header: str
:return: Response data, either as json or as a regular response.content
object
:rtype: object
:raise: CartoException
"""
try:
data = self.client.send(url,
http_method=http_method,
headers=http_header,
json=json_body)
data_json = self.client.get_response_data(data)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e)
return data_json
|
[
"def",
"send",
"(",
"self",
",",
"url",
",",
"http_method",
",",
"json_body",
"=",
"None",
",",
"http_header",
"=",
"None",
")",
":",
"try",
":",
"data",
"=",
"self",
".",
"client",
".",
"send",
"(",
"url",
",",
"http_method",
"=",
"http_method",
",",
"headers",
"=",
"http_header",
",",
"json",
"=",
"json_body",
")",
"data_json",
"=",
"self",
".",
"client",
".",
"get_response_data",
"(",
"data",
")",
"except",
"CartoRateLimitException",
"as",
"e",
":",
"raise",
"e",
"except",
"Exception",
"as",
"e",
":",
"raise",
"CartoException",
"(",
"e",
")",
"return",
"data_json"
] |
Executes Batch SQL query in a CARTO server
:param url: Endpoint url
:param http_method: The method used to make the request to the API
:param json_body: The information that needs to be sent, by default
is set to None
:param http_header: The header used to make write requests to the API,
by default is none
:type url: str
:type http_method: str
:type json_body: dict
:type http_header: str
:return: Response data, either as json or as a regular response.content
object
:rtype: object
:raise: CartoException
|
[
"Executes",
"Batch",
"SQL",
"query",
"in",
"a",
"CARTO",
"server"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/sql.py#L149-L180
|
train
|
CartoDB/carto-python
|
carto/sql.py
|
BatchSQLClient.create
|
def create(self, sql_query):
"""
Creates a new batch SQL query.
Batch SQL jobs are asynchronous, once created you should call
:func:`carto.sql.BatchSQLClient.read` method given the `job_id`
to retrieve the state of the batch query
:param sql_query: The SQL query to be used
:type sql_query: str or list of str
:return: Response data, either as json or as a regular response.content
object
:rtype: object
:raise: CartoException
"""
header = {'content-type': 'application/json'}
data = self.send(self.api_url,
http_method="POST",
json_body={"query": sql_query},
http_header=header)
return data
|
python
|
def create(self, sql_query):
"""
Creates a new batch SQL query.
Batch SQL jobs are asynchronous, once created you should call
:func:`carto.sql.BatchSQLClient.read` method given the `job_id`
to retrieve the state of the batch query
:param sql_query: The SQL query to be used
:type sql_query: str or list of str
:return: Response data, either as json or as a regular response.content
object
:rtype: object
:raise: CartoException
"""
header = {'content-type': 'application/json'}
data = self.send(self.api_url,
http_method="POST",
json_body={"query": sql_query},
http_header=header)
return data
|
[
"def",
"create",
"(",
"self",
",",
"sql_query",
")",
":",
"header",
"=",
"{",
"'content-type'",
":",
"'application/json'",
"}",
"data",
"=",
"self",
".",
"send",
"(",
"self",
".",
"api_url",
",",
"http_method",
"=",
"\"POST\"",
",",
"json_body",
"=",
"{",
"\"query\"",
":",
"sql_query",
"}",
",",
"http_header",
"=",
"header",
")",
"return",
"data"
] |
Creates a new batch SQL query.
Batch SQL jobs are asynchronous, once created you should call
:func:`carto.sql.BatchSQLClient.read` method given the `job_id`
to retrieve the state of the batch query
:param sql_query: The SQL query to be used
:type sql_query: str or list of str
:return: Response data, either as json or as a regular response.content
object
:rtype: object
:raise: CartoException
|
[
"Creates",
"a",
"new",
"batch",
"SQL",
"query",
"."
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/sql.py#L182-L204
|
train
|
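A sketch of creating a Batch SQL job and polling it by hand with read; credentials and the query are placeholders.

from carto.auth import APIKeyAuthClient
from carto.sql import BatchSQLClient

# Placeholder credentials (assumptions).
auth_client = APIKeyAuthClient(base_url='https://example.carto.com/api/',
                               api_key='YOUR_API_KEY')
batch = BatchSQLClient(auth_client)

# The query (or list of queries) is a placeholder.
job = batch.create(['UPDATE mytable SET processed = TRUE'])
print(job['job_id'], job['status'])

# Poll the job later with the job_id returned at creation time.
print(batch.read(job['job_id'])['status'])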
CartoDB/carto-python
|
carto/sql.py
|
BatchSQLClient.create_and_wait_for_completion
|
def create_and_wait_for_completion(self, sql_query):
"""
Creates a new batch SQL query and waits for its completion or failure
Batch SQL jobs are asynchronous, once created this method
automatically queries the job status until it's one of 'done',
'failed', 'canceled', 'unknown'
:param sql_query: The SQL query to be used
:type sql_query: str or list of str
:return: Response data, either as json or as a regular response.content
object
:rtype: object
:raise: CartoException when there's an exception in the BatchSQLJob execution or the batch job status is one of the BATCH_JOBS_FAILED_STATUSES ('failed', 'canceled', 'unknown')
"""
header = {'content-type': 'application/json'}
data = self.send(self.api_url,
http_method="POST",
json_body={"query": sql_query},
http_header=header)
warnings.warn('Batch SQL job created with job_id: {job_id}'.format(job_id=data['job_id']))
while data and data['status'] in BATCH_JOBS_PENDING_STATUSES:
time.sleep(BATCH_READ_STATUS_AFTER_SECONDS)
data = self.read(data['job_id'])
if data['status'] in BATCH_JOBS_FAILED_STATUSES:
raise CartoException(_("Batch SQL job failed with result: {data}".format(data=data)))
return data
|
python
|
def create_and_wait_for_completion(self, sql_query):
"""
Creates a new batch SQL query and waits for its completion or failure
Batch SQL jobs are asynchronous, once created this method
automatically queries the job status until it's one of 'done',
'failed', 'canceled', 'unknown'
:param sql_query: The SQL query to be used
:type sql_query: str or list of str
:return: Response data, either as json or as a regular response.content
object
:rtype: object
:raise: CartoException when there's an exception in the BatchSQLJob execution or the batch job status is one of the BATCH_JOBS_FAILED_STATUSES ('failed', 'canceled', 'unknown')
"""
header = {'content-type': 'application/json'}
data = self.send(self.api_url,
http_method="POST",
json_body={"query": sql_query},
http_header=header)
warnings.warn('Batch SQL job created with job_id: {job_id}'.format(job_id=data['job_id']))
while data and data['status'] in BATCH_JOBS_PENDING_STATUSES:
time.sleep(BATCH_READ_STATUS_AFTER_SECONDS)
data = self.read(data['job_id'])
if data['status'] in BATCH_JOBS_FAILED_STATUSES:
raise CartoException(_("Batch SQL job failed with result: {data}".format(data=data)))
return data
|
[
"def",
"create_and_wait_for_completion",
"(",
"self",
",",
"sql_query",
")",
":",
"header",
"=",
"{",
"'content-type'",
":",
"'application/json'",
"}",
"data",
"=",
"self",
".",
"send",
"(",
"self",
".",
"api_url",
",",
"http_method",
"=",
"\"POST\"",
",",
"json_body",
"=",
"{",
"\"query\"",
":",
"sql_query",
"}",
",",
"http_header",
"=",
"header",
")",
"warnings",
".",
"warn",
"(",
"'Batch SQL job created with job_id: {job_id}'",
".",
"format",
"(",
"job_id",
"=",
"data",
"[",
"'job_id'",
"]",
")",
")",
"while",
"data",
"and",
"data",
"[",
"'status'",
"]",
"in",
"BATCH_JOBS_PENDING_STATUSES",
":",
"time",
".",
"sleep",
"(",
"BATCH_READ_STATUS_AFTER_SECONDS",
")",
"data",
"=",
"self",
".",
"read",
"(",
"data",
"[",
"'job_id'",
"]",
")",
"if",
"data",
"[",
"'status'",
"]",
"in",
"BATCH_JOBS_FAILED_STATUSES",
":",
"raise",
"CartoException",
"(",
"_",
"(",
"\"Batch SQL job failed with result: {data}\"",
".",
"format",
"(",
"data",
"=",
"data",
")",
")",
")",
"return",
"data"
] |
Creates a new batch SQL query and waits for its completion or failure
Batch SQL jobs are asynchronous, once created this method
automatically queries the job status until it's one of 'done',
'failed', 'canceled', 'unknown'
:param sql_query: The SQL query to be used
:type sql_query: str or list of str
:return: Response data, either as json or as a regular response.content
object
:rtype: object
:raise: CartoException when there's an exception in the BatchSQLJob execution or the batch job status is one of the BATCH_JOBS_FAILED_STATUSES ('failed', 'canceled', 'unknown')
|
[
"Creates",
"a",
"new",
"batch",
"SQL",
"query",
"and",
"waits",
"for",
"its",
"completion",
"or",
"failure"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/sql.py#L206-L238
|
train
|
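A sketch of the blocking variant; it reuses the batch client from the previous sketch (an assumption) and surfaces failed, cancelled or unknown jobs as CartoException.

from carto.exceptions import CartoException

# Reuses the `batch` client built in the previous sketch (assumption).
try:
    result = batch.create_and_wait_for_completion('UPDATE mytable SET processed = TRUE')
    print(result['status'])  # 'done' when the job succeeds
except CartoException as e:
    print('Batch job failed or was cancelled:', e)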
CartoDB/carto-python
|
carto/sql.py
|
BatchSQLClient.read
|
def read(self, job_id):
"""
Reads the information for a specific Batch API request
:param job_id: The id of the job to be read from
:type job_id: str
:return: Response data, either as json or as a regular response.content
object
:rtype: object
:raise: CartoException
"""
data = self.send(self.api_url + job_id, http_method="GET")
return data
|
python
|
def read(self, job_id):
"""
Reads the information for a specific Batch API request
:param job_id: The id of the job to be read from
:type job_id: str
:return: Response data, either as json or as a regular response.content
object
:rtype: object
:raise: CartoException
"""
data = self.send(self.api_url + job_id, http_method="GET")
return data
|
[
"def",
"read",
"(",
"self",
",",
"job_id",
")",
":",
"data",
"=",
"self",
".",
"send",
"(",
"self",
".",
"api_url",
"+",
"job_id",
",",
"http_method",
"=",
"\"GET\"",
")",
"return",
"data"
] |
Reads the information for a specific Batch API request
:param job_id: The id of the job to be read from
:type job_id: str
:return: Response data, either as json or as a regular response.content
object
:rtype: object
:raise: CartoException
|
[
"Reads",
"the",
"information",
"for",
"a",
"specific",
"Batch",
"API",
"request"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/sql.py#L240-L254
|
train
|
CartoDB/carto-python
|
carto/sql.py
|
BatchSQLClient.update
|
def update(self, job_id, sql_query):
"""
Updates the sql query of a specific job
:param job_id: The id of the job to be updated
:param sql_query: The new SQL query for the job
:type job_id: str
:type sql_query: str
:return: Response data, either as json or as a regular response.content
object
:rtype: object
:raise: CartoException
"""
header = {'content-type': 'application/json'}
data = self.send(self.api_url + job_id,
http_method="PUT",
json_body={"query": sql_query},
http_header=header)
return data
|
python
|
def update(self, job_id, sql_query):
"""
Updates the sql query of a specific job
:param job_id: The id of the job to be updated
:param sql_query: The new SQL query for the job
:type job_id: str
:type sql_query: str
:return: Response data, either as json or as a regular response.content
object
:rtype: object
:raise: CartoException
"""
header = {'content-type': 'application/json'}
data = self.send(self.api_url + job_id,
http_method="PUT",
json_body={"query": sql_query},
http_header=header)
return data
|
[
"def",
"update",
"(",
"self",
",",
"job_id",
",",
"sql_query",
")",
":",
"header",
"=",
"{",
"'content-type'",
":",
"'application/json'",
"}",
"data",
"=",
"self",
".",
"send",
"(",
"self",
".",
"api_url",
"+",
"job_id",
",",
"http_method",
"=",
"\"PUT\"",
",",
"json_body",
"=",
"{",
"\"query\"",
":",
"sql_query",
"}",
",",
"http_header",
"=",
"header",
")",
"return",
"data"
] |
Updates the sql query of a specific job
:param job_id: The id of the job to be updated
:param sql_query: The new SQL query for the job
:type job_id: str
:type sql_query: str
:return: Response data, either as json or as a regular response.content
object
:rtype: object
:raise: CartoException
|
[
"Updates",
"the",
"sql",
"query",
"of",
"a",
"specific",
"job"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/sql.py#L256-L276
|
train
|
CartoDB/carto-python
|
carto/sql.py
|
BatchSQLClient.cancel
|
def cancel(self, job_id):
"""
Cancels a job
:param job_id: The id of the job to be cancelled
:type job_id: str
:return: A status code depending on whether the cancel request was
successful
:rtype: str
:raise CartoException:
"""
try:
confirmation = self.send(self.api_url + job_id, http_method="DELETE")
except CartoException as e:
if 'Cannot set status from done to cancelled' in e.args[0].args[0]:
return 'done'
else:
raise e
return confirmation['status']
|
python
|
def cancel(self, job_id):
"""
Cancels a job
:param job_id: The id of the job to be cancelled
:type job_id: str
:return: A status code depending on whether the cancel request was
successful
:rtype: str
:raise CartoException:
"""
try:
confirmation = self.send(self.api_url + job_id, http_method="DELETE")
except CartoException as e:
if 'Cannot set status from done to cancelled' in e.args[0].args[0]:
return 'done'
else:
raise e
return confirmation['status']
|
[
"def",
"cancel",
"(",
"self",
",",
"job_id",
")",
":",
"try",
":",
"confirmation",
"=",
"self",
".",
"send",
"(",
"self",
".",
"api_url",
"+",
"job_id",
",",
"http_method",
"=",
"\"DELETE\"",
")",
"except",
"CartoException",
"as",
"e",
":",
"if",
"'Cannot set status from done to cancelled'",
"in",
"e",
".",
"args",
"[",
"0",
"]",
".",
"args",
"[",
"0",
"]",
":",
"return",
"'done'",
"else",
":",
"raise",
"e",
"return",
"confirmation",
"[",
"'status'",
"]"
] |
Cancels a job
:param job_id: The id of the job to be cancelled
:type job_id: str
:return: A status code depending on whether the cancel request was
successful
:rtype: str
:raise CartoException:
|
[
"Cancels",
"a",
"job"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/sql.py#L278-L298
|
train
|
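A short sketch of update and cancel on a still-pending job, reusing the batch client and job dict from the create sketch above (an assumption).

# Reuses `batch` and `job` from the create() sketch above (assumption).
batch.update(job['job_id'], 'SELECT 1')   # only allowed while the job is still pending
print(batch.cancel(job['job_id']))        # 'cancelled', or 'done' if it already finished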
CartoDB/carto-python
|
carto/sql.py
|
CopySQLClient.copyfrom
|
def copyfrom(self, query, iterable_data, compress=True,
compression_level=DEFAULT_COMPRESSION_LEVEL):
"""
Gets data from an iterable object into a table
:param query: The "COPY table_name [(column_name[, ...])]
FROM STDIN [WITH(option[,...])]" query to execute
:type query: str
:param iterable_data: An object that can be iterated
to retrieve the data
:type iterable_data: object
:return: Response data as json
:rtype: str
:raise CartoException:
"""
url = self.api_url + '/copyfrom'
headers = {
'Content-Type': 'application/octet-stream',
'Transfer-Encoding': 'chunked'
}
params = {'api_key': self.api_key, 'q': query}
if compress:
headers['Content-Encoding'] = 'gzip'
_iterable_data = self._compress_chunks(iterable_data,
compression_level)
else:
_iterable_data = iterable_data
try:
response = self.client.send(url,
http_method='POST',
params=params,
data=_iterable_data,
headers=headers,
stream=True)
response_json = self.client.get_response_data(response)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e)
return response_json
|
python
|
def copyfrom(self, query, iterable_data, compress=True,
compression_level=DEFAULT_COMPRESSION_LEVEL):
"""
Gets data from an iterable object into a table
:param query: The "COPY table_name [(column_name[, ...])]
FROM STDIN [WITH(option[,...])]" query to execute
:type query: str
:param iterable_data: An object that can be iterated
to retrieve the data
:type iterable_data: object
:return: Response data as json
:rtype: str
:raise CartoException:
"""
url = self.api_url + '/copyfrom'
headers = {
'Content-Type': 'application/octet-stream',
'Transfer-Encoding': 'chunked'
}
params = {'api_key': self.api_key, 'q': query}
if compress:
headers['Content-Encoding'] = 'gzip'
_iterable_data = self._compress_chunks(iterable_data,
compression_level)
else:
_iterable_data = iterable_data
try:
response = self.client.send(url,
http_method='POST',
params=params,
data=_iterable_data,
headers=headers,
stream=True)
response_json = self.client.get_response_data(response)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e)
return response_json
|
[
"def",
"copyfrom",
"(",
"self",
",",
"query",
",",
"iterable_data",
",",
"compress",
"=",
"True",
",",
"compression_level",
"=",
"DEFAULT_COMPRESSION_LEVEL",
")",
":",
"url",
"=",
"self",
".",
"api_url",
"+",
"'/copyfrom'",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/octet-stream'",
",",
"'Transfer-Encoding'",
":",
"'chunked'",
"}",
"params",
"=",
"{",
"'api_key'",
":",
"self",
".",
"api_key",
",",
"'q'",
":",
"query",
"}",
"if",
"compress",
":",
"headers",
"[",
"'Content-Encoding'",
"]",
"=",
"'gzip'",
"_iterable_data",
"=",
"self",
".",
"_compress_chunks",
"(",
"iterable_data",
",",
"compression_level",
")",
"else",
":",
"_iterable_data",
"=",
"iterable_data",
"try",
":",
"response",
"=",
"self",
".",
"client",
".",
"send",
"(",
"url",
",",
"http_method",
"=",
"'POST'",
",",
"params",
"=",
"params",
",",
"data",
"=",
"_iterable_data",
",",
"headers",
"=",
"headers",
",",
"stream",
"=",
"True",
")",
"response_json",
"=",
"self",
".",
"client",
".",
"get_response_data",
"(",
"response",
")",
"except",
"CartoRateLimitException",
"as",
"e",
":",
"raise",
"e",
"except",
"Exception",
"as",
"e",
":",
"raise",
"CartoException",
"(",
"e",
")",
"return",
"response_json"
] |
Gets data from an iterable object into a table
:param query: The "COPY table_name [(column_name[, ...])]
FROM STDIN [WITH(option[,...])]" query to execute
:type query: str
:param iterable_data: An object that can be iterated
to retrieve the data
:type iterable_data: object
:return: Response data as json
:rtype: str
:raise CartoException:
|
[
"Gets",
"data",
"from",
"an",
"iterable",
"object",
"into",
"a",
"table"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/sql.py#L341-L386
|
train
|
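A sketch of streaming rows into a table with copyfrom; credentials, table and column names are placeholders, and the generator yields the byte chunks expected by the COPY ... FROM STDIN stream.

from carto.auth import APIKeyAuthClient
from carto.sql import CopySQLClient

# Placeholder credentials (assumptions).
auth_client = APIKeyAuthClient(base_url='https://example.carto.com/api/',
                               api_key='YOUR_API_KEY')
copy_client = CopySQLClient(auth_client)

def rows():
    # Byte chunks for the COPY ... FROM STDIN stream (contents are placeholders).
    yield b'the_geom,name\n'
    yield b'SRID=4326;POINT(-126 54),north\n'

result = copy_client.copyfrom(
    'COPY mytable (the_geom, name) FROM stdin WITH (FORMAT csv, HEADER true)',
    rows())
print(result)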
CartoDB/carto-python
|
carto/sql.py
|
CopySQLClient.copyfrom_file_object
|
def copyfrom_file_object(self, query, file_object, compress=True,
compression_level=DEFAULT_COMPRESSION_LEVEL):
"""
Gets data from a readable file object into a table
:param query: The "COPY table_name [(column_name[, ...])]
FROM STDIN [WITH(option[,...])]" query to execute
:type query: str
:param file_object: A file-like object.
Normally the return value of open('file.ext', 'rb')
:type file_object: file
:return: Response data as json
:rtype: str
:raise CartoException:
"""
chunk_generator = self._read_in_chunks(file_object)
return self.copyfrom(query, chunk_generator, compress,
compression_level)
|
python
|
def copyfrom_file_object(self, query, file_object, compress=True,
compression_level=DEFAULT_COMPRESSION_LEVEL):
"""
Gets data from a readable file object into a table
:param query: The "COPY table_name [(column_name[, ...])]
FROM STDIN [WITH(option[,...])]" query to execute
:type query: str
:param file_object: A file-like object.
Normally the return value of open('file.ext', 'rb')
:type file_object: file
:return: Response data as json
:rtype: str
:raise CartoException:
"""
chunk_generator = self._read_in_chunks(file_object)
return self.copyfrom(query, chunk_generator, compress,
compression_level)
|
[
"def",
"copyfrom_file_object",
"(",
"self",
",",
"query",
",",
"file_object",
",",
"compress",
"=",
"True",
",",
"compression_level",
"=",
"DEFAULT_COMPRESSION_LEVEL",
")",
":",
"chunk_generator",
"=",
"self",
".",
"_read_in_chunks",
"(",
"file_object",
")",
"return",
"self",
".",
"copyfrom",
"(",
"query",
",",
"chunk_generator",
",",
"compress",
",",
"compression_level",
")"
] |
Gets data from a readable file object into a table
:param query: The "COPY table_name [(column_name[, ...])]
FROM STDIN [WITH(option[,...])]" query to execute
:type query: str
:param file_object: A file-like object.
Normally the return value of open('file.ext', 'rb')
:type file_object: file
:return: Response data as json
:rtype: str
:raise CartoException:
|
[
"Gets",
"data",
"from",
"a",
"readable",
"file",
"object",
"into",
"a",
"table"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/sql.py#L388-L408
|
train
|
CartoDB/carto-python
|
carto/sql.py
|
CopySQLClient.copyfrom_file_path
|
def copyfrom_file_path(self, query, path, compress=True,
compression_level=DEFAULT_COMPRESSION_LEVEL):
"""
Gets data from a readable file into a table
:param query: The "COPY table_name [(column_name[, ...])]
FROM STDIN [WITH(option[,...])]" query to execute
:type query: str
:param path: A path to a file
:type path: str
:return: Response data as json
:rtype: str
:raise CartoException:
"""
with open(path, 'rb') as f:
result = self.copyfrom_file_object(query, f, compress,
compression_level)
return result
|
python
|
def copyfrom_file_path(self, query, path, compress=True,
compression_level=DEFAULT_COMPRESSION_LEVEL):
"""
Gets data from a readable file into a table
:param query: The "COPY table_name [(column_name[, ...])]
FROM STDIN [WITH(option[,...])]" query to execute
:type query: str
:param path: A path to a file
:type path: str
:return: Response data as json
:rtype: str
:raise CartoException:
"""
with open(path, 'rb') as f:
result = self.copyfrom_file_object(query, f, compress,
compression_level)
return result
|
[
"def",
"copyfrom_file_path",
"(",
"self",
",",
"query",
",",
"path",
",",
"compress",
"=",
"True",
",",
"compression_level",
"=",
"DEFAULT_COMPRESSION_LEVEL",
")",
":",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"f",
":",
"result",
"=",
"self",
".",
"copyfrom_file_object",
"(",
"query",
",",
"f",
",",
"compress",
",",
"compression_level",
")",
"return",
"result"
] |
Gets data from a readable file into a table
:param query: The "COPY table_name [(column_name[, ...])]
FROM STDIN [WITH(option[,...])]" query to execute
:type query: str
:param path: A path to a file
:type path: str
:return: Response data as json
:rtype: str
:raise CartoException:
|
[
"Gets",
"data",
"from",
"a",
"readable",
"file",
"into",
"a",
"table"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/sql.py#L410-L430
|
train
|
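The file-based helpers wrap the same call; a sketch with a placeholder CSV path, reusing the copy_client from the previous sketch (an assumption).

# Reuses `copy_client` from the previous sketch (assumption); the CSV path is a placeholder.
copy_client.copyfrom_file_path(
    'COPY mytable (the_geom, name) FROM stdin WITH (FORMAT csv, HEADER true)',
    '/tmp/rows.csv')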
CartoDB/carto-python
|
carto/sql.py
|
CopySQLClient.copyto
|
def copyto(self, query):
"""
Gets data from a table into a Response object that can be iterated
:param query: The "COPY { table_name [(column_name[, ...])] | (query) }
TO STDOUT [WITH(option[,...])]" query to execute
:type query: str
:return: response object
:rtype: Response
:raise CartoException:
"""
url = self.api_url + '/copyto'
params = {'api_key': self.api_key, 'q': query}
try:
response = self.client.send(url,
http_method='GET',
params=params,
stream=True)
response.raise_for_status()
except CartoRateLimitException as e:
raise e
except HTTPError as e:
if 400 <= response.status_code < 500:
# Client error, provide better reason
reason = response.json()['error'][0]
error_msg = u'%s Client Error: %s' % (response.status_code,
reason)
raise CartoException(error_msg)
else:
raise CartoException(e)
except Exception as e:
raise CartoException(e)
return response
|
python
|
def copyto(self, query):
"""
Gets data from a table into a Response object that can be iterated
:param query: The "COPY { table_name [(column_name[, ...])] | (query) }
TO STDOUT [WITH(option[,...])]" query to execute
:type query: str
:return: response object
:rtype: Response
:raise CartoException:
"""
url = self.api_url + '/copyto'
params = {'api_key': self.api_key, 'q': query}
try:
response = self.client.send(url,
http_method='GET',
params=params,
stream=True)
response.raise_for_status()
except CartoRateLimitException as e:
raise e
except HTTPError as e:
if 400 <= response.status_code < 500:
# Client error, provide better reason
reason = response.json()['error'][0]
error_msg = u'%s Client Error: %s' % (response.status_code,
reason)
raise CartoException(error_msg)
else:
raise CartoException(e)
except Exception as e:
raise CartoException(e)
return response
|
[
"def",
"copyto",
"(",
"self",
",",
"query",
")",
":",
"url",
"=",
"self",
".",
"api_url",
"+",
"'/copyto'",
"params",
"=",
"{",
"'api_key'",
":",
"self",
".",
"api_key",
",",
"'q'",
":",
"query",
"}",
"try",
":",
"response",
"=",
"self",
".",
"client",
".",
"send",
"(",
"url",
",",
"http_method",
"=",
"'GET'",
",",
"params",
"=",
"params",
",",
"stream",
"=",
"True",
")",
"response",
".",
"raise_for_status",
"(",
")",
"except",
"CartoRateLimitException",
"as",
"e",
":",
"raise",
"e",
"except",
"HTTPError",
"as",
"e",
":",
"if",
"400",
"<=",
"response",
".",
"status_code",
"<",
"500",
":",
"# Client error, provide better reason",
"reason",
"=",
"response",
".",
"json",
"(",
")",
"[",
"'error'",
"]",
"[",
"0",
"]",
"error_msg",
"=",
"u'%s Client Error: %s'",
"%",
"(",
"response",
".",
"status_code",
",",
"reason",
")",
"raise",
"CartoException",
"(",
"error_msg",
")",
"else",
":",
"raise",
"CartoException",
"(",
"e",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"CartoException",
"(",
"e",
")",
"return",
"response"
] |
Gets data from a table into a Response object that can be iterated
:param query: The "COPY { table_name [(column_name[, ...])] | (query) }
TO STDOUT [WITH(option[,...])]" query to execute
:type query: str
:return: response object
:rtype: Response
:raise CartoException:
|
[
"Gets",
"data",
"from",
"a",
"table",
"into",
"a",
"Response",
"object",
"that",
"can",
"be",
"iterated"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/sql.py#L432-L468
|
train
|
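A sketch of streaming a table out with copyto; the returned object is a requests Response opened with stream=True, so it can be consumed chunk by chunk.

# Reuses `copy_client` from the copyfrom sketch (assumption).
response = copy_client.copyto('COPY mytable TO stdout WITH (FORMAT csv, HEADER true)')
for chunk in response.iter_content(chunk_size=8192):
    print(len(chunk))  # each chunk is a piece of the CSV stream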
CartoDB/carto-python
|
carto/sql.py
|
CopySQLClient.copyto_file_object
|
def copyto_file_object(self, query, file_object):
"""
Gets data from a table into a writable file object
:param query: The "COPY { table_name [(column_name[, ...])] | (query) }
TO STDOUT [WITH(option[,...])]" query to execute
:type query: str
:param file_object: A file-like object.
Normally the return value of open('file.ext', 'wb')
:type file_object: file
:raise CartoException:
"""
response = self.copyto(query)
for block in response.iter_content(DEFAULT_CHUNK_SIZE):
file_object.write(block)
|
python
|
def copyto_file_object(self, query, file_object):
"""
Gets data from a table into a writable file object
:param query: The "COPY { table_name [(column_name[, ...])] | (query) }
TO STDOUT [WITH(option[,...])]" query to execute
:type query: str
:param file_object: A file-like object.
Normally the return value of open('file.ext', 'wb')
:type file_object: file
:raise CartoException:
"""
response = self.copyto(query)
for block in response.iter_content(DEFAULT_CHUNK_SIZE):
file_object.write(block)
|
[
"def",
"copyto_file_object",
"(",
"self",
",",
"query",
",",
"file_object",
")",
":",
"response",
"=",
"self",
".",
"copyto",
"(",
"query",
")",
"for",
"block",
"in",
"response",
".",
"iter_content",
"(",
"DEFAULT_CHUNK_SIZE",
")",
":",
"file_object",
".",
"write",
"(",
"block",
")"
] |
Gets data from a table into a writable file object
:param query: The "COPY { table_name [(column_name[, ...])] | (query) }
TO STDOUT [WITH(option[,...])]" query to execute
:type query: str
:param file_object: A file-like object.
Normally the return value of open('file.ext', 'wb')
:type file_object: file
:raise CartoException:
|
[
"Gets",
"data",
"from",
"a",
"table",
"into",
"a",
"writable",
"file",
"object"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/sql.py#L470-L486
|
train
|
CartoDB/carto-python
|
carto/sql.py
|
CopySQLClient.copyto_file_path
|
def copyto_file_path(self, query, path, append=False):
"""
Gets data from a table into a writable file
:param query: The "COPY { table_name [(column_name[, ...])] | (query) }
TO STDOUT [WITH(option[,...])]" query to execute
:type query: str
:param path: A path to a writable file
:type path: str
:param append: Whether to append or not if the file already exists
Default value is False
:type append: bool
:raise CartoException:
"""
file_mode = 'wb' if not append else 'ab'
with open(path, file_mode) as f:
self.copyto_file_object(query, f)
|
python
|
def copyto_file_path(self, query, path, append=False):
"""
Gets data from a table into a writable file
:param query: The "COPY { table_name [(column_name[, ...])] | (query) }
TO STDOUT [WITH(option[,...])]" query to execute
:type query: str
:param path: A path to a writable file
:type path: str
:param append: Whether to append or not if the file already exists
Default value is False
:type append: bool
:raise CartoException:
"""
file_mode = 'wb' if not append else 'ab'
with open(path, file_mode) as f:
self.copyto_file_object(query, f)
|
[
"def",
"copyto_file_path",
"(",
"self",
",",
"query",
",",
"path",
",",
"append",
"=",
"False",
")",
":",
"file_mode",
"=",
"'wb'",
"if",
"not",
"append",
"else",
"'ab'",
"with",
"open",
"(",
"path",
",",
"file_mode",
")",
"as",
"f",
":",
"self",
".",
"copyto_file_object",
"(",
"query",
",",
"f",
")"
] |
Gets data from a table into a writable file
:param query: The "COPY { table_name [(column_name[, ...])] | (query) }
TO STDOUT [WITH(option[,...])]" query to execute
:type query: str
:param path: A path to a writable file
:type path: str
:param append: Whether to append or not if the file already exists
Default value is False
:type append: bool
:raise CartoException:
|
[
"Gets",
"data",
"from",
"a",
"table",
"into",
"a",
"writable",
"file"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/sql.py#L488-L507
|
train
|
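copyto_file_path is a thin wrapper over copyto_file_object: it opens the path in 'wb' (or 'ab' when append=True) and delegates. A short sketch with the same assumed client and illustrative table names:

from carto.auth import APIKeyAuthClient
from carto.sql import CopySQLClient

copy_client = CopySQLClient(APIKeyAuthClient(base_url='https://YOUR_USER.carto.com/', api_key='YOUR_API_KEY'))
copy_client.copyto_file_path('COPY mytable TO STDOUT WITH (FORMAT csv, HEADER true)', 'mytable.csv')            # overwrite
copy_client.copyto_file_path('COPY mytable_part2 TO STDOUT WITH (FORMAT csv)', 'mytable.csv', append=True)      # append to the same file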
CartoDB/carto-python
|
carto/sync_tables.py
|
SyncTableJob.run
|
def run(self, **import_params):
"""
Actually creates the job import on the CARTO server
:param import_params: To be send to the Import API, see CARTO's docs
on Import API for an updated list of accepted
params
:type import_params: kwargs
:return:
.. note:: The sync table job is asynchronous, so you should take care of the progression, by calling the :func:`carto.resources.AsyncResource.refresh` method and check the import job :py:attr:`~state` attribute. See :func:`carto.datasets.DatasetManager.create` for a unified method to import files into CARTO
"""
import_params["url"] = self.url
import_params["interval"] = self.interval
if "connection" in import_params:
self.fields.append("connector")
import_params["connection"]["interval"] = self.interval
self.update_from_dict(import_params["connection"])
self.save(force_create=True)
else:
return super(SyncTableJob, self).run(params=import_params)
|
python
|
def run(self, **import_params):
"""
Actually creates the job import on the CARTO server
:param import_params: To be send to the Import API, see CARTO's docs
on Import API for an updated list of accepted
params
:type import_params: kwargs
:return:
.. note:: The sync table job is asynchronous, so you should take care of the progression, by calling the :func:`carto.resources.AsyncResource.refresh` method and check the import job :py:attr:`~state` attribute. See :func:`carto.datasets.DatasetManager.create` for a unified method to import files into CARTO
"""
import_params["url"] = self.url
import_params["interval"] = self.interval
if "connection" in import_params:
self.fields.append("connector")
import_params["connection"]["interval"] = self.interval
self.update_from_dict(import_params["connection"])
self.save(force_create=True)
else:
return super(SyncTableJob, self).run(params=import_params)
|
[
"def",
"run",
"(",
"self",
",",
"*",
"*",
"import_params",
")",
":",
"import_params",
"[",
"\"url\"",
"]",
"=",
"self",
".",
"url",
"import_params",
"[",
"\"interval\"",
"]",
"=",
"self",
".",
"interval",
"if",
"\"connection\"",
"in",
"import_params",
":",
"self",
".",
"fields",
".",
"append",
"(",
"\"connector\"",
")",
"import_params",
"[",
"\"connection\"",
"]",
"[",
"\"interval\"",
"]",
"=",
"self",
".",
"interval",
"self",
".",
"update_from_dict",
"(",
"import_params",
"[",
"\"connection\"",
"]",
")",
"self",
".",
"save",
"(",
"force_create",
"=",
"True",
")",
"else",
":",
"return",
"super",
"(",
"SyncTableJob",
",",
"self",
")",
".",
"run",
"(",
"params",
"=",
"import_params",
")"
] |
Actually creates the job import on the CARTO server
:param import_params: To be send to the Import API, see CARTO's docs
on Import API for an updated list of accepted
params
:type import_params: kwargs
:return:
.. note:: The sync table job is asynchronous, so you should take care of the progression, by calling the :func:`carto.resources.AsyncResource.refresh` method and check the import job :py:attr:`~state` attribute. See :func:`carto.datasets.DatasetManager.create` for a unified method to import files into CARTO
|
[
"Actually",
"creates",
"the",
"job",
"import",
"on",
"the",
"CARTO",
"server"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/sync_tables.py#L84-L106
|
train
|
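The note in the SyncTableJob.run docstring above points at the polling pattern for this asynchronous job. A hedged sketch of that loop follows; how the job object is obtained (manager, auth client) and the exact state strings are assumptions drawn only from the docstring's mention of refresh() and the state attribute.

import time

job.run()                      # job: a SyncTableJob created elsewhere (assumed)
while job.state not in ('created', 'complete', 'failure'):   # illustrative state names
    time.sleep(5)
    job.refresh()              # carto.resources.AsyncResource.refresh, per the note above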
CartoDB/carto-python
|
carto/sync_tables.py
|
SyncTableJob.force_sync
|
def force_sync(self):
"""
Forces to sync the SyncTableJob
:return:
:raise: CartoException
"""
try:
self.send(self.get_resource_endpoint(), "put")
except Exception as e:
raise CartoException(e)
|
python
|
def force_sync(self):
"""
Forces to sync the SyncTableJob
:return:
:raise: CartoException
"""
try:
self.send(self.get_resource_endpoint(), "put")
except Exception as e:
raise CartoException(e)
|
[
"def",
"force_sync",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"send",
"(",
"self",
".",
"get_resource_endpoint",
"(",
")",
",",
"\"put\"",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"CartoException",
"(",
"e",
")"
] |
Forces to sync the SyncTableJob
:return:
:raise: CartoException
|
[
"Forces",
"to",
"sync",
"the",
"SyncTableJob"
] |
f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16
|
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/sync_tables.py#L118-L129
|
train
|
ikki407/stacking
|
stacking/base.py
|
BaseModel.set_prob_type
|
def set_prob_type(cls, problem_type, classification_type, eval_type):
""" Set problem type """
assert problem_type in problem_type_list, 'Need to set Problem Type'
if problem_type == 'classification':
assert classification_type in classification_type_list,\
'Need to set Classification Type'
assert eval_type in eval_type_list, 'Need to set Evaluation Type'
cls.problem_type = problem_type
cls.classification_type = classification_type
cls.eval_type = eval_type
if cls.problem_type == 'classification':
print 'Setting Problem:{}, Type:{}, Eval:{}'.format(cls.problem_type,
cls.classification_type,
cls.eval_type)
elif cls.problem_type == 'regression':
print 'Setting Problem:{}, Eval:{}'.format(cls.problem_type,
cls.eval_type)
return
|
python
|
def set_prob_type(cls, problem_type, classification_type, eval_type):
""" Set problem type """
assert problem_type in problem_type_list, 'Need to set Problem Type'
if problem_type == 'classification':
assert classification_type in classification_type_list,\
'Need to set Classification Type'
assert eval_type in eval_type_list, 'Need to set Evaluation Type'
cls.problem_type = problem_type
cls.classification_type = classification_type
cls.eval_type = eval_type
if cls.problem_type == 'classification':
print 'Setting Problem:{}, Type:{}, Eval:{}'.format(cls.problem_type,
cls.classification_type,
cls.eval_type)
elif cls.problem_type == 'regression':
print 'Setting Problem:{}, Eval:{}'.format(cls.problem_type,
cls.eval_type)
return
|
[
"def",
"set_prob_type",
"(",
"cls",
",",
"problem_type",
",",
"classification_type",
",",
"eval_type",
")",
":",
"assert",
"problem_type",
"in",
"problem_type_list",
",",
"'Need to set Problem Type'",
"if",
"problem_type",
"==",
"'classification'",
":",
"assert",
"classification_type",
"in",
"classification_type_list",
",",
"'Need to set Classification Type'",
"assert",
"eval_type",
"in",
"eval_type_list",
",",
"'Need to set Evaluation Type'",
"cls",
".",
"problem_type",
"=",
"problem_type",
"cls",
".",
"classification_type",
"=",
"classification_type",
"cls",
".",
"eval_type",
"=",
"eval_type",
"if",
"cls",
".",
"problem_type",
"==",
"'classification'",
":",
"print",
"'Setting Problem:{}, Type:{}, Eval:{}'",
".",
"format",
"(",
"cls",
".",
"problem_type",
",",
"cls",
".",
"classification_type",
",",
"cls",
".",
"eval_type",
")",
"elif",
"cls",
".",
"problem_type",
"==",
"'regression'",
":",
"print",
"'Setting Problem:{}, Eval:{}'",
".",
"format",
"(",
"cls",
".",
"problem_type",
",",
"cls",
".",
"eval_type",
")",
"return"
] |
Set problem type
|
[
"Set",
"problem",
"type"
] |
105073598fd4f9481212d9db9dea92559d9a9d5a
|
https://github.com/ikki407/stacking/blob/105073598fd4f9481212d9db9dea92559d9a9d5a/stacking/base.py#L280-L301
|
train
|
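The set_prob_type entry above is Python 2 code (bare print statements). A minimal, runnable Python 3 form of the two status-printing branches is sketched below; the concrete problem/evaluation values are illustrative only.

# Python 3 form of the status lines printed by set_prob_type (values are illustrative).
problem_type, classification_type, eval_type = 'classification', 'multi-class', 'logloss'
if problem_type == 'classification':
    print('Setting Problem:{}, Type:{}, Eval:{}'.format(problem_type, classification_type, eval_type))
elif problem_type == 'regression':
    print('Setting Problem:{}, Eval:{}'.format(problem_type, eval_type))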
ikki407/stacking
|
stacking/base.py
|
BaseModel.make_multi_cols
|
def make_multi_cols(self, num_class, name):
'''make cols for multi-class predictions'''
cols = ['c' + str(i) + '_' for i in xrange(num_class)]
cols = map(lambda x: x + name, cols)
return cols
|
python
|
def make_multi_cols(self, num_class, name):
'''make cols for multi-class predictions'''
cols = ['c' + str(i) + '_' for i in xrange(num_class)]
cols = map(lambda x: x + name, cols)
return cols
|
[
"def",
"make_multi_cols",
"(",
"self",
",",
"num_class",
",",
"name",
")",
":",
"cols",
"=",
"[",
"'c'",
"+",
"str",
"(",
"i",
")",
"+",
"'_'",
"for",
"i",
"in",
"xrange",
"(",
"num_class",
")",
"]",
"cols",
"=",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"name",
",",
"cols",
")",
"return",
"cols"
] |
make cols for multi-class predictions
|
[
"make",
"cols",
"for",
"multi",
"-",
"class",
"predictions"
] |
105073598fd4f9481212d9db9dea92559d9a9d5a
|
https://github.com/ikki407/stacking/blob/105073598fd4f9481212d9db9dea92559d9a9d5a/stacking/base.py#L308-L312
|
train
|
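make_multi_cols above also relies on Python 2 (xrange, and map returning a list). A Python 3 equivalent that yields the same column names, shown as a standalone sketch:

def make_multi_cols_py3(num_class, name):
    # 'c0_pred', 'c1_pred', 'c2_pred', ... column names for multi-class predictions
    return ['c{}_{}'.format(i, name) for i in range(num_class)]

print(make_multi_cols_py3(3, 'pred'))  # ['c0_pred', 'c1_pred', 'c2_pred']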
clalancette/pycdlib
|
pycdlib/path_table_record.py
|
PathTableRecord.parse
|
def parse(self, data):
# type: (bytes) -> None
'''
A method to parse an ISO9660 Path Table Record out of a string.
Parameters:
data - The string to parse.
Returns:
Nothing.
'''
(self.len_di, self.xattr_length, self.extent_location,
self.parent_directory_num) = struct.unpack_from(self.FMT, data[:8], 0)
if self.len_di % 2 != 0:
self.directory_identifier = data[8:-1]
else:
self.directory_identifier = data[8:]
self.dirrecord = None
self._initialized = True
|
python
|
def parse(self, data):
# type: (bytes) -> None
'''
A method to parse an ISO9660 Path Table Record out of a string.
Parameters:
data - The string to parse.
Returns:
Nothing.
'''
(self.len_di, self.xattr_length, self.extent_location,
self.parent_directory_num) = struct.unpack_from(self.FMT, data[:8], 0)
if self.len_di % 2 != 0:
self.directory_identifier = data[8:-1]
else:
self.directory_identifier = data[8:]
self.dirrecord = None
self._initialized = True
|
[
"def",
"parse",
"(",
"self",
",",
"data",
")",
":",
"# type: (bytes) -> None",
"(",
"self",
".",
"len_di",
",",
"self",
".",
"xattr_length",
",",
"self",
".",
"extent_location",
",",
"self",
".",
"parent_directory_num",
")",
"=",
"struct",
".",
"unpack_from",
"(",
"self",
".",
"FMT",
",",
"data",
"[",
":",
"8",
"]",
",",
"0",
")",
"if",
"self",
".",
"len_di",
"%",
"2",
"!=",
"0",
":",
"self",
".",
"directory_identifier",
"=",
"data",
"[",
"8",
":",
"-",
"1",
"]",
"else",
":",
"self",
".",
"directory_identifier",
"=",
"data",
"[",
"8",
":",
"]",
"self",
".",
"dirrecord",
"=",
"None",
"self",
".",
"_initialized",
"=",
"True"
] |
A method to parse an ISO9660 Path Table Record out of a string.
Parameters:
data - The string to parse.
Returns:
Nothing.
|
[
"A",
"method",
"to",
"parse",
"an",
"ISO9660",
"Path",
"Table",
"Record",
"out",
"of",
"a",
"string",
"."
] |
1e7b77a809e905d67dc71e12d70e850be26b6233
|
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/path_table_record.py#L46-L64
|
train
|
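A hedged sketch of feeding PathTableRecord.parse() a hand-built record. The 8-byte header layout assumed here (1-byte len_di, 1-byte xattr_length, 4-byte extent, 2-byte parent number, little-endian) is consistent with the 8-byte unpack above but is an assumption about self.FMT; the trailing pad byte is present because len_di is odd, and parse strips it via data[8:-1].

import struct
from pycdlib.path_table_record import PathTableRecord

# Root-style record: 1-byte identifier b'\x00', extent 24, parent dir 1, plus one pad byte.
raw = struct.pack('<BBLH', 1, 0, 24, 1) + b'\x00' + b'\x00'
ptr = PathTableRecord()
ptr.parse(raw)
print(ptr.extent_location, ptr.parent_directory_num)  # 24 1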
clalancette/pycdlib
|
pycdlib/path_table_record.py
|
PathTableRecord._record
|
def _record(self, ext_loc, parent_dir_num):
# type: (int, int) -> bytes
'''
An internal method to generate a string representing this Path Table Record.
Parameters:
ext_loc - The extent location to place in this Path Table Record.
parent_dir_num - The parent directory number to place in this Path Table
Record.
Returns:
A string representing this Path Table Record.
'''
return struct.pack(self.FMT, self.len_di, self.xattr_length,
ext_loc, parent_dir_num) + self.directory_identifier + b'\x00' * (self.len_di % 2)
|
python
|
def _record(self, ext_loc, parent_dir_num):
# type: (int, int) -> bytes
'''
An internal method to generate a string representing this Path Table Record.
Parameters:
ext_loc - The extent location to place in this Path Table Record.
parent_dir_num - The parent directory number to place in this Path Table
Record.
Returns:
A string representing this Path Table Record.
'''
return struct.pack(self.FMT, self.len_di, self.xattr_length,
ext_loc, parent_dir_num) + self.directory_identifier + b'\x00' * (self.len_di % 2)
|
[
"def",
"_record",
"(",
"self",
",",
"ext_loc",
",",
"parent_dir_num",
")",
":",
"# type: (int, int) -> bytes",
"return",
"struct",
".",
"pack",
"(",
"self",
".",
"FMT",
",",
"self",
".",
"len_di",
",",
"self",
".",
"xattr_length",
",",
"ext_loc",
",",
"parent_dir_num",
")",
"+",
"self",
".",
"directory_identifier",
"+",
"b'\\x00'",
"*",
"(",
"self",
".",
"len_di",
"%",
"2",
")"
] |
An internal method to generate a string representing this Path Table Record.
Parameters:
ext_loc - The extent location to place in this Path Table Record.
parent_dir_num - The parent directory number to place in this Path Table
Record.
Returns:
A string representing this Path Table Record.
|
[
"An",
"internal",
"method",
"to",
"generate",
"a",
"string",
"representing",
"this",
"Path",
"Table",
"Record",
"."
] |
1e7b77a809e905d67dc71e12d70e850be26b6233
|
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/path_table_record.py#L66-L79
|
train
|
clalancette/pycdlib
|
pycdlib/path_table_record.py
|
PathTableRecord.record_little_endian
|
def record_little_endian(self):
# type: () -> bytes
'''
A method to generate a string representing the little endian version of
this Path Table Record.
Parameters:
None.
Returns:
A string representing the little endian version of this Path Table Record.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
return self._record(self.extent_location, self.parent_directory_num)
|
python
|
def record_little_endian(self):
# type: () -> bytes
'''
A method to generate a string representing the little endian version of
this Path Table Record.
Parameters:
None.
Returns:
A string representing the little endian version of this Path Table Record.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
return self._record(self.extent_location, self.parent_directory_num)
|
[
"def",
"record_little_endian",
"(",
"self",
")",
":",
"# type: () -> bytes",
"if",
"not",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'Path Table Record not yet initialized'",
")",
"return",
"self",
".",
"_record",
"(",
"self",
".",
"extent_location",
",",
"self",
".",
"parent_directory_num",
")"
] |
A method to generate a string representing the little endian version of
this Path Table Record.
Parameters:
None.
Returns:
A string representing the little endian version of this Path Table Record.
|
[
"A",
"method",
"to",
"generate",
"a",
"string",
"representing",
"the",
"little",
"endian",
"version",
"of",
"this",
"Path",
"Table",
"Record",
"."
] |
1e7b77a809e905d67dc71e12d70e850be26b6233
|
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/path_table_record.py#L81-L95
|
train
|
clalancette/pycdlib
|
pycdlib/path_table_record.py
|
PathTableRecord.record_big_endian
|
def record_big_endian(self):
# type: () -> bytes
'''
A method to generate a string representing the big endian version of
this Path Table Record.
Parameters:
None.
Returns:
A string representing the big endian version of this Path Table Record.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
return self._record(utils.swab_32bit(self.extent_location),
utils.swab_16bit(self.parent_directory_num))
|
python
|
def record_big_endian(self):
# type: () -> bytes
'''
A method to generate a string representing the big endian version of
this Path Table Record.
Parameters:
None.
Returns:
A string representing the big endian version of this Path Table Record.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
return self._record(utils.swab_32bit(self.extent_location),
utils.swab_16bit(self.parent_directory_num))
|
[
"def",
"record_big_endian",
"(",
"self",
")",
":",
"# type: () -> bytes",
"if",
"not",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'Path Table Record not yet initialized'",
")",
"return",
"self",
".",
"_record",
"(",
"utils",
".",
"swab_32bit",
"(",
"self",
".",
"extent_location",
")",
",",
"utils",
".",
"swab_16bit",
"(",
"self",
".",
"parent_directory_num",
")",
")"
] |
A method to generate a string representing the big endian version of
this Path Table Record.
Parameters:
None.
Returns:
A string representing the big endian version of this Path Table Record.
|
[
"A",
"method",
"to",
"generate",
"a",
"string",
"representing",
"the",
"big",
"endian",
"version",
"of",
"this",
"Path",
"Table",
"Record",
"."
] |
1e7b77a809e905d67dc71e12d70e850be26b6233
|
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/path_table_record.py#L97-L112
|
train
|
clalancette/pycdlib
|
pycdlib/path_table_record.py
|
PathTableRecord._new
|
def _new(self, name, parent_dir_num):
# type: (bytes, int) -> None
'''
An internal method to create a new Path Table Record.
Parameters:
name - The name for this Path Table Record.
parent_dir_num - The directory number of the parent of this Path Table
Record.
Returns:
Nothing.
'''
self.len_di = len(name)
self.xattr_length = 0 # FIXME: we don't support xattr for now
self.parent_directory_num = parent_dir_num
self.directory_identifier = name
self._initialized = True
|
python
|
def _new(self, name, parent_dir_num):
# type: (bytes, int) -> None
'''
An internal method to create a new Path Table Record.
Parameters:
name - The name for this Path Table Record.
parent_dir_num - The directory number of the parent of this Path Table
Record.
Returns:
Nothing.
'''
self.len_di = len(name)
self.xattr_length = 0 # FIXME: we don't support xattr for now
self.parent_directory_num = parent_dir_num
self.directory_identifier = name
self._initialized = True
|
[
"def",
"_new",
"(",
"self",
",",
"name",
",",
"parent_dir_num",
")",
":",
"# type: (bytes, int) -> None",
"self",
".",
"len_di",
"=",
"len",
"(",
"name",
")",
"self",
".",
"xattr_length",
"=",
"0",
"# FIXME: we don't support xattr for now",
"self",
".",
"parent_directory_num",
"=",
"parent_dir_num",
"self",
".",
"directory_identifier",
"=",
"name",
"self",
".",
"_initialized",
"=",
"True"
] |
An internal method to create a new Path Table Record.
Parameters:
name - The name for this Path Table Record.
parent_dir_num - The directory number of the parent of this Path Table
Record.
Returns:
Nothing.
|
[
"An",
"internal",
"method",
"to",
"create",
"a",
"new",
"Path",
"Table",
"Record",
"."
] |
1e7b77a809e905d67dc71e12d70e850be26b6233
|
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/path_table_record.py#L127-L143
|
train
|
clalancette/pycdlib
|
pycdlib/path_table_record.py
|
PathTableRecord.new_dir
|
def new_dir(self, name):
# type: (bytes) -> None
'''
A method to create a new Path Table Record.
Parameters:
name - The name for this Path Table Record.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('Path Table Record already initialized')
# Zero for the parent dir num is bogus, but that will get fixed later.
self._new(name, 0)
|
python
|
def new_dir(self, name):
# type: (bytes) -> None
'''
A method to create a new Path Table Record.
Parameters:
name - The name for this Path Table Record.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('Path Table Record already initialized')
# Zero for the parent dir num is bogus, but that will get fixed later.
self._new(name, 0)
|
[
"def",
"new_dir",
"(",
"self",
",",
"name",
")",
":",
"# type: (bytes) -> None",
"if",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'Path Table Record already initialized'",
")",
"# Zero for the parent dir num is bogus, but that will get fixed later.",
"self",
".",
"_new",
"(",
"name",
",",
"0",
")"
] |
A method to create a new Path Table Record.
Parameters:
name - The name for this Path Table Record.
Returns:
Nothing.
|
[
"A",
"method",
"to",
"create",
"a",
"new",
"Path",
"Table",
"Record",
"."
] |
1e7b77a809e905d67dc71e12d70e850be26b6233
|
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/path_table_record.py#L160-L174
|
train
|
clalancette/pycdlib
|
pycdlib/path_table_record.py
|
PathTableRecord.update_extent_location
|
def update_extent_location(self, extent_loc):
# type: (int) -> None
'''
A method to update the extent location for this Path Table Record.
Parameters:
extent_loc - The new extent location.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
self.extent_location = extent_loc
|
python
|
def update_extent_location(self, extent_loc):
# type: (int) -> None
'''
A method to update the extent location for this Path Table Record.
Parameters:
extent_loc - The new extent location.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
self.extent_location = extent_loc
|
[
"def",
"update_extent_location",
"(",
"self",
",",
"extent_loc",
")",
":",
"# type: (int) -> None",
"if",
"not",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'Path Table Record not yet initialized'",
")",
"self",
".",
"extent_location",
"=",
"extent_loc"
] |
A method to update the extent location for this Path Table Record.
Parameters:
extent_loc - The new extent location.
Returns:
Nothing.
|
[
"A",
"method",
"to",
"update",
"the",
"extent",
"location",
"for",
"this",
"Path",
"Table",
"Record",
"."
] |
1e7b77a809e905d67dc71e12d70e850be26b6233
|
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/path_table_record.py#L176-L189
|
train
|
clalancette/pycdlib
|
pycdlib/path_table_record.py
|
PathTableRecord.update_parent_directory_number
|
def update_parent_directory_number(self, parent_dir_num):
# type: (int) -> None
'''
A method to update the parent directory number for this Path Table
Record from the directory record.
Parameters:
parent_dir_num - The new parent directory number to assign to this PTR.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
self.parent_directory_num = parent_dir_num
|
python
|
def update_parent_directory_number(self, parent_dir_num):
# type: (int) -> None
'''
A method to update the parent directory number for this Path Table
Record from the directory record.
Parameters:
parent_dir_num - The new parent directory number to assign to this PTR.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
self.parent_directory_num = parent_dir_num
|
[
"def",
"update_parent_directory_number",
"(",
"self",
",",
"parent_dir_num",
")",
":",
"# type: (int) -> None",
"if",
"not",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'Path Table Record not yet initialized'",
")",
"self",
".",
"parent_directory_num",
"=",
"parent_dir_num"
] |
A method to update the parent directory number for this Path Table
Record from the directory record.
Parameters:
parent_dir_num - The new parent directory number to assign to this PTR.
Returns:
Nothing.
|
[
"A",
"method",
"to",
"update",
"the",
"parent",
"directory",
"number",
"for",
"this",
"Path",
"Table",
"Record",
"from",
"the",
"directory",
"record",
"."
] |
1e7b77a809e905d67dc71e12d70e850be26b6233
|
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/path_table_record.py#L191-L204
|
train
|
clalancette/pycdlib
|
pycdlib/path_table_record.py
|
PathTableRecord.equal_to_be
|
def equal_to_be(self, be_record):
# type: (PathTableRecord) -> bool
'''
A method to compare a little-endian path table record to its
big-endian counterpart. This is used to ensure that the ISO is sane.
Parameters:
be_record - The big-endian object to compare with the little-endian
object.
Returns:
True if this record is equal to the big-endian record passed in,
False otherwise.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('This Path Table Record is not yet initialized')
if be_record.len_di != self.len_di or \
be_record.xattr_length != self.xattr_length or \
utils.swab_32bit(be_record.extent_location) != self.extent_location or \
utils.swab_16bit(be_record.parent_directory_num) != self.parent_directory_num or \
be_record.directory_identifier != self.directory_identifier:
return False
return True
|
python
|
def equal_to_be(self, be_record):
# type: (PathTableRecord) -> bool
'''
A method to compare a little-endian path table record to its
big-endian counterpart. This is used to ensure that the ISO is sane.
Parameters:
be_record - The big-endian object to compare with the little-endian
object.
Returns:
True if this record is equal to the big-endian record passed in,
False otherwise.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('This Path Table Record is not yet initialized')
if be_record.len_di != self.len_di or \
be_record.xattr_length != self.xattr_length or \
utils.swab_32bit(be_record.extent_location) != self.extent_location or \
utils.swab_16bit(be_record.parent_directory_num) != self.parent_directory_num or \
be_record.directory_identifier != self.directory_identifier:
return False
return True
|
[
"def",
"equal_to_be",
"(",
"self",
",",
"be_record",
")",
":",
"# type: (PathTableRecord) -> bool",
"if",
"not",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'This Path Table Record is not yet initialized'",
")",
"if",
"be_record",
".",
"len_di",
"!=",
"self",
".",
"len_di",
"or",
"be_record",
".",
"xattr_length",
"!=",
"self",
".",
"xattr_length",
"or",
"utils",
".",
"swab_32bit",
"(",
"be_record",
".",
"extent_location",
")",
"!=",
"self",
".",
"extent_location",
"or",
"utils",
".",
"swab_16bit",
"(",
"be_record",
".",
"parent_directory_num",
")",
"!=",
"self",
".",
"parent_directory_num",
"or",
"be_record",
".",
"directory_identifier",
"!=",
"self",
".",
"directory_identifier",
":",
"return",
"False",
"return",
"True"
] |
A method to compare a little-endian path table record to its
big-endian counterpart. This is used to ensure that the ISO is sane.
Parameters:
be_record - The big-endian object to compare with the little-endian
object.
Returns:
True if this record is equal to the big-endian record passed in,
False otherwise.
|
[
"A",
"method",
"to",
"compare",
"a",
"little",
"-",
"endian",
"path",
"table",
"record",
"to",
"its",
"big",
"-",
"endian",
"counterpart",
".",
"This",
"is",
"used",
"to",
"ensure",
"that",
"the",
"ISO",
"is",
"sane",
"."
] |
1e7b77a809e905d67dc71e12d70e850be26b6233
|
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/path_table_record.py#L206-L228
|
train
|
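Pulling the PathTableRecord entries above together, a sketch of building a record in memory and serializing both byte orders. new_dir leaves the parent number bogus and the extent unset, so the two update_* calls are required before recording; the directory name and numbers are illustrative.

from pycdlib.path_table_record import PathTableRecord

ptr = PathTableRecord()
ptr.new_dir(b'DIR1')                   # len_di and the identifier come from the name
ptr.update_extent_location(24)         # extent holding the directory's own records
ptr.update_parent_directory_number(1)  # 1 is the root directory's path table number
little = ptr.record_little_endian()    # bytes as stored in the L path table
big = ptr.record_big_endian()          # same fields, byte-swapped for the M path table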
clalancette/pycdlib
|
pycdlib/utils.py
|
copy_data
|
def copy_data(data_length, blocksize, infp, outfp):
# type: (int, int, BinaryIO, BinaryIO) -> None
'''
A utility function to copy data from the input file object to the output
file object. This function will use the most efficient copy method available,
which is often sendfile.
Parameters:
data_length - The amount of data to copy.
blocksize - How much data to copy per iteration.
infp - The file object to copy data from.
outfp - The file object to copy data to.
Returns:
Nothing.
'''
use_sendfile = False
if have_sendfile:
# Python 3 implements the fileno method for all file-like objects, so
# we can't just use the existence of the method to tell whether it is
# available. Instead, we try to assign it, and if we fail, then we
# assume it is not available.
try:
x_unused = infp.fileno() # NOQA
y_unused = outfp.fileno() # NOQA
use_sendfile = True
except (AttributeError, io.UnsupportedOperation):
pass
if use_sendfile:
# This is one of those instances where using the file object and the
# file descriptor causes problems. The sendfile() call actually updates
# the underlying file descriptor, but the file object does not know
# about it. To get around this, we instead get the offset, allow
# sendfile() to update the offset, then manually seek the file object
# to the right location. This ensures that the file object gets updated
# properly.
in_offset = infp.tell()
out_offset = outfp.tell()
sendfile(outfp.fileno(), infp.fileno(), in_offset, data_length)
infp.seek(in_offset + data_length)
outfp.seek(out_offset + data_length)
else:
left = data_length
readsize = blocksize
while left > 0:
if left < readsize:
readsize = left
data = infp.read(readsize)
# We have seen ISOs in the wild (Tribes Vengeance 1of4.iso) that
# lie about the size of their files, causing reads to fail (since
# we hit EOF before the supposed end of the file). If we are using
# sendfile above, sendfile just silently returns as much data as it
# can, with no additional checking. We should do the same here, so
# if we got less data than we asked for, abort the loop silently.
data_len = len(data)
if data_len != readsize:
data_len = left
outfp.write(data)
left -= data_len
|
python
|
def copy_data(data_length, blocksize, infp, outfp):
# type: (int, int, BinaryIO, BinaryIO) -> None
'''
A utility function to copy data from the input file object to the output
file object. This function will use the most efficient copy method available,
which is often sendfile.
Parameters:
data_length - The amount of data to copy.
blocksize - How much data to copy per iteration.
infp - The file object to copy data from.
outfp - The file object to copy data to.
Returns:
Nothing.
'''
use_sendfile = False
if have_sendfile:
# Python 3 implements the fileno method for all file-like objects, so
# we can't just use the existence of the method to tell whether it is
# available. Instead, we try to assign it, and if we fail, then we
# assume it is not available.
try:
x_unused = infp.fileno() # NOQA
y_unused = outfp.fileno() # NOQA
use_sendfile = True
except (AttributeError, io.UnsupportedOperation):
pass
if use_sendfile:
# This is one of those instances where using the file object and the
# file descriptor causes problems. The sendfile() call actually updates
# the underlying file descriptor, but the file object does not know
# about it. To get around this, we instead get the offset, allow
# sendfile() to update the offset, then manually seek the file object
# to the right location. This ensures that the file object gets updated
# properly.
in_offset = infp.tell()
out_offset = outfp.tell()
sendfile(outfp.fileno(), infp.fileno(), in_offset, data_length)
infp.seek(in_offset + data_length)
outfp.seek(out_offset + data_length)
else:
left = data_length
readsize = blocksize
while left > 0:
if left < readsize:
readsize = left
data = infp.read(readsize)
# We have seen ISOs in the wild (Tribes Vengeance 1of4.iso) that
# lie about the size of their files, causing reads to fail (since
# we hit EOF before the supposed end of the file). If we are using
# sendfile above, sendfile just silently returns as much data as it
# can, with no additional checking. We should do the same here, so
# if we got less data than we asked for, abort the loop silently.
data_len = len(data)
if data_len != readsize:
data_len = left
outfp.write(data)
left -= data_len
|
[
"def",
"copy_data",
"(",
"data_length",
",",
"blocksize",
",",
"infp",
",",
"outfp",
")",
":",
"# type: (int, int, BinaryIO, BinaryIO) -> None",
"use_sendfile",
"=",
"False",
"if",
"have_sendfile",
":",
"# Python 3 implements the fileno method for all file-like objects, so",
"# we can't just use the existence of the method to tell whether it is",
"# available. Instead, we try to assign it, and if we fail, then we",
"# assume it is not available.",
"try",
":",
"x_unused",
"=",
"infp",
".",
"fileno",
"(",
")",
"# NOQA",
"y_unused",
"=",
"outfp",
".",
"fileno",
"(",
")",
"# NOQA",
"use_sendfile",
"=",
"True",
"except",
"(",
"AttributeError",
",",
"io",
".",
"UnsupportedOperation",
")",
":",
"pass",
"if",
"use_sendfile",
":",
"# This is one of those instances where using the file object and the",
"# file descriptor causes problems. The sendfile() call actually updates",
"# the underlying file descriptor, but the file object does not know",
"# about it. To get around this, we instead get the offset, allow",
"# sendfile() to update the offset, then manually seek the file object",
"# to the right location. This ensures that the file object gets updated",
"# properly.",
"in_offset",
"=",
"infp",
".",
"tell",
"(",
")",
"out_offset",
"=",
"outfp",
".",
"tell",
"(",
")",
"sendfile",
"(",
"outfp",
".",
"fileno",
"(",
")",
",",
"infp",
".",
"fileno",
"(",
")",
",",
"in_offset",
",",
"data_length",
")",
"infp",
".",
"seek",
"(",
"in_offset",
"+",
"data_length",
")",
"outfp",
".",
"seek",
"(",
"out_offset",
"+",
"data_length",
")",
"else",
":",
"left",
"=",
"data_length",
"readsize",
"=",
"blocksize",
"while",
"left",
">",
"0",
":",
"if",
"left",
"<",
"readsize",
":",
"readsize",
"=",
"left",
"data",
"=",
"infp",
".",
"read",
"(",
"readsize",
")",
"# We have seen ISOs in the wild (Tribes Vengeance 1of4.iso) that",
"# lie about the size of their files, causing reads to fail (since",
"# we hit EOF before the supposed end of the file). If we are using",
"# sendfile above, sendfile just silently returns as much data as it",
"# can, with no additional checking. We should do the same here, so",
"# if we got less data than we asked for, abort the loop silently.",
"data_len",
"=",
"len",
"(",
"data",
")",
"if",
"data_len",
"!=",
"readsize",
":",
"data_len",
"=",
"left",
"outfp",
".",
"write",
"(",
"data",
")",
"left",
"-=",
"data_len"
] |
A utility function to copy data from the input file object to the output
file object. This function will use the most efficient copy method available,
which is often sendfile.
Parameters:
data_length - The amount of data to copy.
blocksize - How much data to copy per iteration.
infp - The file object to copy data from.
outfp - The file object to copy data to.
Returns:
Nothing.
|
[
"A",
"utility",
"function",
"to",
"copy",
"data",
"from",
"the",
"input",
"file",
"object",
"to",
"the",
"output",
"file",
"object",
".",
"This",
"function",
"will",
"use",
"the",
"most",
"efficient",
"copy",
"method",
"available",
"which",
"is",
"often",
"sendfile",
"."
] |
1e7b77a809e905d67dc71e12d70e850be26b6233
|
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/utils.py#L93-L151
|
train
|
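A self-contained sketch of copy_data using in-memory streams. io.BytesIO raises io.UnsupportedOperation from fileno(), so this deliberately exercises the read/write fallback path rather than sendfile and runs on any platform.

import io
from pycdlib import utils

src = io.BytesIO(b'A' * 10000)
dst = io.BytesIO()
utils.copy_data(10000, 4096, src, dst)   # data_length, blocksize, infp, outfp
assert dst.getvalue() == b'A' * 10000    # copied in 4096-byte reads plus a final short read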
clalancette/pycdlib
|
pycdlib/utils.py
|
encode_space_pad
|
def encode_space_pad(instr, length, encoding):
# type: (bytes, int, str) -> bytes
'''
A function to pad out an input string with spaces to the length specified.
The space is first encoded into the specified encoding, then appended to
the input string until the length is reached.
Parameters:
instr - The input string to encode and pad.
length - The length to pad the input string to.
encoding - The encoding to use.
Returns:
The input string encoded in the encoding and padded with encoded spaces.
'''
output = instr.decode('utf-8').encode(encoding)
if len(output) > length:
raise pycdlibexception.PyCdlibInvalidInput('Input string too long!')
encoded_space = ' '.encode(encoding)
left = length - len(output)
while left > 0:
output += encoded_space
left -= len(encoded_space)
if left < 0:
output = output[:left]
return output
|
python
|
def encode_space_pad(instr, length, encoding):
# type: (bytes, int, str) -> bytes
'''
A function to pad out an input string with spaces to the length specified.
The space is first encoded into the specified encoding, then appended to
the input string until the length is reached.
Parameters:
instr - The input string to encode and pad.
length - The length to pad the input string to.
encoding - The encoding to use.
Returns:
The input string encoded in the encoding and padded with encoded spaces.
'''
output = instr.decode('utf-8').encode(encoding)
if len(output) > length:
raise pycdlibexception.PyCdlibInvalidInput('Input string too long!')
encoded_space = ' '.encode(encoding)
left = length - len(output)
while left > 0:
output += encoded_space
left -= len(encoded_space)
if left < 0:
output = output[:left]
return output
|
[
"def",
"encode_space_pad",
"(",
"instr",
",",
"length",
",",
"encoding",
")",
":",
"# type: (bytes, int, str) -> bytes",
"output",
"=",
"instr",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"encode",
"(",
"encoding",
")",
"if",
"len",
"(",
"output",
")",
">",
"length",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidInput",
"(",
"'Input string too long!'",
")",
"encoded_space",
"=",
"' '",
".",
"encode",
"(",
"encoding",
")",
"left",
"=",
"length",
"-",
"len",
"(",
"output",
")",
"while",
"left",
">",
"0",
":",
"output",
"+=",
"encoded_space",
"left",
"-=",
"len",
"(",
"encoded_space",
")",
"if",
"left",
"<",
"0",
":",
"output",
"=",
"output",
"[",
":",
"left",
"]",
"return",
"output"
] |
A function to pad out an input string with spaces to the length specified.
The space is first encoded into the specified encoding, then appended to
the input string until the length is reached.
Parameters:
instr - The input string to encode and pad.
length - The length to pad the input string to.
encoding - The encoding to use.
Returns:
The input string encoded in the encoding and padded with encoded spaces.
|
[
"A",
"function",
"to",
"pad",
"out",
"an",
"input",
"string",
"with",
"spaces",
"to",
"the",
"length",
"specified",
".",
"The",
"space",
"is",
"first",
"encoded",
"into",
"the",
"specified",
"encoding",
"then",
"appended",
"to",
"the",
"input",
"string",
"until",
"the",
"length",
"is",
"reached",
"."
] |
1e7b77a809e905d67dc71e12d70e850be26b6233
|
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/utils.py#L154-L182
|
train
|
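encode_space_pad above transcodes the input and then appends encoded spaces, truncating if a multi-byte space overshoots the target length. Two small illustrative calls:

from pycdlib import utils

utils.encode_space_pad(b'PYCDLIB', 16, 'utf-8')   # b'PYCDLIB' followed by 9 ASCII spaces
utils.encode_space_pad(b'ABC', 9, 'utf-16_be')    # 6 bytes of text, then padded to exactly 9 bytes
                                                  # (the last 2-byte encoded space is truncated)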
clalancette/pycdlib
|
pycdlib/utils.py
|
gmtoffset_from_tm
|
def gmtoffset_from_tm(tm, local):
# type: (float, time.struct_time) -> int
'''
A function to compute the GMT offset from the time in seconds since the epoch
and the local time object.
Parameters:
tm - The time in seconds since the epoch.
local - The struct_time object representing the local time.
Returns:
The gmtoffset.
'''
gmtime = time.gmtime(tm)
tmpyear = gmtime.tm_year - local.tm_year
tmpyday = gmtime.tm_yday - local.tm_yday
tmphour = gmtime.tm_hour - local.tm_hour
tmpmin = gmtime.tm_min - local.tm_min
if tmpyday < 0:
tmpyday = -1
else:
if tmpyear > 0:
tmpyday = 1
return -(tmpmin + 60 * (tmphour + 24 * tmpyday)) // 15
|
python
|
def gmtoffset_from_tm(tm, local):
# type: (float, time.struct_time) -> int
'''
A function to compute the GMT offset from the time in seconds since the epoch
and the local time object.
Parameters:
tm - The time in seconds since the epoch.
local - The struct_time object representing the local time.
Returns:
The gmtoffset.
'''
gmtime = time.gmtime(tm)
tmpyear = gmtime.tm_year - local.tm_year
tmpyday = gmtime.tm_yday - local.tm_yday
tmphour = gmtime.tm_hour - local.tm_hour
tmpmin = gmtime.tm_min - local.tm_min
if tmpyday < 0:
tmpyday = -1
else:
if tmpyear > 0:
tmpyday = 1
return -(tmpmin + 60 * (tmphour + 24 * tmpyday)) // 15
|
[
"def",
"gmtoffset_from_tm",
"(",
"tm",
",",
"local",
")",
":",
"# type: (float, time.struct_time) -> int",
"gmtime",
"=",
"time",
".",
"gmtime",
"(",
"tm",
")",
"tmpyear",
"=",
"gmtime",
".",
"tm_year",
"-",
"local",
".",
"tm_year",
"tmpyday",
"=",
"gmtime",
".",
"tm_yday",
"-",
"local",
".",
"tm_yday",
"tmphour",
"=",
"gmtime",
".",
"tm_hour",
"-",
"local",
".",
"tm_hour",
"tmpmin",
"=",
"gmtime",
".",
"tm_min",
"-",
"local",
".",
"tm_min",
"if",
"tmpyday",
"<",
"0",
":",
"tmpyday",
"=",
"-",
"1",
"else",
":",
"if",
"tmpyear",
">",
"0",
":",
"tmpyday",
"=",
"1",
"return",
"-",
"(",
"tmpmin",
"+",
"60",
"*",
"(",
"tmphour",
"+",
"24",
"*",
"tmpyday",
")",
")",
"//",
"15"
] |
A function to compute the GMT offset from the time in seconds since the epoch
and the local time object.
Parameters:
tm - The time in seconds since the epoch.
local - The struct_time object representing the local time.
Returns:
The gmtoffset.
|
[
"A",
"function",
"to",
"compute",
"the",
"GMT",
"offset",
"from",
"the",
"time",
"in",
"seconds",
"since",
"the",
"epoch",
"and",
"the",
"local",
"time",
"object",
"."
] |
1e7b77a809e905d67dc71e12d70e850be26b6233
|
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/utils.py#L223-L246
|
train
|
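gmtoffset_from_tm compares a UTC struct_time against the supplied local struct_time and, per the final division by 15, returns the offset in 15-minute units (the granularity ISO9660 date stamps use). A small sketch; the expected value of 8 assumes a UTC+02:00 local zone and is illustrative.

import time
from pycdlib import utils

now = time.time()
offset = utils.gmtoffset_from_tm(now, time.localtime(now))
print(offset, 'quarter hours =>', offset * 15, 'minutes east of UTC')  # e.g. 8 => 120 for UTC+02:00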
clalancette/pycdlib
|
pycdlib/utils.py
|
zero_pad
|
def zero_pad(fp, data_size, pad_size):
# type: (BinaryIO, int, int) -> None
'''
A function to write padding out from data_size up to pad_size
efficiently.
Parameters:
fp - The file object to use to write padding out to.
data_size - The current size of the data.
pad_size - The boundary size of data to pad out to.
Returns:
Nothing.
'''
padbytes = pad_size - (data_size % pad_size)
if padbytes == pad_size:
# Nothing to pad, get out.
return
fp.seek(padbytes - 1, os.SEEK_CUR)
fp.write(b'\x00')
|
python
|
def zero_pad(fp, data_size, pad_size):
# type: (BinaryIO, int, int) -> None
'''
A function to write padding out from data_size up to pad_size
efficiently.
Parameters:
fp - The file object to use to write padding out to.
data_size - The current size of the data.
pad_size - The boundary size of data to pad out to.
Returns:
Nothing.
'''
padbytes = pad_size - (data_size % pad_size)
if padbytes == pad_size:
# Nothing to pad, get out.
return
fp.seek(padbytes - 1, os.SEEK_CUR)
fp.write(b'\x00')
|
[
"def",
"zero_pad",
"(",
"fp",
",",
"data_size",
",",
"pad_size",
")",
":",
"# type: (BinaryIO, int, int) -> None",
"padbytes",
"=",
"pad_size",
"-",
"(",
"data_size",
"%",
"pad_size",
")",
"if",
"padbytes",
"==",
"pad_size",
":",
"# Nothing to pad, get out.",
"return",
"fp",
".",
"seek",
"(",
"padbytes",
"-",
"1",
",",
"os",
".",
"SEEK_CUR",
")",
"fp",
".",
"write",
"(",
"b'\\x00'",
")"
] |
A function to write padding out from data_size up to pad_size
efficiently.
Parameters:
fp - The file object to use to write padding out to.
data_size - The current size of the data.
pad_size - The boundary size of data to pad out to.
Returns:
Nothing.
|
[
"A",
"function",
"to",
"write",
"padding",
"out",
"from",
"data_size",
"up",
"to",
"pad_size",
"efficiently",
"."
] |
1e7b77a809e905d67dc71e12d70e850be26b6233
|
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/utils.py#L249-L268
|
train
|
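zero_pad seeks to one byte before the requested boundary and writes a single zero byte, letting the underlying file (or BytesIO) fill the gap. A runnable sketch padding 3000 bytes of data up to the next 2048-byte boundary:

import io
from pycdlib import utils

fp = io.BytesIO(b'\xff' * 3000)
fp.seek(0, io.SEEK_END)           # zero_pad pads forward from the current position
utils.zero_pad(fp, 3000, 2048)
assert fp.tell() == 4096          # 3000 rounded up to the next multiple of 2048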