Code | Summary
---|---
Please provide a description of the function:def get_shares(self, path='', **kwargs):
    if not isinstance(path, six.string_types):
        return None
    data = 'shares'
    if path != '':
        data += '?'
        path = self._encode_string(self._normalize_path(path))
        args = {'path': path}
        reshares = kwargs.get('reshares', False)
        if isinstance(reshares, bool) and reshares:
            args['reshares'] = reshares
        subfiles = kwargs.get('subfiles', False)
        if isinstance(subfiles, bool) and subfiles:
            args['subfiles'] = str(subfiles).lower()
        shared_with_me = kwargs.get('shared_with_me', False)
        if isinstance(shared_with_me, bool) and shared_with_me:
            args['shared_with_me'] = "true"
            # Incoming shares are not scoped to a path, so drop it here.
            del args['path']
        data += parse.urlencode(args)
    res = self._make_ocs_request(
        'GET',
        self.OCS_SERVICE_SHARE,
        data
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree)
        shares = []
        for element in tree.find('data').iter('element'):
            shares.append(self._get_shareinfo(element))
        return shares
    raise HTTPResponseError(res) | [
"Returns array of shares\n\n :param path: path to the share to be checked\n :param reshares: (optional, boolean) returns not only the shares from\n the current user but all shares from the given file (default: False)\n :param subfiles: (optional, boolean) returns all shares within\n a folder, given that path defines a folder (default: False)\n :param shared_with_me: (optional, boolean) returns all shares which are\n shared with me (default: False)\n :returns: array of shares ShareInfo instances or empty array if the operation failed\n :raises: HTTPResponseError in case an HTTP error status was returned\n "
] |
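A usage sketch for `get_shares`, assuming the method lives on pyocclient's `owncloud.Client`; the server URL, credentials, and paths are placeholders.

import owncloud

oc = owncloud.Client('https://cloud.example.com')  # placeholder server
oc.login('demo_user', 'demo_password')             # placeholder credentials

# Shares on a single file.
for share in oc.get_shares('/Documents/report.odt'):
    print(share)

# All shares below a folder, including reshares.
folder_shares = oc.get_shares('/Documents', subfiles=True, reshares=True)

# Incoming shares: a non-empty path is needed so the kwargs are serialized,
# and the method then drops 'path' from the query string itself.
incoming = oc.get_shares('/', shared_with_me=True)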
Please provide a description of the function:def create_user(self, user_name, initial_password):
    res = self._make_ocs_request(
        'POST',
        self.OCS_SERVICE_CLOUD,
        'users',
        data={'password': initial_password, 'userid': user_name}
    )
    # We get 200 when the user was just created.
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree, [100])
        return True
    raise HTTPResponseError(res) | [
"Create a new user with an initial password via provisioning API.\n It is not an error, if the user already existed before.\n If you get back an error 999, then the provisioning API is not enabled.\n\n :param user_name: name of user to be created\n :param initial_password: password for user being created\n :returns: True on success\n :raises: HTTPResponseError in case an HTTP error status was returned\n\n "
] |
Please provide a description of the function:def delete_user(self, user_name):
    res = self._make_ocs_request(
        'DELETE',
        self.OCS_SERVICE_CLOUD,
        'users/' + user_name
    )
    # We get 200 when the user was deleted.
    if res.status_code == 200:
        return True
    raise HTTPResponseError(res) | [
"Deletes a user via provisioning API.\n If you get back an error 999, then the provisioning API is not enabled.\n\n :param user_name: name of user to be deleted\n :returns: True on success\n :raises: HTTPResponseError in case an HTTP error status was returned\n\n "
] |
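A provisioning sketch pairing `create_user` and `delete_user`, assuming an admin session on pyocclient's `owncloud.Client` (placeholder host and credentials):

import owncloud

oc = owncloud.Client('https://cloud.example.com')  # placeholder server
oc.login('admin', 'admin_password')                # provisioning requires admin

try:
    oc.create_user('alice', 'initial-secret')  # True once the user exists
finally:
    oc.delete_user('alice')                    # clean up the test account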
Please provide a description of the function:def search_users(self, user_name):
    action_path = 'users'
    if user_name:
        action_path += '?search={}'.format(user_name)
    res = self._make_ocs_request(
        'GET',
        self.OCS_SERVICE_CLOUD,
        action_path
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        users = [x.text for x in tree.findall('data/users/element')]
        return users
    raise HTTPResponseError(res) | [
"Searches for users via provisioning API.\n If you get back an error 999, then the provisioning API is not enabled.\n\n :param user_name: name of user to be searched for\n :returns: list of usernames that contain user_name as substring\n :raises: HTTPResponseError in case an HTTP error status was returned\n\n "
] |
Please provide a description of the function:def set_user_attribute(self, user_name, key, value):
    res = self._make_ocs_request(
        'PUT',
        self.OCS_SERVICE_CLOUD,
        'users/' + parse.quote(user_name),
        data={'key': self._encode_string(key),
              'value': self._encode_string(value)}
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree, [100])
        return True
    raise HTTPResponseError(res) | [
"Sets a user attribute\n\n :param user_name: name of user to modify\n :param key: key of the attribute to set\n :param value: value to set\n :returns: True if the operation succeeded, False otherwise\n :raises: HTTPResponseError in case an HTTP error status was returned\n "
] |
Please provide a description of the function:def add_user_to_group(self, user_name, group_name):
    res = self._make_ocs_request(
        'POST',
        self.OCS_SERVICE_CLOUD,
        'users/' + user_name + '/groups',
        data={'groupid': group_name}
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree, [100])
        return True
    raise HTTPResponseError(res) | [
"Adds a user to a group.\n\n :param user_name: name of user to be added\n :param group_name: name of group user is to be added to\n :returns: True if user added\n :raises: HTTPResponseError in case an HTTP error status was returned\n\n "
] |
Please provide a description of the function:def get_user_groups(self, user_name):
    res = self._make_ocs_request(
        'GET',
        self.OCS_SERVICE_CLOUD,
        'users/' + user_name + '/groups',
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree, [100])
        return [group.text for group in tree.find('data/groups')]
    raise HTTPResponseError(res) | [
"Get a list of groups associated to a user.\n\n :param user_name: name of user to list groups\n :returns: list of groups\n :raises: HTTPResponseError in case an HTTP error status was returned\n\n "
] |
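Continuing the admin session above, a sketch tying `add_user_to_group` to `get_user_groups`. Note `create_group` is not shown in this section; it is assumed here as part of the same pyocclient provisioning surface.

if not oc.group_exists('staff'):
    oc.create_group('staff')            # assumed helper, not shown above
oc.add_user_to_group('alice', 'staff')
print(oc.get_user_groups('alice'))      # e.g. ['staff']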
Please provide a description of the function:def get_user(self, user_name):
    res = self._make_ocs_request(
        'GET',
        self.OCS_SERVICE_CLOUD,
        'users/' + parse.quote(user_name),
        data={}
    )
    tree = ET.fromstring(res.content)
    self._check_ocs_status(tree)
    # Example response:
    # <ocs><meta><statuscode>100</statuscode><status>ok</status></meta>
    #   <data>
    #     <email>[email protected]</email><quota>0</quota><enabled>true</enabled>
    #   </data>
    # </ocs>
    data_element = tree.find('data')
    return self._xml_to_dict(data_element) | [
"Retrieves information about a user\n\n :param user_name: name of user to query\n\n :returns: Dictionary of information about user\n :raises: ResponseError in case an HTTP error status was returned\n "
] |
Please provide a description of the function:def get_user_subadmin_groups(self, user_name):
    res = self._make_ocs_request(
        'GET',
        self.OCS_SERVICE_CLOUD,
        'users/' + user_name + '/subadmins',
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree, [100])
        groups = tree.find('data')
        return groups
    raise HTTPResponseError(res) | [
"Get a list of subadmin groups associated to a user.\n\n :param user_name: name of user\n :returns: list of subadmin groups\n :raises: HTTPResponseError in case an HTTP error status was returned\n\n "
] |
Please provide a description of the function:def share_file_with_user(self, path, user, **kwargs):
    remote_user = kwargs.get('remote_user', False)
    perms = kwargs.get('perms', self.OCS_PERMISSION_READ)
    if (((not isinstance(perms, int)) or (perms > self.OCS_PERMISSION_ALL))
            or ((not isinstance(user, six.string_types)) or (user == ''))):
        return False
    if remote_user and (not user.endswith('/')):
        user = user + '/'
    path = self._normalize_path(path)
    post_data = {
        'shareType': self.OCS_SHARE_TYPE_REMOTE if remote_user else
        self.OCS_SHARE_TYPE_USER,
        'shareWith': user,
        'path': self._encode_string(path),
        'permissions': perms
    }
    res = self._make_ocs_request(
        'POST',
        self.OCS_SERVICE_SHARE,
        'shares',
        data=post_data
    )
    if self._debug:
        print('OCS share_file request for file %s with permissions %i '
              'returned: %i' % (path, perms, res.status_code))
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree)
        data_el = tree.find('data')
        return ShareInfo(
            {
                'id': data_el.find('id').text,
                'path': path,
                'permissions': perms
            }
        )
    raise HTTPResponseError(res) | [
"Shares a remote file with specified user\n\n :param path: path to the remote file to share\n :param user: name of the user whom we want to share a file/folder\n :param perms (optional): permissions of the shared object\n defaults to read only (1)\n http://doc.owncloud.org/server/6.0/admin_manual/sharing_api/index.html\n :param remote_user (optional): True if it is a federated users\n defaults to False if it is a local user\n :returns: instance of :class:`ShareInfo` with the share info\n or False if the operation failed\n :raises: HTTPResponseError in case an HTTP error status was returned\n "
] |
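A sketch of `share_file_with_user` on the same assumed `owncloud.Client` session. The `OCS_PERMISSION_*` constants are the ones the method itself references; the target users are placeholders, and `get_id()` is assumed from pyocclient's `ShareInfo` accessors.

# Default read-only share with a local user.
share = oc.share_file_with_user('/Documents/report.odt', 'bob')
print(share.get_id())  # ShareInfo built from the 'id' field above

# Read + update permissions for a federated (remote) user.
share = oc.share_file_with_user(
    '/Documents',
    'carol@othercloud.example.org',
    perms=oc.OCS_PERMISSION_READ | oc.OCS_PERMISSION_UPDATE,
    remote_user=True,
)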
Please provide a description of the function:def delete_group(self, group_name):
    res = self._make_ocs_request(
        'DELETE',
        self.OCS_SERVICE_CLOUD,
        'groups/' + group_name
    )
    # We get 200 when the group was just deleted.
    if res.status_code == 200:
        return True
    raise HTTPResponseError(res) | [
"Delete a group via provisioning API.\n If you get back an error 999, then the provisioning API is not enabled.\n\n :param group_name: name of group to be deleted\n :returns: True if group deleted\n :raises: HTTPResponseError in case an HTTP error status was returned\n\n "
] |
Please provide a description of the function:def get_groups(self):
    res = self._make_ocs_request(
        'GET',
        self.OCS_SERVICE_CLOUD,
        'groups'
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        groups = [x.text for x in tree.findall('data/groups/element')]
        return groups
    raise HTTPResponseError(res) | [
"Get groups via provisioning API.\n If you get back an error 999, then the provisioning API is not enabled.\n\n :returns: list of groups\n :raises: HTTPResponseError in case an HTTP error status was returned\n\n "
] |
Please provide a description of the function:def get_group_members(self, group_name):
    res = self._make_ocs_request(
        'GET',
        self.OCS_SERVICE_CLOUD,
        'groups/' + group_name
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree, [100])
        return [group.text for group in tree.find('data/users')]
    raise HTTPResponseError(res) | [
"Get group members via provisioning API.\n If you get back an error 999, then the provisioning API is not enabled.\n\n :param group_name: name of group to list members\n :returns: list of group members\n :raises: HTTPResponseError in case an HTTP error status was returned\n\n "
] |
Please provide a description of the function:def group_exists(self, group_name):
    res = self._make_ocs_request(
        'GET',
        self.OCS_SERVICE_CLOUD,
        'groups?search=' + group_name
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        for code_el in tree.findall('data/groups/element'):
            if code_el is not None and code_el.text == group_name:
                return True
        return False
    raise HTTPResponseError(res) | [
"Checks a group via provisioning API.\n If you get back an error 999, then the provisioning API is not enabled.\n\n :param group_name: name of group to be checked\n :returns: True if group exists\n :raises: HTTPResponseError in case an HTTP error status was returned\n\n "
] |
Please provide a description of the function:def share_file_with_group(self, path, group, **kwargs):
    perms = kwargs.get('perms', self.OCS_PERMISSION_READ)
    if (((not isinstance(perms, int)) or (perms > self.OCS_PERMISSION_ALL))
            or ((not isinstance(group, six.string_types)) or (group == ''))):
        return False
    path = self._normalize_path(path)
    post_data = {'shareType': self.OCS_SHARE_TYPE_GROUP,
                 'shareWith': group,
                 'path': path,
                 'permissions': perms}
    res = self._make_ocs_request(
        'POST',
        self.OCS_SERVICE_SHARE,
        'shares',
        data=post_data
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree)
        data_el = tree.find('data')
        return ShareInfo(
            {
                'id': data_el.find('id').text,
                'path': path,
                'permissions': perms
            }
        )
    raise HTTPResponseError(res) | [
"Shares a remote file with specified group\n\n :param path: path to the remote file to share\n :param group: name of the group with which we want to share a file/folder\n :param perms (optional): permissions of the shared object\n defaults to read only (1)\n http://doc.owncloud.org/server/6.0/admin_manual/sharing_api/index.html\n :returns: instance of :class:`ShareInfo` with the share info\n or False if the operation failed\n :raises: HTTPResponseError in case an HTTP error status was returned\n "
] |
Please provide a description of the function:def get_config(self):
    path = 'config'
    res = self._make_ocs_request(
        'GET',
        '',
        path
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree)
        values = []
        element = tree.find('data')
        if element is not None:
            keys = ['version', 'website', 'host', 'contact', 'ssl']
            for key in keys:
                text = element.find(key).text or ''
                values.append(text)
            return zip(keys, values)
        else:
            return None
    raise HTTPResponseError(res) | [
"Returns ownCloud config information\n :returns: array of tuples (key, value) for each information\n e.g. [('version', '1.7'), ('website', 'ownCloud'), ('host', 'cloud.example.com'),\n ('contact', ''), ('ssl', 'false')]\n :raises: HTTPResponseError in case an HTTP error status was returned\n "
] |
Please provide a description of the function:def get_attribute(self, app=None, key=None):
    path = 'getattribute'
    if app is not None:
        path += '/' + parse.quote(app, '')
        if key is not None:
            path += '/' + parse.quote(self._encode_string(key), '')
    res = self._make_ocs_request(
        'GET',
        self.OCS_SERVICE_PRIVATEDATA,
        path
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree)
        values = []
        for element in tree.find('data').iter('element'):
            app_text = element.find('app').text
            key_text = element.find('key').text
            value_text = element.find('value').text or ''
            if key is None:
                if app is None:
                    values.append((app_text, key_text, value_text))
                else:
                    values.append((key_text, value_text))
            else:
                return value_text
        if len(values) == 0 and key is not None:
            return None
        return values
    raise HTTPResponseError(res) | [
"Returns an application attribute\n\n :param app: application id\n :param key: attribute key or None to retrieve all values for the\n given application\n :returns: attribute value if key was specified, or an array of tuples\n (key, value) for each attribute\n :raises: HTTPResponseError in case an HTTP error status was returned\n "
] |
Please provide a description of the function:def set_attribute(self, app, key, value):
    path = 'setattribute/' + parse.quote(app, '') + '/' + parse.quote(
        self._encode_string(key), '')
    res = self._make_ocs_request(
        'POST',
        self.OCS_SERVICE_PRIVATEDATA,
        path,
        data={'value': self._encode_string(value)}
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree)
        return True
    raise HTTPResponseError(res) | [
"Sets an application attribute\n\n :param app: application id\n :param key: key of the attribute to set\n :param value: value to set\n :returns: True if the operation succeeded, False otherwise\n :raises: HTTPResponseError in case an HTTP error status was returned\n "
] |
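A round-trip sketch for the privatedata pair above, on the same assumed client session; 'myapp' is a hypothetical application id.

oc.set_attribute('myapp', 'color', 'green')      # True on success
print(oc.get_attribute('myapp', 'color'))        # 'green'
print(oc.get_attribute('myapp'))                 # e.g. [('color', 'green')]
print(oc.get_attribute('myapp', 'missing-key'))  # None when the key is unset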
Please provide a description of the function:def get_apps(self):
    ena_apps = {}
    res = self._make_ocs_request('GET', self.OCS_SERVICE_CLOUD, 'apps')
    if res.status_code != 200:
        raise HTTPResponseError(res)
    tree = ET.fromstring(res.content)
    self._check_ocs_status(tree)
    # <data><apps><element>files</element><element>activity</element> ...
    for el in tree.findall('data/apps/element'):
        ena_apps[el.text] = False
    res = self._make_ocs_request('GET', self.OCS_SERVICE_CLOUD,
                                 'apps?filter=enabled')
    if res.status_code != 200:
        raise HTTPResponseError(res)
    tree = ET.fromstring(res.content)
    self._check_ocs_status(tree)
    for el in tree.findall('data/apps/element'):
        ena_apps[el.text] = True
    return ena_apps | [
" List all enabled apps through the provisioning api.\n\n :returns: a dict of apps, with values True/False, representing the enabled state.\n :raises: HTTPResponseError in case an HTTP error status was returned\n "
] |
Please provide a description of the function:def enable_app(self, appname):
    res = self._make_ocs_request('POST', self.OCS_SERVICE_CLOUD,
                                 'apps/' + appname)
    if res.status_code == 200:
        return True
    raise HTTPResponseError(res) | [
"Enable an app through provisioning_api\n\n :param appname: Name of app to be enabled\n :returns: True if the operation succeeded, False otherwise\n :raises: HTTPResponseError in case an HTTP error status was returned\n\n "
] |
Please provide a description of the function:def _normalize_path(path):
    if isinstance(path, FileInfo):
        path = path.path
    if len(path) == 0:
        return '/'
    if not path.startswith('/'):
        path = '/' + path
    return path | [
"Makes sure the path starts with a \"/\"\n "
] |
Please provide a description of the function:def _encode_string(s):
    if six.PY2 and isinstance(s, unicode):
        return s.encode('utf-8')
    return s | [
"Encodes a unicode instance to utf-8. If a str is passed it will\n simply be returned\n\n :param s: str or unicode to encode\n :returns: encoded output as str\n "
] |
Please provide a description of the function:def _check_ocs_status(tree, accepted_codes=[100]):
    code_el = tree.find('meta/statuscode')
    if code_el is not None and int(code_el.text) not in accepted_codes:
        r = requests.Response()
        msg_el = tree.find('meta/message')
        if msg_el is None:
            # Fall back to the entire OCS response if there is no message.
            msg_el = tree
        r._content = ET.tostring(msg_el)
        r.status_code = int(code_el.text)
        raise OCSResponseError(r) | [
"Checks the status code of an OCS request\n\n :param tree: response parsed with elementtree\n :param accepted_codes: list of statuscodes we consider good. E.g. [100,102] can be used to accept a POST\n returning an 'already exists' condition\n :raises: HTTPResponseError if the http status is not 200, or OCSResponseError if the OCS status is not one of the accepted_codes.\n "
] |
Please provide a description of the function:def make_ocs_request(self, method, service, action, **kwargs):
    accepted_codes = kwargs.pop('accepted_codes', [100])
    res = self._make_ocs_request(method, service, action, **kwargs)
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree, accepted_codes=accepted_codes)
        return res
    raise OCSResponseError(res) | [
"Makes a OCS API request and analyses the response\n\n :param method: HTTP method\n :param service: service name\n :param action: action path\n :param \\*\\*kwargs: optional arguments that ``requests.Request.request`` accepts\n :returns :class:`requests.Response` instance\n "
] |
Please provide a description of the function:def _make_ocs_request(self, method, service, action, **kwargs):
    slash = ''
    if service:
        slash = '/'
    path = self.OCS_BASEPATH + service + slash + action
    attributes = kwargs.copy()
    if 'headers' not in attributes:
        attributes['headers'] = {}
    attributes['headers']['OCS-APIREQUEST'] = 'true'
    if self._debug:
        print('OCS request: %s %s %s' % (method, self.url + path,
                                         attributes))
    res = self._session.request(method, self.url + path, **attributes)
    return res | [
"Makes a OCS API request\n\n :param method: HTTP method\n :param service: service name\n :param action: action path\n :param \\*\\*kwargs: optional arguments that ``requests.Request.request`` accepts\n :returns :class:`requests.Response` instance\n "
] |
Please provide a description of the function:def _make_dav_request(self, method, path, **kwargs):
    if self._debug:
        print('DAV request: %s %s' % (method, path))
        if kwargs.get('headers'):
            print('Headers: ', kwargs.get('headers'))
    path = self._normalize_path(path)
    res = self._session.request(
        method,
        self._webdav_url + parse.quote(self._encode_string(path)),
        **kwargs
    )
    if self._debug:
        print('DAV status: %i' % res.status_code)
    if res.status_code in [200, 207]:
        return self._parse_dav_response(res)
    if res.status_code in [204, 201]:
        return True
    raise HTTPResponseError(res) | [
"Makes a WebDAV request\n\n :param method: HTTP method\n :param path: remote path of the targetted file\n :param \\*\\*kwargs: optional arguments that ``requests.Request.request`` accepts\n :returns array of :class:`FileInfo` if the response\n contains it, or True if the operation succeded, False\n if it didn't\n "
] |
Please provide a description of the function:def _parse_dav_response(self, res):
    if res.status_code == 207:
        tree = ET.fromstring(res.content)
        items = []
        for child in tree:
            items.append(self._parse_dav_element(child))
        return items
    return False | [
"Parses the DAV responses from a multi-status response\n\n :param res: DAV response\n :returns array of :class:`FileInfo` or False if\n the operation did not succeed\n "
] |
Please provide a description of the function:def _parse_dav_element(self, dav_response):
    href = parse.unquote(
        self._strip_dav_path(dav_response.find('{DAV:}href').text)
    )
    if six.PY2:
        href = href.decode('utf-8')
    file_type = 'file'
    if href[-1] == '/':
        file_type = 'dir'
    file_attrs = {}
    attrs = dav_response.find('{DAV:}propstat')
    attrs = attrs.find('{DAV:}prop')
    for attr in attrs:
        file_attrs[attr.tag] = attr.text
    return FileInfo(href, file_type, file_attrs) | [
"Parses a single DAV element\n\n :param dav_response: DAV response\n :returns :class:`FileInfo`\n "
] |
Please provide a description of the function:def _strip_dav_path(self, path):
    if path.startswith(self._davpath):
        return path[len(self._davpath):]
    return path | [
"Removes the leading \"remote.php/webdav\" path from the given path\n\n :param path: path containing the remote DAV path \"remote.php/webdav\"\n :returns: path stripped of the remote DAV path\n "
] |
Please provide a description of the function:def _webdav_move_copy(self, remote_path_source, remote_path_target,
                       operation):
    if operation != "MOVE" and operation != "COPY":
        return False
    if remote_path_target[-1] == '/':
        remote_path_target += os.path.basename(remote_path_source)
    if not (remote_path_target[0] == '/'):
        remote_path_target = '/' + remote_path_target
    remote_path_source = self._normalize_path(remote_path_source)
    headers = {
        'Destination': self._webdav_url + parse.quote(
            self._encode_string(remote_path_target))
    }
    return self._make_dav_request(
        operation,
        remote_path_source,
        headers=headers
    ) | [
"Copies or moves a remote file or directory\n\n :param remote_path_source: source file or folder to copy / move\n :param remote_path_target: target file to which to copy / move\n :param operation: MOVE or COPY\n\n :returns: True if the operation succeeded, False otherwise\n :raises: HTTPResponseError in case an HTTP error status was returned\n "
] |
Please provide a description of the function:def _xml_to_dict(self, element):
    return_dict = {}
    for el in element:
        return_dict[el.tag] = None
        children = el.getchildren()
        if children:
            return_dict[el.tag] = self._xml_to_dict(children)
        else:
            return_dict[el.tag] = el.text
    return return_dict | [
"\n Take an XML element, iterate over it and build a dict\n\n :param element: An xml.etree.ElementTree.Element, or a list of the same\n :returns: A dictionary\n "
] |
Please provide a description of the function:def _get_shareinfo(self, data_el):
    if (data_el is None) or not isinstance(data_el, ET.Element):
        return None
    return ShareInfo(self._xml_to_dict(data_el)) | [
"Simple helper which returns instance of ShareInfo class\n\n :param data_el: 'data' element extracted from _make_ocs_request\n :returns: instance of ShareInfo class\n "
] |
Please provide a description of the function:def emit(self, *args, **kwargs):
    if self._block:
        return
    for slot in self._slots:
        if not slot:
            continue
        elif isinstance(slot, partial):
            slot()
        elif isinstance(slot, weakref.WeakKeyDictionary):
            # For class methods, get the class object and call the method accordingly.
            for obj, method in slot.items():
                method(obj, *args, **kwargs)
        elif isinstance(slot, weakref.ref):
            # If it's a weakref, call the ref to get the instance and then call the func.
            # Don't wrap in try/except so we don't risk masking exceptions from the actual func call.
            if slot() is not None:
                slot()(*args, **kwargs)
        else:
            # Else call it in a standard way. Should be just lambdas at this point.
            slot(*args, **kwargs) | [
"\n Calls all the connected slots with the provided args and kwargs unless block is activated\n "
] |
Please provide a description of the function:def connect(self, slot):
    if not callable(slot):
        raise ValueError("Connection to non-callable '%s' object failed" % slot.__class__.__name__)
    if isinstance(slot, partial) or '<' in slot.__name__:
        # If it's a partial or a lambda. The '<' check is the only py2- and py3-compatible way I could find.
        if slot not in self._slots:
            self._slots.append(slot)
    elif inspect.ismethod(slot):
        # Check if it's an instance method and store it with the instance as the key.
        slotSelf = slot.__self__
        slotDict = weakref.WeakKeyDictionary()
        slotDict[slotSelf] = slot.__func__
        if slotDict not in self._slots:
            self._slots.append(slotDict)
    else:
        # If it's just a function then just store it as a weakref.
        newSlotRef = weakref.ref(slot)
        if newSlotRef not in self._slots:
            self._slots.append(newSlotRef) | [
"\n Connects the signal to any callable object\n "
] |
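A minimal sketch of the connect/emit flow, assuming the `Signal` class assembled from these methods is importable and that its constructor initializes the `_slots` list and `_block` flag (the API matches PySignal-style signal/slot libraries):

from functools import partial

sig = Signal()  # the class the methods above belong to

def on_saved(path):
    print('saved:', path)

class Watcher(object):
    def notify(self, path):
        print('watcher saw:', path)

watcher = Watcher()
sig.connect(on_saved)                    # plain function -> stored as a weakref
sig.connect(watcher.notify)              # bound method -> WeakKeyDictionary entry
sig.connect(partial(print, 'partial!'))  # partials are called with no emit args

sig.emit('/tmp/report.txt')
# -> saved: /tmp/report.txt
# -> watcher saw: /tmp/report.txt
# -> partial!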
Please provide a description of the function:def disconnect(self, slot):
    if not callable(slot):
        return
    if inspect.ismethod(slot):
        # If it's a method, then find it by its instance.
        slotSelf = slot.__self__
        for s in self._slots:
            if isinstance(s, weakref.WeakKeyDictionary) and (slotSelf in s) and (s[slotSelf] is slot.__func__):
                self._slots.remove(s)
                break
    elif isinstance(slot, partial) or '<' in slot.__name__:
        # If it's a partial or lambda, try to remove directly.
        try:
            self._slots.remove(slot)
        except ValueError:
            pass
    else:
        # It's probably a function, so try to remove by weakref.
        try:
            self._slots.remove(weakref.ref(slot))
        except ValueError:
            pass | [
"\n Disconnects the slot from the signal\n "
] |
Please provide a description of the function:def register(self, name, *slots):
    # Don't use setdefault here: it would construct a Signal() even when the
    # name is already registered, so check membership first.
    if name not in self:
        self[name] = Signal()
    for slot in slots:
        self[name].connect(slot) | [
"\n Registers a given signal\n :param name: the signal to register\n "
] |
Please provide a description of the function:def emit(self, signalName, *args, **kwargs):
    assert signalName in self, "%s is not a registered signal" % signalName
    self[signalName].emit(*args, **kwargs) | [
"\n Emits a signal by name if it exists. Any additional args or kwargs are passed to the signal\n :param signalName: the signal name to emit\n "
] |
Please provide a description of the function:def connect(self, signalName, slot):
    assert signalName in self, "%s is not a registered signal" % signalName
    self[signalName].connect(slot) | [
"\n Connects a given signal to a given slot\n :param signalName: the signal name to connect to\n :param slot: the callable slot to register\n "
] |
Please provide a description of the function:def block(self, signals=None, isBlocked=True):
    if signals:
        try:
            if isinstance(signals, basestring):
                signals = [signals]
        except NameError:
            # basestring only exists on Python 2; fall back to str on Python 3.
            if isinstance(signals, str):
                signals = [signals]
    signals = signals or self.keys()
    for signal in signals:
        if signal not in self:
            raise RuntimeError("Could not find signal matching %s" % signal)
        self[signal].block(isBlocked) | [
"\n Sets the block on any provided signals, or to all signals\n\n :param signals: defaults to all signals. Accepts either a single string or a list of strings\n :param isBlocked: the state to set the signal to\n "
] |
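A sketch of the registry methods above. The `name in self` checks imply they belong to a dict subclass; `SignalRegistry` is a hypothetical name for that class, and `Signal.block(isBlocked)` is assumed to exist since `block` calls it.

registry = SignalRegistry()  # hypothetical name for the dict-based class

registry.register('file_saved', lambda path: print('saved', path))
registry.emit('file_saved', '/tmp/a.txt')   # -> saved /tmp/a.txt

registry.block('file_saved', isBlocked=True)
registry.emit('file_saved', '/tmp/b.txt')   # silenced while blocked
registry.block(isBlocked=False)             # lift the block on all signals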
Please provide a description of the function:def _open(file_or_str, **kwargs):
    '''Either open a file handle, or use an existing file-like object.
    This will behave as the `open` function if `file_or_str` is a string.
    If `file_or_str` has the `read` attribute, it will return `file_or_str`.
    Otherwise, an `IOError` is raised.
    '''
    # Note: in the source this generator is wrapped with
    # @contextlib.contextmanager, which is why callers below can use it
    # in a ``with`` block.
    if hasattr(file_or_str, 'read'):
        yield file_or_str
    elif isinstance(file_or_str, six.string_types):
        with open(file_or_str, **kwargs) as file_desc:
            yield file_desc
    else:
        raise IOError('Invalid file-or-str object: {}'.format(file_or_str)) | [] |
Please provide a description of the function:def load_delimited(filename, converters, delimiter=r'\s+'):
    # Initialize list of empty lists
    n_columns = len(converters)
    columns = tuple(list() for _ in range(n_columns))
    # Create re object for splitting lines
    splitter = re.compile(delimiter)
    # Note: we do io manually here for two reasons.
    #   1. The csv module has difficulties with unicode, which may lead
    #      to failures on certain annotation strings
    #   2. numpy's text loader does not handle non-numeric data
    with _open(filename, mode='r') as input_file:
        for row, line in enumerate(input_file, 1):
            # Split each line using the supplied delimiter
            data = splitter.split(line.strip(), n_columns - 1)
            # Throw a helpful error if we got an unexpected # of columns
            if n_columns != len(data):
                raise ValueError('Expected {} columns, got {} at '
                                 '{}:{:d}:\n\t{}'.format(n_columns, len(data),
                                                         filename, row, line))
            for value, column, converter in zip(data, columns, converters):
                # Try converting the value, throw a helpful error on failure
                try:
                    converted_value = converter(value)
                except:
                    raise ValueError("Couldn't convert value {} using {} "
                                     "found at {}:{:d}:\n\t{}".format(
                                         value, converter.__name__, filename,
                                         row, line))
                column.append(converted_value)
    # Sane output
    if n_columns == 1:
        return columns[0]
    else:
        return columns | [
"Utility function for loading in data from an annotation file where columns\n are delimited. The number of columns is inferred from the length of\n the provided converters list.\n\n Examples\n --------\n >>> # Load in a one-column list of event times (floats)\n >>> load_delimited('events.txt', [float])\n >>> # Load in a list of labeled events, separated by commas\n >>> load_delimited('labeled_events.csv', [float, str], ',')\n\n Parameters\n ----------\n filename : str\n Path to the annotation file\n converters : list of functions\n Each entry in column ``n`` of the file will be cast by the function\n ``converters[n]``.\n delimiter : str\n Separator regular expression.\n By default, lines will be split by any amount of whitespace.\n\n Returns\n -------\n columns : tuple of lists\n Each list in this tuple corresponds to values in one of the columns\n in the file.\n\n "
] |
Please provide a description of the function:def load_events(filename, delimiter=r'\s+'):
    # Use our universal function to load in the events
    events = load_delimited(filename, [float], delimiter)
    events = np.array(events)
    # Validate them, but throw a warning in place of an error
    try:
        util.validate_events(events)
    except ValueError as error:
        warnings.warn(error.args[0])
    return events | [
"Import time-stamp events from an annotation file. The file should\n consist of a single column of numeric values corresponding to the event\n times. This is primarily useful for processing events which lack duration,\n such as beats or onsets.\n\n Parameters\n ----------\n filename : str\n Path to the annotation file\n delimiter : str\n Separator regular expression.\n By default, lines will be split by any amount of whitespace.\n\n Returns\n -------\n event_times : np.ndarray\n array of event times (float)\n\n "
] |
Please provide a description of the function:def load_labeled_events(filename, delimiter=r'\s+'):
    # Use our universal function to load in the events
    events, labels = load_delimited(filename, [float, str], delimiter)
    events = np.array(events)
    # Validate them, but throw a warning in place of an error
    try:
        util.validate_events(events)
    except ValueError as error:
        warnings.warn(error.args[0])
    return events, labels | [
"Import labeled time-stamp events from an annotation file. The file should\n consist of two columns; the first having numeric values corresponding to\n the event times and the second having string labels for each event. This\n is primarily useful for processing labeled events which lack duration, such\n as beats with metric beat number or onsets with an instrument label.\n\n Parameters\n ----------\n filename : str\n Path to the annotation file\n delimiter : str\n Separator regular expression.\n By default, lines will be split by any amount of whitespace.\n\n Returns\n -------\n event_times : np.ndarray\n array of event times (float)\n labels : list of str\n list of labels\n\n "
] |
Please provide a description of the function:def load_labeled_intervals(filename, delimiter=r'\s+'):
    # Use our universal function to load in the events
    starts, ends, labels = load_delimited(filename, [float, float, str],
                                          delimiter)
    # Stack into an interval matrix
    intervals = np.array([starts, ends]).T
    # Validate them, but throw a warning in place of an error
    try:
        util.validate_intervals(intervals)
    except ValueError as error:
        warnings.warn(error.args[0])
    return intervals, labels | [
"Import labeled intervals from an annotation file. The file should consist\n of three columns: Two consisting of numeric values corresponding to start\n and end time of each interval and a third corresponding to the label of\n each interval. This is primarily useful for processing events which span a\n duration, such as segmentation, chords, or instrument activation.\n\n Parameters\n ----------\n filename : str\n Path to the annotation file\n delimiter : str\n Separator regular expression.\n By default, lines will be split by any amount of whitespace.\n\n Returns\n -------\n intervals : np.ndarray, shape=(n_events, 2)\n array of event start and end time\n labels : list of str\n list of labels\n\n "
] |
Please provide a description of the function:def load_time_series(filename, delimiter=r'\s+'):
    # Use our universal function to load in the events
    times, values = load_delimited(filename, [float, float], delimiter)
    times = np.array(times)
    values = np.array(values)
    return times, values | [
"Import a time series from an annotation file. The file should consist of\n two columns of numeric values corresponding to the time and value of each\n sample of the time series.\n\n Parameters\n ----------\n filename : str\n Path to the annotation file\n delimiter : str\n Separator regular expression.\n By default, lines will be split by any amount of whitespace.\n\n Returns\n -------\n times : np.ndarray\n array of timestamps (float)\n values : np.ndarray\n array of corresponding numeric values (float)\n\n "
] |
Please provide a description of the function:def load_patterns(filename):
    # List with all the patterns
    pattern_list = []
    # Current pattern, which will contain all occs
    pattern = []
    # Current occurrence, containing (onset, midi)
    occurrence = []
    with _open(filename, mode='r') as input_file:
        for line in input_file.readlines():
            if "pattern" in line:
                if occurrence != []:
                    pattern.append(occurrence)
                if pattern != []:
                    pattern_list.append(pattern)
                occurrence = []
                pattern = []
                continue
            if "occurrence" in line:
                if occurrence != []:
                    pattern.append(occurrence)
                occurrence = []
                continue
            string_values = line.split(",")
            onset_midi = (float(string_values[0]), float(string_values[1]))
            occurrence.append(onset_midi)
        # Add last occurrence and pattern to pattern_list
        if occurrence != []:
            pattern.append(occurrence)
        if pattern != []:
            pattern_list.append(pattern)
    return pattern_list | [
"Loads the patters contained in the filename and puts them into a list\n of patterns, each pattern being a list of occurrence, and each\n occurrence being a list of (onset, midi) pairs.\n\n The input file must be formatted as described in MIREX 2013:\n http://www.music-ir.org/mirex/wiki/2013:Discovery_of_Repeated_Themes_%26_Sections\n\n Parameters\n ----------\n filename : str\n The input file path containing the patterns of a given piece using the\n MIREX 2013 format.\n\n Returns\n -------\n pattern_list : list\n The list of patterns, containing all their occurrences,\n using the following format::\n\n onset_midi = (onset_time, midi_number)\n occurrence = [onset_midi1, ..., onset_midiO]\n pattern = [occurrence1, ..., occurrenceM]\n pattern_list = [pattern1, ..., patternN]\n\n where ``N`` is the number of patterns, ``M[i]`` is the number of\n occurrences of the ``i`` th pattern, and ``O[j]`` is the number of\n onsets in the ``j``'th occurrence. E.g.::\n\n occ1 = [(0.5, 67.0), (1.0, 67.0), (1.5, 67.0), (2.0, 64.0)]\n occ2 = [(4.5, 65.0), (5.0, 65.0), (5.5, 65.0), (6.0, 62.0)]\n pattern1 = [occ1, occ2]\n\n occ1 = [(10.5, 67.0), (11.0, 67.0), (11.5, 67.0), (12.0, 64.0),\n (12.5, 69.0), (13.0, 69.0), (13.5, 69.0), (14.0, 67.0),\n (14.5, 76.0), (15.0, 76.0), (15.5, 76.0), (16.0, 72.0)]\n occ2 = [(18.5, 67.0), (19.0, 67.0), (19.5, 67.0), (20.0, 62.0),\n (20.5, 69.0), (21.0, 69.0), (21.5, 69.0), (22.0, 67.0),\n (22.5, 77.0), (23.0, 77.0), (23.5, 77.0), (24.0, 74.0)]\n pattern2 = [occ1, occ2]\n\n pattern_list = [pattern1, pattern2]\n\n "
] |
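Because `_open` passes through anything with a `read` attribute, `load_patterns` also accepts in-memory streams. A sketch with a tiny MIREX-style pattern list, assuming the function lives at `mir_eval.io.load_patterns`:

import io

import mir_eval

content = (
    'pattern1\n'
    'occurrence1\n'
    '0.5, 67.0\n'
    '1.0, 67.0\n'
    'occurrence2\n'
    '4.5, 65.0\n'
    '5.0, 65.0\n'
)
patterns = mir_eval.io.load_patterns(io.StringIO(content))
print(len(patterns))   # 1 pattern with 2 occurrences
print(patterns[0][1])  # [(4.5, 65.0), (5.0, 65.0)]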
Please provide a description of the function:def load_wav(path, mono=True):
    fs, audio_data = scipy.io.wavfile.read(path)
    # Make float in range [-1, 1]
    if audio_data.dtype == 'int8':
        audio_data = audio_data/float(2**8)
    elif audio_data.dtype == 'int16':
        audio_data = audio_data/float(2**16)
    elif audio_data.dtype == 'int32':
        audio_data = audio_data/float(2**24)
    else:
        raise ValueError('Got unexpected .wav data type '
                         '{}'.format(audio_data.dtype))
    # Optionally convert to mono
    if mono and audio_data.ndim != 1:
        audio_data = audio_data.mean(axis=1)
    return audio_data, fs | [
"Loads a .wav file as a numpy array using ``scipy.io.wavfile``.\n\n Parameters\n ----------\n path : str\n Path to a .wav file\n mono : bool\n If the provided .wav has more than one channel, it will be\n converted to mono if ``mono=True``. (Default value = True)\n\n Returns\n -------\n audio_data : np.ndarray\n Array of audio samples, normalized to the range [-1., 1.]\n fs : int\n Sampling rate of the audio data\n\n "
] |
Please provide a description of the function:def load_key(filename, delimiter=r'\s+'):
    # Use our universal function to load the key and mode strings
    scale, mode = load_delimited(filename, [str, str], delimiter)
    if len(scale) != 1:
        raise ValueError('Key file should contain only one line.')
    scale, mode = scale[0], mode[0]
    # Join with a space
    key_string = '{} {}'.format(scale, mode)
    # Validate the key, but throw a warning in place of an error
    try:
        key.validate_key(key_string)
    except ValueError as error:
        warnings.warn(error.args[0])
    return key_string | [
"Load key labels from an annotation file. The file should\n consist of two string columns: One denoting the key scale degree\n (semitone), and the other denoting the mode (major or minor). The file\n should contain only one row.\n\n Parameters\n ----------\n filename : str\n Path to the annotation file\n delimiter : str\n Separator regular expression.\n By default, lines will be split by any amount of whitespace.\n\n Returns\n -------\n key : str\n Key label, in the form ``'(key) (mode)'``\n\n "
] |
Please provide a description of the function:def load_tempo(filename, delimiter=r'\s+'):
    # Use our universal function to load the two tempi and their weight
    t1, t2, weight = load_delimited(filename, [float, float, float], delimiter)
    weight = weight[0]
    tempi = np.concatenate([t1, t2])
    if len(t1) != 1:
        raise ValueError('Tempo file should contain only one line.')
    # Validate the tempi, but throw a warning in place of an error
    try:
        tempo.validate_tempi(tempi)
    except ValueError as error:
        warnings.warn(error.args[0])
    if not 0 <= weight <= 1:
        raise ValueError('Invalid weight: {}'.format(weight))
    return tempi, weight | [
"Load tempo estimates from an annotation file in MIREX format.\n The file should consist of three numeric columns: the first two\n correspond to tempo estimates (in beats-per-minute), and the third\n denotes the relative confidence of the first value compared to the\n second (in the range [0, 1]). The file should contain only one row.\n\n Parameters\n ----------\n filename : str\n Path to the annotation file\n delimiter : str\n Separator regular expression.\n By default, lines will be split by any amount of whitespace.\n\n Returns\n -------\n tempi : np.ndarray, non-negative\n The two tempo estimates\n\n weight : float [0, 1]\n The relative importance of ``tempi[0]`` compared to ``tempi[1]``\n "
] |
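A sketch of `load_tempo` on an in-memory MIREX tempo line (two BPM estimates followed by the relative weight of the first), again assuming the `mir_eval.io` module path:

import io

import mir_eval

tempi, weight = mir_eval.io.load_tempo(io.StringIO('120.0\t240.0\t0.75\n'))
print(tempi)   # [120. 240.]
print(weight)  # 0.75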
Please provide a description of the function:def load_ragged_time_series(filename, dtype=float, delimiter=r'\s+',
                            header=False):
    # Initialize empty lists
    times = []
    values = []
    # Create re object for splitting lines
    splitter = re.compile(delimiter)
    if header:
        start_row = 1
    else:
        start_row = 0
    with _open(filename, mode='r') as input_file:
        for row, line in enumerate(input_file, start_row):
            # Split each line using the supplied delimiter
            data = splitter.split(line.strip())
            try:
                converted_time = float(data[0])
            except (TypeError, ValueError) as exe:
                six.raise_from(ValueError("Couldn't convert value {} using {} "
                                          "found at {}:{:d}:\n\t{}".format(
                                              data[0], float.__name__,
                                              filename, row, line)), exe)
            times.append(converted_time)
            # Cast values to a numpy array. Time stamps with no values are
            # cast to an empty array.
            try:
                converted_value = np.array(data[1:], dtype=dtype)
            except (TypeError, ValueError) as exe:
                six.raise_from(ValueError("Couldn't convert value {} using {} "
                                          "found at {}:{:d}:\n\t{}".format(
                                              data[1:], dtype.__name__,
                                              filename, row, line)), exe)
            values.append(converted_value)
    return np.array(times), values | [
"Utility function for loading in data from a delimited time series\n annotation file with a variable number of columns.\n Assumes that column 0 contains time stamps and columns 1 through n contain\n values. n may be variable from time stamp to time stamp.\n\n Examples\n --------\n >>> # Load a ragged list of tab-delimited multi-f0 midi notes\n >>> times, vals = load_ragged_time_series('multif0.txt', dtype=int,\n delimiter='\\t')\n >>> # Load a raggled list of space delimited multi-f0 values with a header\n >>> times, vals = load_ragged_time_series('labeled_events.csv',\n header=True)\n\n Parameters\n ----------\n filename : str\n Path to the annotation file\n dtype : function\n Data type to apply to values columns.\n delimiter : str\n Separator regular expression.\n By default, lines will be split by any amount of whitespace.\n header : bool\n Indicates whether a header row is present or not.\n By default, assumes no header is present.\n\n Returns\n -------\n times : np.ndarray\n array of timestamps (float)\n values : list of np.ndarray\n list of arrays of corresponding values\n\n "
] |
Please provide a description of the function:def _pitch_classes():
    r'''Map from pitch class (str) to semitone (int).'''
    pitch_classes = ['C', 'D', 'E', 'F', 'G', 'A', 'B']
    semitones = [0, 2, 4, 5, 7, 9, 11]
    return dict([(c, s) for c, s in zip(pitch_classes, semitones)]) | [] |
Please provide a description of the function:def _scale_degrees():
    r'''Mapping from scale degrees (str) to semitones (int).'''
    degrees = ['1', '2', '3', '4', '5', '6', '7',
               '8', '9', '10', '11', '12', '13']
    semitones = [0, 2, 4, 5, 7, 9, 11, 12, 14, 16, 17, 19, 21]
    return dict([(d, s) for d, s in zip(degrees, semitones)]) | [] |
Please provide a description of the function:def pitch_class_to_semitone(pitch_class):
    r'''Convert a pitch class to semitone.

    Parameters
    ----------
    pitch_class : str
        Spelling of a given pitch class, e.g. 'C#', 'Gbb'

    Returns
    -------
    semitone : int
        Semitone value of the pitch class.
    '''
    semitone = 0
    for idx, char in enumerate(pitch_class):
        if char == '#' and idx > 0:
            semitone += 1
        elif char == 'b' and idx > 0:
            semitone -= 1
        elif idx == 0:
            semitone = PITCH_CLASSES.get(char)
        else:
            raise InvalidChordException(
                "Pitch class improperly formed: %s" % pitch_class)
    return semitone % 12 | [] |
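A few spot checks of the pitch-class arithmetic, assuming `mir_eval.chord` is the module these helpers live in:

import mir_eval

print(mir_eval.chord.pitch_class_to_semitone('C'))    # 0
print(mir_eval.chord.pitch_class_to_semitone('G#'))   # 8
print(mir_eval.chord.pitch_class_to_semitone('Cbb'))  # 10 (wraps modulo 12)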
Please provide a description of the function:def scale_degree_to_semitone(scale_degree):
    semitone = 0
    offset = 0
    if scale_degree.startswith("#"):
        offset = scale_degree.count("#")
        scale_degree = scale_degree.strip("#")
    elif scale_degree.startswith('b'):
        offset = -1 * scale_degree.count("b")
        scale_degree = scale_degree.strip("b")
    semitone = SCALE_DEGREES.get(scale_degree, None)
    if semitone is None:
        raise InvalidChordException(
            "Scale degree improperly formed: {}, expected one of {}."
            .format(scale_degree, list(SCALE_DEGREES.keys())))
    return semitone + offset | [
"Convert a scale degree to semitone.\n\n Parameters\n ----------\n scale degree : str\n Spelling of a relative scale degree, e.g. 'b3', '7', '#5'\n\n Returns\n -------\n semitone : int\n Relative semitone of the scale degree, wrapped to a single octave\n\n Raises\n ------\n InvalidChordException if `scale_degree` is invalid.\n "
] |
Please provide a description of the function:def scale_degree_to_bitmap(scale_degree, modulo=False, length=BITMAP_LENGTH):
    sign = 1
    if scale_degree.startswith("*"):
        sign = -1
        scale_degree = scale_degree.strip("*")
    edit_map = [0] * length
    sd_idx = scale_degree_to_semitone(scale_degree)
    if sd_idx < length or modulo:
        edit_map[sd_idx % length] = sign
    return np.array(edit_map) | [
"Create a bitmap representation of a scale degree.\n\n Note that values in the bitmap may be negative, indicating that the\n semitone is to be removed.\n\n Parameters\n ----------\n scale_degree : str\n Spelling of a relative scale degree, e.g. 'b3', '7', '#5'\n modulo : bool, default=True\n If a scale degree exceeds the length of the bit-vector, modulo the\n scale degree back into the bit-vector; otherwise it is discarded.\n length : int, default=12\n Length of the bit-vector to produce\n\n Returns\n -------\n bitmap : np.ndarray, in [-1, 0, 1], len=`length`\n Bitmap representation of this scale degree.\n "
] |
Please provide a description of the function:def quality_to_bitmap(quality):
    if quality not in QUALITIES:
        raise InvalidChordException(
            "Unsupported chord quality shorthand: '%s' "
            "Did you mean to reduce extended chords?" % quality)
    return np.array(QUALITIES[quality]) | [
"Return the bitmap for a given quality.\n\n Parameters\n ----------\n quality : str\n Chord quality name.\n\n Returns\n -------\n bitmap : np.ndarray\n Bitmap representation of this quality (12-dim).\n\n "
] |
Please provide a description of the function:def validate_chord_label(chord_label):
    # This monster regexp is pulled from the JAMS chord namespace,
    # which is in turn derived from the context-free grammar of
    # Harte et al., 2005.
    pattern = re.compile(r'''^((N|X)|(([A-G](b*|#*))((:(maj|min|dim|aug|1|5|sus2|sus4|maj6|min6|7|maj7|min7|dim7|hdim7|minmaj7|aug7|9|maj9|min9|11|maj11|min11|13|maj13|min13)(\((\*?((b*|#*)([1-9]|1[0-3]?))(,\*?((b*|#*)([1-9]|1[0-3]?)))*)\))?)|(:\((\*?((b*|#*)([1-9]|1[0-3]?))(,\*?((b*|#*)([1-9]|1[0-3]?)))*)\)))?((/((b*|#*)([1-9]|1[0-3]?)))?)?))$''')  # nopep8
    if not pattern.match(chord_label):
        raise InvalidChordException('Invalid chord label: '
                                    '{}'.format(chord_label))
    pass | [
"Test for well-formedness of a chord label.\n\n Parameters\n ----------\n chord : str\n Chord label to validate.\n\n "
] |
Please provide a description of the function:def split(chord_label, reduce_extended_chords=False):
    chord_label = str(chord_label)
    validate_chord_label(chord_label)
    if chord_label == NO_CHORD:
        return [chord_label, '', set(), '']
    bass = '1'
    if "/" in chord_label:
        chord_label, bass = chord_label.split("/")
    scale_degrees = set()
    omission = False
    if "(" in chord_label:
        chord_label, scale_degrees = chord_label.split("(")
        omission = "*" in scale_degrees
        scale_degrees = scale_degrees.strip(")")
        scale_degrees = set([i.strip() for i in scale_degrees.split(",")])
    # Note: Chords lacking quality AND added interval information are major.
    #   If a quality shorthand is specified, it is returned.
    #   If an interval is specified WITHOUT a quality, the quality field is
    #   empty.
    #   Intervals specifying omissions MUST have a quality.
    if omission and ":" not in chord_label:
        raise InvalidChordException(
            "Intervals specifying omissions MUST have a quality.")
    quality = '' if scale_degrees else 'maj'
    if ":" in chord_label:
        chord_root, quality_name = chord_label.split(":")
        # Extended chords (with ":"s) may not explicitly have Major qualities,
        # so only overwrite the default if the string is not empty.
        if quality_name:
            quality = quality_name.lower()
    else:
        chord_root = chord_label
    if reduce_extended_chords:
        quality, addl_scale_degrees = reduce_extended_quality(quality)
        scale_degrees.update(addl_scale_degrees)
    return [chord_root, quality, scale_degrees, bass] | [
"Parse a chord label into its four constituent parts:\n - root\n - quality shorthand\n - scale degrees\n - bass\n\n Note: Chords lacking quality AND interval information are major.\n - If a quality is specified, it is returned.\n - If an interval is specified WITHOUT a quality, the quality field is\n empty.\n\n Some examples::\n\n 'C' -> ['C', 'maj', {}, '1']\n 'G#:min(*b3,*5)/5' -> ['G#', 'min', {'*b3', '*5'}, '5']\n 'A:(3)/6' -> ['A', '', {'3'}, '6']\n\n Parameters\n ----------\n chord_label : str\n A chord label.\n reduce_extended_chords : bool\n Whether to map the upper voicings of extended chords (9's, 11's, 13's)\n to semitone extensions. (Default value = False)\n\n Returns\n -------\n chord_parts : list\n Split version of the chord label.\n\n "
] |
Please provide a description of the function:def join(chord_root, quality='', extensions=None, bass=''):
    chord_label = chord_root
    if quality or extensions:
        chord_label += ":%s" % quality
    if extensions:
        chord_label += "(%s)" % ",".join(extensions)
    if bass and bass != '1':
        chord_label += "/%s" % bass
    validate_chord_label(chord_label)
    return chord_label | [
"Join the parts of a chord into a complete chord label.\n\n Parameters\n ----------\n chord_root : str\n Root pitch class of the chord, e.g. 'C', 'Eb'\n quality : str\n Quality of the chord, e.g. 'maj', 'hdim7'\n (Default value = '')\n extensions : list\n Any added or absent scaled degrees for this chord, e.g. ['4', '\\*3']\n (Default value = None)\n bass : str\n Scale degree of the bass note, e.g. '5'.\n (Default value = '')\n\n Returns\n -------\n chord_label : str\n A complete chord label.\n\n "
] |
Please provide a description of the function:def encode(chord_label, reduce_extended_chords=False,
           strict_bass_intervals=False):
    if chord_label == NO_CHORD:
        return NO_CHORD_ENCODED
    if chord_label == X_CHORD:
        return X_CHORD_ENCODED
    chord_root, quality, scale_degrees, bass = split(
        chord_label, reduce_extended_chords=reduce_extended_chords)
    root_number = pitch_class_to_semitone(chord_root)
    bass_number = scale_degree_to_semitone(bass) % 12
    semitone_bitmap = quality_to_bitmap(quality)
    semitone_bitmap[0] = 1
    for scale_degree in scale_degrees:
        semitone_bitmap += scale_degree_to_bitmap(scale_degree,
                                                  reduce_extended_chords)
    semitone_bitmap = (semitone_bitmap > 0).astype(np.int)
    if not semitone_bitmap[bass_number] and strict_bass_intervals:
        raise InvalidChordException(
            "Given bass scale degree is absent from this chord: "
            "%s" % chord_label, chord_label)
    else:
        semitone_bitmap[bass_number] = 1
    return root_number, semitone_bitmap, bass_number | [
"Translate a chord label to numerical representations for evaluation.\n\n Parameters\n ----------\n chord_label : str\n Chord label to encode.\n reduce_extended_chords : bool\n Whether to map the upper voicings of extended chords (9's, 11's, 13's)\n to semitone extensions.\n (Default value = False)\n strict_bass_intervals : bool\n Whether to require that the bass scale degree is present in the chord.\n (Default value = False)\n\n Returns\n -------\n root_number : int\n Absolute semitone of the chord's root.\n semitone_bitmap : np.ndarray, dtype=int\n 12-dim vector of relative semitones in the chord spelling.\n bass_number : int\n Relative semitone of the chord's bass note, e.g. 0=root, 7=fifth, etc.\n\n "
] |
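A sketch of `encode` on a chord with an added interval and a bass inversion; the expected values below were worked out from the helpers above (G = 7, scale degree 3 = 4 semitones, added 6th = semitone 9).

import mir_eval

root, bitmap, bass = mir_eval.chord.encode('G:maj(6)/3')
print(root)    # 7 -> G
print(bass)    # 4 -> the chord's third, relative to the root
print(bitmap)  # [1 0 0 0 1 0 0 1 0 1 0 0] -> root, 3rd, 5th, added 6th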
Please provide a description of the function:def encode_many(chord_labels, reduce_extended_chords=False):
    num_items = len(chord_labels)
    roots, basses = np.zeros([2, num_items], dtype=np.int)
    semitones = np.zeros([num_items, 12], dtype=np.int)
    local_cache = dict()
    for i, label in enumerate(chord_labels):
        result = local_cache.get(label, None)
        if result is None:
            result = encode(label, reduce_extended_chords)
            local_cache[label] = result
        roots[i], semitones[i], basses[i] = result
    return roots, semitones, basses | [
"Translate a set of chord labels to numerical representations for sane\n evaluation.\n\n Parameters\n ----------\n chord_labels : list\n Set of chord labels to encode.\n reduce_extended_chords : bool\n Whether to map the upper voicings of extended chords (9's, 11's, 13's)\n to semitone extensions.\n (Default value = False)\n\n Returns\n -------\n root_number : np.ndarray, dtype=int\n Absolute semitone of the chord's root.\n interval_bitmap : np.ndarray, dtype=int\n 12-dim vector of relative semitones in the given chord quality.\n bass_number : np.ndarray, dtype=int\n Relative semitones of the chord's bass notes.\n\n "
] |
Please provide a description of the function:def rotate_bitmap_to_root(bitmap, chord_root):
    bitmap = np.asarray(bitmap)
    assert bitmap.ndim == 1, "Currently only 1D bitmaps are supported."
    idxs = list(np.nonzero(bitmap))
    idxs[-1] = (idxs[-1] + chord_root) % 12
    abs_bitmap = np.zeros_like(bitmap)
    abs_bitmap[tuple(idxs)] = 1
    return abs_bitmap | [
"Circularly shift a relative bitmap to its asbolute pitch classes.\n\n For clarity, the best explanation is an example. Given 'G:Maj', the root\n and quality map are as follows::\n\n root=5\n quality=[1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0] # Relative chord shape\n\n After rotating to the root, the resulting bitmap becomes::\n\n abs_quality = [0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1] # G, B, and D\n\n Parameters\n ----------\n bitmap : np.ndarray, shape=(12,)\n Bitmap of active notes, relative to the given root.\n chord_root : int\n Absolute pitch class number.\n\n Returns\n -------\n bitmap : np.ndarray, shape=(12,)\n Absolute bitmap of active pitch classes.\n\n "
] |
Please provide a description of the function:def rotate_bitmaps_to_roots(bitmaps, roots):
    abs_bitmaps = []
    for bitmap, chord_root in zip(bitmaps, roots):
        abs_bitmaps.append(rotate_bitmap_to_root(bitmap, chord_root))
    return np.asarray(abs_bitmaps) | [
"Circularly shift a relative bitmaps to asbolute pitch classes.\n\n See :func:`rotate_bitmap_to_root` for more information.\n\n Parameters\n ----------\n bitmap : np.ndarray, shape=(N, 12)\n Bitmap of active notes, relative to the given root.\n root : np.ndarray, shape=(N,)\n Absolute pitch class number.\n\n Returns\n -------\n bitmap : np.ndarray, shape=(N, 12)\n Absolute bitmaps of active pitch classes.\n\n "
] |
Please provide a description of the function:def validate(reference_labels, estimated_labels):
    N = len(reference_labels)
    M = len(estimated_labels)
    if N != M:
        raise ValueError(
            "Chord comparison received different length lists: "
            "len(reference)=%d\tlen(estimates)=%d" % (N, M))
    for labels in [reference_labels, estimated_labels]:
        for chord_label in labels:
            validate_chord_label(chord_label)
    # When either label list is empty, warn the user
    if len(reference_labels) == 0:
        warnings.warn('Reference labels are empty')
    if len(estimated_labels) == 0:
        warnings.warn('Estimated labels are empty') | [
"Checks that the input annotations to a comparison function look like\n valid chord labels.\n\n Parameters\n ----------\n reference_labels : list, len=n\n Reference chord labels to score against.\n estimated_labels : list, len=n\n Estimated chord labels to score against.\n\n "
] |
Please provide a description of the function:def weighted_accuracy(comparisons, weights):
    N = len(comparisons)
    # There should be as many weights as comparisons
    if weights.shape[0] != N:
        raise ValueError('weights and comparisons should be of the same'
                         ' length. len(weights) = {} but len(comparisons)'
                         ' = {}'.format(weights.shape[0], N))
    if (weights < 0).any():
        raise ValueError('Weights should all be positive.')
    if np.sum(weights) == 0:
        warnings.warn('No nonzero weights, returning 0')
        return 0
    # Find all comparison scores which are valid
    valid_idx = (comparisons >= 0)
    # If no comparable chords were provided, warn and return 0
    if valid_idx.sum() == 0:
        warnings.warn("No reference chords were comparable "
                      "to estimated chords, returning 0.")
        return 0
    # Remove any uncomparable labels
    comparisons = comparisons[valid_idx]
    weights = weights[valid_idx]
    # Normalize the weights
    total_weight = float(np.sum(weights))
    normalized_weights = np.asarray(weights, dtype=float)/total_weight
    # Score is the sum of all weighted comparisons
    return np.sum(comparisons*normalized_weights) | [
"Compute the weighted accuracy of a list of chord comparisons.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, ref_intervals.min(),\n ... ref_intervals.max(), mir_eval.chord.NO_CHORD,\n ... mir_eval.chord.NO_CHORD)\n >>> (intervals,\n ... ref_labels,\n ... est_labels) = mir_eval.util.merge_labeled_intervals(\n ... ref_intervals, ref_labels, est_intervals, est_labels)\n >>> durations = mir_eval.util.intervals_to_durations(intervals)\n >>> # Here, we're using the \"thirds\" function to compare labels\n >>> # but any of the comparison functions would work.\n >>> comparisons = mir_eval.chord.thirds(ref_labels, est_labels)\n >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)\n\n Parameters\n ----------\n comparisons : np.ndarray\n List of chord comparison scores, in [0, 1] or -1\n weights : np.ndarray\n Weights (not necessarily normalized) for each comparison.\n This can be a list of interval durations\n\n Returns\n -------\n score : float\n Weighted accuracy\n\n "
] |
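A minimal worked example of the weighting logic above (a sketch, assuming mir_eval and NumPy are installed; the toy values are hypothetical):

import numpy as np
import mir_eval

comparisons = np.array([1.0, 0.0, -1.0, 1.0])  # -1 marks an uncomparable chord pair
durations = np.array([2.0, 1.0, 3.0, 1.0])     # interval durations used as weights
score = mir_eval.chord.weighted_accuracy(comparisons, durations)
# The -1 entry is dropped before normalization, so
# score = (1*2 + 0*1 + 1*1) / (2 + 1 + 1) = 0.75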
Please provide a description of the function:def thirds(reference_labels, estimated_labels):
validate(reference_labels, estimated_labels)
ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
est_roots, est_semitones = encode_many(estimated_labels, False)[:2]
eq_roots = ref_roots == est_roots
eq_thirds = ref_semitones[:, 3] == est_semitones[:, 3]
    comparison_scores = (eq_roots * eq_thirds).astype(float)
# Ignore 'X' chords
comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
return comparison_scores | [
"Compare chords along root & third relationships.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, ref_intervals.min(),\n ... ref_intervals.max(), mir_eval.chord.NO_CHORD,\n ... mir_eval.chord.NO_CHORD)\n >>> (intervals,\n ... ref_labels,\n ... est_labels) = mir_eval.util.merge_labeled_intervals(\n ... ref_intervals, ref_labels, est_intervals, est_labels)\n >>> durations = mir_eval.util.intervals_to_durations(intervals)\n >>> comparisons = mir_eval.chord.thirds(ref_labels, est_labels)\n >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)\n\n Parameters\n ----------\n reference_labels : list, len=n\n Reference chord labels to score against.\n estimated_labels : list, len=n\n Estimated chord labels to score against.\n\n Returns\n -------\n comparison_scores : np.ndarray, shape=(n,), dtype=float\n Comparison scores, in [0.0, 1.0]\n\n "
] |
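A short usage sketch (the label strings are hypothetical examples in the Harte syntax that mir_eval.chord parses):

import mir_eval

ref_labels = ['C:maj', 'A:min', 'G:7']
est_labels = ['C:min', 'A:min7', 'G:maj']
# Only root and third are compared: C:maj vs C:min differ on the third,
# while A:min/A:min7 and G:7/G:maj agree through the third.
print(mir_eval.chord.thirds(ref_labels, est_labels))  # [0. 1. 1.]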
Please provide a description of the function:def thirds_inv(reference_labels, estimated_labels):
validate(reference_labels, estimated_labels)
ref_roots, ref_semitones, ref_bass = encode_many(reference_labels, False)
est_roots, est_semitones, est_bass = encode_many(estimated_labels, False)
eq_root = ref_roots == est_roots
eq_bass = ref_bass == est_bass
eq_third = ref_semitones[:, 3] == est_semitones[:, 3]
    comparison_scores = (eq_root * eq_third * eq_bass).astype(float)
# Ignore 'X' chords
comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
return comparison_scores | [
"Score chords along root, third, & bass relationships.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, ref_intervals.min(),\n ... ref_intervals.max(), mir_eval.chord.NO_CHORD,\n ... mir_eval.chord.NO_CHORD)\n >>> (intervals,\n ... ref_labels,\n ... est_labels) = mir_eval.util.merge_labeled_intervals(\n ... ref_intervals, ref_labels, est_intervals, est_labels)\n >>> durations = mir_eval.util.intervals_to_durations(intervals)\n >>> comparisons = mir_eval.chord.thirds_inv(ref_labels, est_labels)\n >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)\n\n Parameters\n ----------\n reference_labels : list, len=n\n Reference chord labels to score against.\n estimated_labels : list, len=n\n Estimated chord labels to score against.\n\n Returns\n -------\n scores : np.ndarray, shape=(n,), dtype=float\n Comparison scores, in [0.0, 1.0]\n\n "
] |
Please provide a description of the function:def triads(reference_labels, estimated_labels):
validate(reference_labels, estimated_labels)
ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
est_roots, est_semitones = encode_many(estimated_labels, False)[:2]
eq_roots = ref_roots == est_roots
eq_semitones = np.all(
np.equal(ref_semitones[:, :8], est_semitones[:, :8]), axis=1)
    comparison_scores = (eq_roots * eq_semitones).astype(float)
# Ignore 'X' chords
comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
return comparison_scores | [
"Compare chords along triad (root & quality to #5) relationships.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, ref_intervals.min(),\n ... ref_intervals.max(), mir_eval.chord.NO_CHORD,\n ... mir_eval.chord.NO_CHORD)\n >>> (intervals,\n ... ref_labels,\n ... est_labels) = mir_eval.util.merge_labeled_intervals(\n ... ref_intervals, ref_labels, est_intervals, est_labels)\n >>> durations = mir_eval.util.intervals_to_durations(intervals)\n >>> comparisons = mir_eval.chord.triads(ref_labels, est_labels)\n >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)\n\n Parameters\n ----------\n reference_labels : list, len=n\n Reference chord labels to score against.\n estimated_labels : list, len=n\n Estimated chord labels to score against.\n\n Returns\n -------\n comparison_scores : np.ndarray, shape=(n,), dtype=float\n Comparison scores, in [0.0, 1.0]\n\n "
] |
Please provide a description of the function:def triads_inv(reference_labels, estimated_labels):
validate(reference_labels, estimated_labels)
ref_roots, ref_semitones, ref_bass = encode_many(reference_labels, False)
est_roots, est_semitones, est_bass = encode_many(estimated_labels, False)
eq_roots = ref_roots == est_roots
eq_basses = ref_bass == est_bass
eq_semitones = np.all(
np.equal(ref_semitones[:, :8], est_semitones[:, :8]), axis=1)
    comparison_scores = (eq_roots * eq_semitones * eq_basses).astype(float)
# Ignore 'X' chords
comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
return comparison_scores | [
"Score chords along triad (root, quality to #5, & bass) relationships.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, ref_intervals.min(),\n ... ref_intervals.max(), mir_eval.chord.NO_CHORD,\n ... mir_eval.chord.NO_CHORD)\n >>> (intervals,\n ... ref_labels,\n ... est_labels) = mir_eval.util.merge_labeled_intervals(\n ... ref_intervals, ref_labels, est_intervals, est_labels)\n >>> durations = mir_eval.util.intervals_to_durations(intervals)\n >>> comparisons = mir_eval.chord.triads_inv(ref_labels, est_labels)\n >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)\n\n Parameters\n ----------\n reference_labels : list, len=n\n Reference chord labels to score against.\n estimated_labels : list, len=n\n Estimated chord labels to score against.\n\n Returns\n -------\n scores : np.ndarray, shape=(n,), dtype=float\n Comparison scores, in [0.0, 1.0]\n\n "
] |
Please provide a description of the function:def root(reference_labels, estimated_labels):
validate(reference_labels, estimated_labels)
ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
est_roots = encode_many(estimated_labels, False)[0]
    comparison_scores = (ref_roots == est_roots).astype(float)
# Ignore 'X' chords
comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
return comparison_scores | [
"Compare chords according to roots.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, ref_intervals.min(),\n ... ref_intervals.max(), mir_eval.chord.NO_CHORD,\n ... mir_eval.chord.NO_CHORD)\n >>> (intervals,\n ... ref_labels,\n ... est_labels) = mir_eval.util.merge_labeled_intervals(\n ... ref_intervals, ref_labels, est_intervals, est_labels)\n >>> durations = mir_eval.util.intervals_to_durations(intervals)\n >>> comparisons = mir_eval.chord.root(ref_labels, est_labels)\n >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)\n\n Parameters\n ----------\n reference_labels : list, len=n\n Reference chord labels to score against.\n estimated_labels : list, len=n\n Estimated chord labels to score against.\n\n Returns\n -------\n comparison_scores : np.ndarray, shape=(n,), dtype=float\n Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of\n gamut.\n\n "
] |
Please provide a description of the function:def mirex(reference_labels, estimated_labels):
validate(reference_labels, estimated_labels)
# TODO(?): Should this be an argument?
min_intersection = 3
ref_data = encode_many(reference_labels, False)
ref_chroma = rotate_bitmaps_to_roots(ref_data[1], ref_data[0])
est_data = encode_many(estimated_labels, False)
est_chroma = rotate_bitmaps_to_roots(est_data[1], est_data[0])
eq_chroma = (ref_chroma * est_chroma).sum(axis=-1)
# Chroma matching for set bits
    comparison_scores = (eq_chroma >= min_intersection).astype(float)
# No-chord matching; match -1 roots, SKIP_CHORDS dropped next
no_root = np.logical_and(ref_data[0] == -1, est_data[0] == -1)
comparison_scores[no_root] = 1.0
# Skip chords where the number of active semitones `n` is
# 0 < n < `min_intersection`.
ref_semitone_count = (ref_data[1] > 0).sum(axis=1)
skip_idx = np.logical_and(ref_semitone_count > 0,
ref_semitone_count < min_intersection)
# Also ignore 'X' chords.
np.logical_or(skip_idx, np.any(ref_data[1] < 0, axis=1), skip_idx)
comparison_scores[skip_idx] = -1.0
return comparison_scores | [
"Compare chords along MIREX rules.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, ref_intervals.min(),\n ... ref_intervals.max(), mir_eval.chord.NO_CHORD,\n ... mir_eval.chord.NO_CHORD)\n >>> (intervals,\n ... ref_labels,\n ... est_labels) = mir_eval.util.merge_labeled_intervals(\n ... ref_intervals, ref_labels, est_intervals, est_labels)\n >>> durations = mir_eval.util.intervals_to_durations(intervals)\n >>> comparisons = mir_eval.chord.mirex(ref_labels, est_labels)\n >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)\n\n Parameters\n ----------\n reference_labels : list, len=n\n Reference chord labels to score against.\n estimated_labels : list, len=n\n Estimated chord labels to score against.\n\n Returns\n -------\n comparison_scores : np.ndarray, shape=(n,), dtype=float\n Comparison scores, in [0.0, 1.0]\n\n "
] |
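A hedged sketch of the chroma-overlap rule (labels are hypothetical):

import mir_eval

ref_labels = ['C:maj', 'N']
est_labels = ['C:maj7', 'N']
# C:maj and C:maj7 share the three pitch classes {C, E, G}, meeting the
# min_intersection of 3; the two no-chords match on their -1 roots.
print(mir_eval.chord.mirex(ref_labels, est_labels))  # [1. 1.]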
Please provide a description of the function:def majmin(reference_labels, estimated_labels):
validate(reference_labels, estimated_labels)
maj_semitones = np.array(QUALITIES['maj'][:8])
min_semitones = np.array(QUALITIES['min'][:8])
ref_roots, ref_semitones, _ = encode_many(reference_labels, False)
est_roots, est_semitones, _ = encode_many(estimated_labels, False)
eq_root = ref_roots == est_roots
eq_quality = np.all(np.equal(ref_semitones[:, :8],
est_semitones[:, :8]), axis=1)
    comparison_scores = (eq_root * eq_quality).astype(float)
# Test for Major / Minor / No-chord
is_maj = np.all(np.equal(ref_semitones[:, :8], maj_semitones), axis=1)
is_min = np.all(np.equal(ref_semitones[:, :8], min_semitones), axis=1)
is_none = np.logical_and(ref_roots < 0, np.all(ref_semitones == 0, axis=1))
# Only keep majors, minors, and Nones (NOR)
comparison_scores[(is_maj + is_min + is_none) == 0] = -1
# Disable chords that disrupt this quality (apparently)
# ref_voicing = np.all(np.equal(ref_qualities[:, :8],
# ref_notes[:, :8]), axis=1)
# comparison_scores[ref_voicing == 0] = -1
# est_voicing = np.all(np.equal(est_qualities[:, :8],
# est_notes[:, :8]), axis=1)
# comparison_scores[est_voicing == 0] = -1
return comparison_scores | [
"Compare chords along major-minor rules. Chords with qualities outside\n Major/minor/no-chord are ignored.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, ref_intervals.min(),\n ... ref_intervals.max(), mir_eval.chord.NO_CHORD,\n ... mir_eval.chord.NO_CHORD)\n >>> (intervals,\n ... ref_labels,\n ... est_labels) = mir_eval.util.merge_labeled_intervals(\n ... ref_intervals, ref_labels, est_intervals, est_labels)\n >>> durations = mir_eval.util.intervals_to_durations(intervals)\n >>> comparisons = mir_eval.chord.majmin(ref_labels, est_labels)\n >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)\n\n Parameters\n ----------\n reference_labels : list, len=n\n Reference chord labels to score against.\n estimated_labels : list, len=n\n Estimated chord labels to score against.\n\n Returns\n -------\n comparison_scores : np.ndarray, shape=(n,), dtype=float\n Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of\n gamut.\n\n "
] |
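A sketch of the gamut filtering (labels are hypothetical):

import mir_eval

ref_labels = ['C:maj', 'C:aug', 'N']
est_labels = ['C:maj', 'C:maj', 'N']
# C:aug lies outside the maj/min/no-chord gamut, so its score is set to -1
# and it is later dropped by weighted_accuracy.
print(mir_eval.chord.majmin(ref_labels, est_labels))  # [ 1. -1.  1.]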
Please provide a description of the function:def majmin_inv(reference_labels, estimated_labels):
validate(reference_labels, estimated_labels)
maj_semitones = np.array(QUALITIES['maj'][:8])
min_semitones = np.array(QUALITIES['min'][:8])
ref_roots, ref_semitones, ref_bass = encode_many(reference_labels, False)
est_roots, est_semitones, est_bass = encode_many(estimated_labels, False)
eq_root_bass = (ref_roots == est_roots) * (ref_bass == est_bass)
eq_semitones = np.all(np.equal(ref_semitones[:, :8],
est_semitones[:, :8]), axis=1)
    comparison_scores = (eq_root_bass * eq_semitones).astype(float)
# Test for Major / Minor / No-chord
is_maj = np.all(np.equal(ref_semitones[:, :8], maj_semitones), axis=1)
is_min = np.all(np.equal(ref_semitones[:, :8], min_semitones), axis=1)
is_none = np.logical_and(ref_roots < 0, np.all(ref_semitones == 0, axis=1))
# Only keep majors, minors, and Nones (NOR)
comparison_scores[(is_maj + is_min + is_none) == 0] = -1
# Disable inversions that are not part of the quality
valid_inversion = np.ones(ref_bass.shape, dtype=bool)
bass_idx = ref_bass >= 0
valid_inversion[bass_idx] = ref_semitones[bass_idx, ref_bass[bass_idx]]
comparison_scores[valid_inversion == 0] = -1
return comparison_scores | [
"Compare chords along major-minor rules, with inversions. Chords with\n qualities outside Major/minor/no-chord are ignored, and the bass note must\n exist in the triad (bass in [1, 3, 5]).\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, ref_intervals.min(),\n ... ref_intervals.max(), mir_eval.chord.NO_CHORD,\n ... mir_eval.chord.NO_CHORD)\n >>> (intervals,\n ... ref_labels,\n ... est_labels) = mir_eval.util.merge_labeled_intervals(\n ... ref_intervals, ref_labels, est_intervals, est_labels)\n >>> durations = mir_eval.util.intervals_to_durations(intervals)\n >>> comparisons = mir_eval.chord.majmin_inv(ref_labels, est_labels)\n >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)\n\n Parameters\n ----------\n reference_labels : list, len=n\n Reference chord labels to score against.\n estimated_labels : list, len=n\n Estimated chord labels to score against.\n\n Returns\n -------\n comparison_scores : np.ndarray, shape=(n,), dtype=float\n Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of\n gamut.\n\n "
] |
Please provide a description of the function:def sevenths(reference_labels, estimated_labels):
validate(reference_labels, estimated_labels)
seventh_qualities = ['maj', 'min', 'maj7', '7', 'min7', '']
valid_semitones = np.array([QUALITIES[name] for name in seventh_qualities])
ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
est_roots, est_semitones = encode_many(estimated_labels, False)[:2]
eq_root = ref_roots == est_roots
eq_semitones = np.all(np.equal(ref_semitones, est_semitones), axis=1)
    comparison_scores = (eq_root * eq_semitones).astype(float)
# Test for reference chord inclusion
is_valid = np.array([np.all(np.equal(ref_semitones, semitones), axis=1)
for semitones in valid_semitones])
# Drop if NOR
comparison_scores[np.sum(is_valid, axis=0) == 0] = -1
return comparison_scores | [
"Compare chords along MIREX 'sevenths' rules. Chords with qualities\n outside [maj, maj7, 7, min, min7, N] are ignored.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, ref_intervals.min(),\n ... ref_intervals.max(), mir_eval.chord.NO_CHORD,\n ... mir_eval.chord.NO_CHORD)\n >>> (intervals,\n ... ref_labels,\n ... est_labels) = mir_eval.util.merge_labeled_intervals(\n ... ref_intervals, ref_labels, est_intervals, est_labels)\n >>> durations = mir_eval.util.intervals_to_durations(intervals)\n >>> comparisons = mir_eval.chord.sevenths(ref_labels, est_labels)\n >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)\n\n Parameters\n ----------\n reference_labels : list, len=n\n Reference chord labels to score against.\n estimated_labels : list, len=n\n Estimated chord labels to score against.\n\n Returns\n -------\n comparison_scores : np.ndarray, shape=(n,), dtype=float\n Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of\n gamut.\n\n "
] |
Please provide a description of the function:def sevenths_inv(reference_labels, estimated_labels):
validate(reference_labels, estimated_labels)
seventh_qualities = ['maj', 'min', 'maj7', '7', 'min7', '']
valid_semitones = np.array([QUALITIES[name] for name in seventh_qualities])
ref_roots, ref_semitones, ref_basses = encode_many(reference_labels, False)
est_roots, est_semitones, est_basses = encode_many(estimated_labels, False)
eq_roots_basses = (ref_roots == est_roots) * (ref_basses == est_basses)
eq_semitones = np.all(np.equal(ref_semitones, est_semitones), axis=1)
    comparison_scores = (eq_roots_basses * eq_semitones).astype(float)
    # Test for reference chord inclusion
is_valid = np.array([np.all(np.equal(ref_semitones, semitones), axis=1)
for semitones in valid_semitones])
comparison_scores[np.sum(is_valid, axis=0) == 0] = -1
# Disable inversions that are not part of the quality
valid_inversion = np.ones(ref_basses.shape, dtype=bool)
bass_idx = ref_basses >= 0
valid_inversion[bass_idx] = ref_semitones[bass_idx, ref_basses[bass_idx]]
comparison_scores[valid_inversion == 0] = -1
return comparison_scores | [
"Compare chords along MIREX 'sevenths' rules. Chords with qualities\n outside [maj, maj7, 7, min, min7, N] are ignored.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(\n ... est_intervals, est_labels, ref_intervals.min(),\n ... ref_intervals.max(), mir_eval.chord.NO_CHORD,\n ... mir_eval.chord.NO_CHORD)\n >>> (intervals,\n ... ref_labels,\n ... est_labels) = mir_eval.util.merge_labeled_intervals(\n ... ref_intervals, ref_labels, est_intervals, est_labels)\n >>> durations = mir_eval.util.intervals_to_durations(intervals)\n >>> comparisons = mir_eval.chord.sevenths_inv(ref_labels, est_labels)\n >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)\n\n Parameters\n ----------\n reference_labels : list, len=n\n Reference chord labels to score against.\n estimated_labels : list, len=n\n Estimated chord labels to score against.\n\n Returns\n -------\n comparison_scores : np.ndarray, shape=(n,), dtype=float\n Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of\n gamut.\n\n "
] |
Please provide a description of the function:def directional_hamming_distance(reference_intervals, estimated_intervals):
util.validate_intervals(estimated_intervals)
util.validate_intervals(reference_intervals)
# make sure chord intervals do not overlap
if len(reference_intervals) > 1 and (reference_intervals[:-1, 1] >
reference_intervals[1:, 0]).any():
raise ValueError('Chord Intervals must not overlap')
est_ts = np.unique(estimated_intervals.flatten())
seg = 0.
for start, end in reference_intervals:
dur = end - start
between_start_end = est_ts[(est_ts >= start) & (est_ts < end)]
seg_ts = np.hstack([start, between_start_end, end])
seg += dur - np.diff(seg_ts).max()
return seg / (reference_intervals[-1, 1] - reference_intervals[0, 0]) | [
"Compute the directional hamming distance between reference and\n estimated intervals as defined by [#harte2010towards]_ and used for MIREX\n 'OverSeg', 'UnderSeg' and 'MeanSeg' measures.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> overseg = 1 - mir_eval.chord.directional_hamming_distance(\n ... ref_intervals, est_intervals)\n >>> underseg = 1 - mir_eval.chord.directional_hamming_distance(\n ... est_intervals, ref_intervals)\n >>> seg = min(overseg, underseg)\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2), dtype=float\n Reference chord intervals to score against.\n estimated_intervals : np.ndarray, shape=(m, 2), dtype=float\n Estimated chord intervals to score against.\n\n Returns\n -------\n directional hamming distance : float\n directional hamming distance between reference intervals and\n estimated intervals.\n "
] |
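A numeric sketch (intervals are hypothetical):

import numpy as np
import mir_eval

ref_intervals = np.array([[0., 4.]])
est_intervals = np.array([[0., 2.], [2., 4.]])
# The single reference segment of length 4 is best covered by an estimated
# segment of length 2, so the distance is (4 - 2) / 4 = 0.5; the derived
# 'overseg' score would then be 1 - 0.5 = 0.5.
print(mir_eval.chord.directional_hamming_distance(ref_intervals,
                                                  est_intervals))  # 0.5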
Please provide a description of the function:def seg(reference_intervals, estimated_intervals):
return min(underseg(reference_intervals, estimated_intervals),
overseg(reference_intervals, estimated_intervals)) | [
"Compute the MIREX 'MeanSeg' score.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> score = mir_eval.chord.seg(ref_intervals, est_intervals)\n\n Parameters\n ----------\n reference_intervals : np.ndarray, shape=(n, 2), dtype=float\n Reference chord intervals to score against.\n estimated_intervals : np.ndarray, shape=(m, 2), dtype=float\n Estimated chord intervals to score against.\n\n Returns\n -------\n segmentation score : float\n Comparison score, in [0.0, 1.0], where 1.0 means perfect segmentation.\n "
] |
Please provide a description of the function:def merge_chord_intervals(intervals, labels):
roots, semitones, basses = encode_many(labels, True)
merged_ivs = []
prev_rt = None
prev_st = None
prev_ba = None
for s, e, rt, st, ba in zip(intervals[:, 0], intervals[:, 1],
roots, semitones, basses):
if rt != prev_rt or (st != prev_st).any() or ba != prev_ba:
prev_rt, prev_st, prev_ba = rt, st, ba
merged_ivs.append([s, e])
else:
merged_ivs[-1][-1] = e
return np.array(merged_ivs) | [
"\n Merge consecutive chord intervals if they represent the same chord.\n\n Parameters\n ----------\n intervals : np.ndarray, shape=(n, 2), dtype=float\n Chord intervals to be merged, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n labels : list, shape=(n,)\n Chord labels to be merged, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n\n Returns\n -------\n merged_ivs : np.ndarray, shape=(k, 2), dtype=float\n Merged chord intervals, k <= n\n\n "
] |
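A small sketch of the merging behavior (values are hypothetical):

import numpy as np
import mir_eval

intervals = np.array([[0., 1.], [1., 2.], [2., 3.]])
labels = ['C:maj', 'C:maj', 'G:maj']
# The two consecutive C:maj intervals collapse into one.
print(mir_eval.chord.merge_chord_intervals(intervals, labels))
# [[0. 2.]
#  [2. 3.]]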
Please provide a description of the function:def evaluate(ref_intervals, ref_labels, est_intervals, est_labels, **kwargs):
# Append or crop estimated intervals so their span is the same as reference
est_intervals, est_labels = util.adjust_intervals(
est_intervals, est_labels, ref_intervals.min(), ref_intervals.max(),
NO_CHORD, NO_CHORD)
# use merged intervals for segmentation evaluation
merged_ref_intervals = merge_chord_intervals(ref_intervals, ref_labels)
merged_est_intervals = merge_chord_intervals(est_intervals, est_labels)
# Adjust the labels so that they span the same intervals
intervals, ref_labels, est_labels = util.merge_labeled_intervals(
ref_intervals, ref_labels, est_intervals, est_labels)
# Convert intervals to durations (used as weights)
durations = util.intervals_to_durations(intervals)
# Store scores for each comparison function
scores = collections.OrderedDict()
scores['thirds'] = weighted_accuracy(thirds(ref_labels, est_labels),
durations)
scores['thirds_inv'] = weighted_accuracy(thirds_inv(ref_labels,
est_labels), durations)
scores['triads'] = weighted_accuracy(triads(ref_labels, est_labels),
durations)
scores['triads_inv'] = weighted_accuracy(triads_inv(ref_labels,
est_labels), durations)
scores['tetrads'] = weighted_accuracy(tetrads(ref_labels, est_labels),
durations)
scores['tetrads_inv'] = weighted_accuracy(tetrads_inv(ref_labels,
est_labels),
durations)
scores['root'] = weighted_accuracy(root(ref_labels, est_labels), durations)
scores['mirex'] = weighted_accuracy(mirex(ref_labels, est_labels),
durations)
scores['majmin'] = weighted_accuracy(majmin(ref_labels, est_labels),
durations)
scores['majmin_inv'] = weighted_accuracy(majmin_inv(ref_labels,
est_labels), durations)
scores['sevenths'] = weighted_accuracy(sevenths(ref_labels, est_labels),
durations)
scores['sevenths_inv'] = weighted_accuracy(sevenths_inv(ref_labels,
est_labels),
durations)
scores['underseg'] = underseg(merged_ref_intervals, merged_est_intervals)
scores['overseg'] = overseg(merged_ref_intervals, merged_est_intervals)
scores['seg'] = min(scores['overseg'], scores['underseg'])
return scores | [
"Computes weighted accuracy for all comparison functions for the given\n reference and estimated annotations.\n\n Examples\n --------\n >>> (ref_intervals,\n ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')\n >>> (est_intervals,\n ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')\n >>> scores = mir_eval.chord.evaluate(ref_intervals, ref_labels,\n ... est_intervals, est_labels)\n\n Parameters\n ----------\n ref_intervals : np.ndarray, shape=(n, 2)\n Reference chord intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n\n ref_labels : list, shape=(n,)\n reference chord labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n\n est_intervals : np.ndarray, shape=(m, 2)\n estimated chord intervals, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n\n est_labels : list, shape=(m,)\n estimated chord labels, in the format returned by\n :func:`mir_eval.io.load_labeled_intervals`.\n\n kwargs\n Additional keyword arguments which will be passed to the\n appropriate metric or preprocessing functions.\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n\n "
] |
Please provide a description of the function:def _n_onset_midi(patterns):
return len([o_m for pat in patterns for occ in pat for o_m in occ]) | [
"Computes the number of onset_midi objects in a pattern\n\n Parameters\n ----------\n patterns :\n A list of patterns using the format returned by\n :func:`mir_eval.io.load_patterns()`\n\n Returns\n -------\n n_onsets : int\n Number of onsets within the pattern.\n\n "
] |
Please provide a description of the function:def validate(reference_patterns, estimated_patterns):
# Warn if pattern lists are empty
if _n_onset_midi(reference_patterns) == 0:
warnings.warn('Reference patterns are empty.')
if _n_onset_midi(estimated_patterns) == 0:
warnings.warn('Estimated patterns are empty.')
for patterns in [reference_patterns, estimated_patterns]:
for pattern in patterns:
if len(pattern) <= 0:
raise ValueError("Each pattern must contain at least one "
"occurrence.")
for occurrence in pattern:
for onset_midi in occurrence:
if len(onset_midi) != 2:
raise ValueError("The (onset, midi) tuple must "
"contain exactly 2 elements.") | [
"Checks that the input annotations to a metric look like valid pattern\n lists, and throws helpful errors if not.\n\n Parameters\n ----------\n reference_patterns : list\n The reference patterns using the format returned by\n :func:`mir_eval.io.load_patterns()`\n estimated_patterns : list\n The estimated patterns in the same format\n\n Returns\n -------\n\n "
] |
Please provide a description of the function:def _occurrence_intersection(occ_P, occ_Q):
set_P = set([tuple(onset_midi) for onset_midi in occ_P])
set_Q = set([tuple(onset_midi) for onset_midi in occ_Q])
return set_P & set_Q | [
"Computes the intersection between two occurrences.\n\n Parameters\n ----------\n occ_P : list of tuples\n (onset, midi) pairs representing the reference occurrence.\n occ_Q : list\n second list of (onset, midi) tuples\n\n Returns\n -------\n S : set\n Set of the intersection between occ_P and occ_Q.\n\n "
] |
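Since this helper reduces to plain set intersection over (onset, midi) tuples, its behavior can be reproduced directly (a sketch with hypothetical occurrences):

occ_P = [(0.0, 60), (1.0, 64)]
occ_Q = [(1.0, 64), (2.0, 67)]
# Equivalent to _occurrence_intersection(occ_P, occ_Q)
print(set(map(tuple, occ_P)) & set(map(tuple, occ_Q)))  # {(1.0, 64)}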
Please provide a description of the function:def _compute_score_matrix(P, Q, similarity_metric="cardinality_score"):
sm = np.zeros((len(P), len(Q))) # The score matrix
for iP, occ_P in enumerate(P):
for iQ, occ_Q in enumerate(Q):
if similarity_metric == "cardinality_score":
denom = float(np.max([len(occ_P), len(occ_Q)]))
# Compute the score
sm[iP, iQ] = len(_occurrence_intersection(occ_P, occ_Q)) / \
denom
            # TODO: More scores: 'normalised matching score'
            else:
                raise ValueError("The similarity metric (%s) can only be: "
                                 "'cardinality_score'." % similarity_metric)
return sm | [
"Computes the score matrix between the patterns P and Q.\n\n Parameters\n ----------\n P : list\n Pattern containing a list of occurrences.\n Q : list\n Pattern containing a list of occurrences.\n similarity_metric : str\n A string representing the metric to be used\n when computing the similarity matrix. Accepted values:\n - \"cardinality_score\":\n Count of the intersection between occurrences.\n (Default value = \"cardinality_score\")\n\n Returns\n -------\n sm : np.array\n The score matrix between P and Q using the similarity_metric.\n\n "
] |
Please provide a description of the function:def standard_FPR(reference_patterns, estimated_patterns, tol=1e-5):
validate(reference_patterns, estimated_patterns)
nP = len(reference_patterns) # Number of patterns in the reference
nQ = len(estimated_patterns) # Number of patterns in the estimation
k = 0 # Number of patterns that match
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
# Find matches of the prototype patterns
for ref_pattern in reference_patterns:
P = np.asarray(ref_pattern[0]) # Get reference prototype
for est_pattern in estimated_patterns:
Q = np.asarray(est_pattern[0]) # Get estimation prototype
if len(P) != len(Q):
continue
# Check transposition given a certain tolerance
if (len(P) == len(Q) == 1 or
np.max(np.abs(np.diff(P - Q, axis=0))) < tol):
k += 1
break
# Compute the standard measures
precision = k / float(nQ)
recall = k / float(nP)
f_measure = util.f_measure(precision, recall)
return f_measure, precision, recall | [
"Standard F1 Score, Precision and Recall.\n\n This metric checks if the prototype patterns of the reference match\n possible translated patterns in the prototype patterns of the estimations.\n Since the sizes of these prototypes must be equal, this metric is quite\n restictive and it tends to be 0 in most of 2013 MIREX results.\n\n Examples\n --------\n >>> ref_patterns = mir_eval.io.load_patterns(\"ref_pattern.txt\")\n >>> est_patterns = mir_eval.io.load_patterns(\"est_pattern.txt\")\n >>> F, P, R = mir_eval.pattern.standard_FPR(ref_patterns, est_patterns)\n\n Parameters\n ----------\n reference_patterns : list\n The reference patterns using the format returned by\n :func:`mir_eval.io.load_patterns()`\n estimated_patterns : list\n The estimated patterns in the same format\n tol : float\n Tolerance level when comparing reference against estimation.\n Default parameter is the one found in the original matlab code by\n Tom Collins used for MIREX 2013.\n (Default value = 1e-5)\n\n Returns\n -------\n f_measure : float\n The standard F1 Score\n precision : float\n The standard Precision\n recall : float\n The standard Recall\n\n "
] |
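A sketch of the translation check (patterns are hypothetical; each pattern is a list of occurrences, each occurrence a list of (onset, midi) pairs):

import mir_eval

ref_patterns = [[[(0., 60), (1., 62)]]]
est_patterns = [[[(4., 64), (5., 66)]]]
# The estimated prototype is an exact transposition of the reference one;
# the constant (onset, pitch) offset cancels in the np.diff check.
print(mir_eval.pattern.standard_FPR(ref_patterns, est_patterns))
# (1.0, 1.0, 1.0)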
Please provide a description of the function:def establishment_FPR(reference_patterns, estimated_patterns,
similarity_metric="cardinality_score"):
validate(reference_patterns, estimated_patterns)
nP = len(reference_patterns) # Number of elements in reference
nQ = len(estimated_patterns) # Number of elements in estimation
S = np.zeros((nP, nQ)) # Establishment matrix
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
for iP, ref_pattern in enumerate(reference_patterns):
for iQ, est_pattern in enumerate(estimated_patterns):
s = _compute_score_matrix(ref_pattern, est_pattern,
similarity_metric)
S[iP, iQ] = np.max(s)
# Compute scores
precision = np.mean(np.max(S, axis=0))
recall = np.mean(np.max(S, axis=1))
f_measure = util.f_measure(precision, recall)
return f_measure, precision, recall | [
"Establishment F1 Score, Precision and Recall.\n\n Examples\n --------\n >>> ref_patterns = mir_eval.io.load_patterns(\"ref_pattern.txt\")\n >>> est_patterns = mir_eval.io.load_patterns(\"est_pattern.txt\")\n >>> F, P, R = mir_eval.pattern.establishment_FPR(ref_patterns,\n ... est_patterns)\n\n\n Parameters\n ----------\n reference_patterns : list\n The reference patterns in the format returned by\n :func:`mir_eval.io.load_patterns()`\n\n estimated_patterns : list\n The estimated patterns in the same format\n\n similarity_metric : str\n A string representing the metric to be used when computing the\n similarity matrix. Accepted values:\n\n - \"cardinality_score\": Count of the intersection\n between occurrences.\n\n (Default value = \"cardinality_score\")\n\n\n Returns\n -------\n f_measure : float\n The establishment F1 Score\n precision : float\n The establishment Precision\n recall : float\n The establishment Recall\n\n "
] |
Please provide a description of the function:def occurrence_FPR(reference_patterns, estimated_patterns, thres=.75,
similarity_metric="cardinality_score"):
validate(reference_patterns, estimated_patterns)
# Number of elements in reference
nP = len(reference_patterns)
# Number of elements in estimation
nQ = len(estimated_patterns)
# Occurrence matrix with Precision and recall in its last dimension
O_PR = np.zeros((nP, nQ, 2))
# Index of the values that are greater than the specified threshold
rel_idx = np.empty((0, 2), dtype=int)
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
for iP, ref_pattern in enumerate(reference_patterns):
for iQ, est_pattern in enumerate(estimated_patterns):
s = _compute_score_matrix(ref_pattern, est_pattern,
similarity_metric)
if np.max(s) >= thres:
O_PR[iP, iQ, 0] = np.mean(np.max(s, axis=0))
O_PR[iP, iQ, 1] = np.mean(np.max(s, axis=1))
rel_idx = np.vstack((rel_idx, [iP, iQ]))
# Compute the scores
if len(rel_idx) == 0:
precision = 0
recall = 0
else:
P = O_PR[:, :, 0]
precision = np.mean(np.max(P[np.ix_(rel_idx[:, 0], rel_idx[:, 1])],
axis=0))
R = O_PR[:, :, 1]
recall = np.mean(np.max(R[np.ix_(rel_idx[:, 0], rel_idx[:, 1])],
axis=1))
f_measure = util.f_measure(precision, recall)
return f_measure, precision, recall | [
"Establishment F1 Score, Precision and Recall.\n\n\n Examples\n --------\n >>> ref_patterns = mir_eval.io.load_patterns(\"ref_pattern.txt\")\n >>> est_patterns = mir_eval.io.load_patterns(\"est_pattern.txt\")\n >>> F, P, R = mir_eval.pattern.occurrence_FPR(ref_patterns,\n ... est_patterns)\n\n\n Parameters\n ----------\n reference_patterns : list\n The reference patterns in the format returned by\n :func:`mir_eval.io.load_patterns()`\n estimated_patterns : list\n The estimated patterns in the same format\n thres : float\n How similar two occcurrences must be in order to be considered\n equal\n (Default value = .75)\n similarity_metric : str\n A string representing the metric to be used\n when computing the similarity matrix. Accepted values:\n\n - \"cardinality_score\": Count of the intersection\n between occurrences.\n\n (Default value = \"cardinality_score\")\n\n\n Returns\n -------\n f_measure : float\n The establishment F1 Score\n precision : float\n The establishment Precision\n recall : float\n The establishment Recall\n\n "
] |
Please provide a description of the function:def three_layer_FPR(reference_patterns, estimated_patterns):
validate(reference_patterns, estimated_patterns)
def compute_first_layer_PR(ref_occs, est_occs):
# Find the length of the intersection between reference and estimation
s = len(_occurrence_intersection(ref_occs, est_occs))
# Compute the first layer scores
precision = s / float(len(ref_occs))
recall = s / float(len(est_occs))
return precision, recall
def compute_second_layer_PR(ref_pattern, est_pattern):
# Compute the first layer scores
F_1 = compute_layer(ref_pattern, est_pattern)
# Compute the second layer scores
precision = np.mean(np.max(F_1, axis=0))
recall = np.mean(np.max(F_1, axis=1))
return precision, recall
def compute_layer(ref_elements, est_elements, layer=1):
if layer != 1 and layer != 2:
raise ValueError("Layer (%d) must be an integer between 1 and 2"
% layer)
nP = len(ref_elements) # Number of elements in reference
nQ = len(est_elements) # Number of elements in estimation
F = np.zeros((nP, nQ)) # F-measure matrix for the given layer
for iP in range(nP):
for iQ in range(nQ):
if layer == 1:
func = compute_first_layer_PR
elif layer == 2:
func = compute_second_layer_PR
# Compute layer scores
precision, recall = func(ref_elements[iP], est_elements[iQ])
F[iP, iQ] = util.f_measure(precision, recall)
return F
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
# Compute the second layer (it includes the first layer)
F_2 = compute_layer(reference_patterns, estimated_patterns, layer=2)
# Compute the final scores (third layer)
precision_3 = np.mean(np.max(F_2, axis=0))
recall_3 = np.mean(np.max(F_2, axis=1))
f_measure_3 = util.f_measure(precision_3, recall_3)
return f_measure_3, precision_3, recall_3 | [
"Three Layer F1 Score, Precision and Recall. As described by Meridith.\n\n Examples\n --------\n >>> ref_patterns = mir_eval.io.load_patterns(\"ref_pattern.txt\")\n >>> est_patterns = mir_eval.io.load_patterns(\"est_pattern.txt\")\n >>> F, P, R = mir_eval.pattern.three_layer_FPR(ref_patterns,\n ... est_patterns)\n\n Parameters\n ----------\n reference_patterns : list\n The reference patterns in the format returned by\n :func:`mir_eval.io.load_patterns()`\n estimated_patterns : list\n The estimated patterns in the same format\n\n Returns\n -------\n f_measure : float\n The three-layer F1 Score\n precision : float\n The three-layer Precision\n recall : float\n The three-layer Recall\n\n ",
"Computes the first layer Precision and Recall values given the\n set of occurrences in the reference and the set of occurrences in the\n estimation.\n\n Parameters\n ----------\n ref_occs :\n\n est_occs :\n\n\n Returns\n -------\n\n ",
"Computes the second layer Precision and Recall values given the\n set of occurrences in the reference and the set of occurrences in the\n estimation.\n\n Parameters\n ----------\n ref_pattern :\n\n est_pattern :\n\n\n Returns\n -------\n\n ",
"Computes the F-measure matrix for a given layer. The reference and\n estimated elements can be either patters or occurrences, depending\n on the layer.\n\n For layer 1, the elements must be occurrences.\n For layer 2, the elements must be patterns.\n\n Parameters\n ----------\n ref_elements :\n\n est_elements :\n\n layer :\n (Default value = 1)\n\n Returns\n -------\n\n "
] |
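A sketch of the layered scoring (patterns are hypothetical):

import mir_eval

# One reference pattern with two occurrences; the estimate recovers only
# the first occurrence exactly.
ref_patterns = [[[(0., 60), (1., 62)], [(4., 60), (5., 62)]]]
est_patterns = [[[(0., 60), (1., 62)]]]
F, P, R = mir_eval.pattern.three_layer_FPR(ref_patterns, est_patterns)
# First layer: the matched occurrence scores F=1, the missed one F=0, so
# the second-layer F-measure is f(1.0, 0.5) = 2/3, giving F = P = R ≈ 0.667.
print(F, P, R)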
Please provide a description of the function:def first_n_three_layer_P(reference_patterns, estimated_patterns, n=5):
validate(reference_patterns, estimated_patterns)
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
# Get only the first n patterns from the estimated results
fn_est_patterns = estimated_patterns[:min(len(estimated_patterns), n)]
# Compute the three-layer scores for the first n estimated patterns
F, P, R = three_layer_FPR(reference_patterns, fn_est_patterns)
return P | [
"First n three-layer precision.\n\n This metric is basically the same as the three-layer FPR but it is only\n applied to the first n estimated patterns, and it only returns the\n precision. In MIREX and typically, n = 5.\n\n Examples\n --------\n >>> ref_patterns = mir_eval.io.load_patterns(\"ref_pattern.txt\")\n >>> est_patterns = mir_eval.io.load_patterns(\"est_pattern.txt\")\n >>> P = mir_eval.pattern.first_n_three_layer_P(ref_patterns,\n ... est_patterns, n=5)\n\n Parameters\n ----------\n reference_patterns : list\n The reference patterns in the format returned by\n :func:`mir_eval.io.load_patterns()`\n estimated_patterns : list\n The estimated patterns in the same format\n n : int\n Number of patterns to consider from the estimated results, in\n the order they appear in the matrix\n (Default value = 5)\n\n Returns\n -------\n precision : float\n The first n three-layer Precision\n\n "
] |
Please provide a description of the function:def first_n_target_proportion_R(reference_patterns, estimated_patterns, n=5):
validate(reference_patterns, estimated_patterns)
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
# Get only the first n patterns from the estimated results
fn_est_patterns = estimated_patterns[:min(len(estimated_patterns), n)]
F, P, R = establishment_FPR(reference_patterns, fn_est_patterns)
return R | [
"First n target proportion establishment recall metric.\n\n This metric is similar is similar to the establishment FPR score, but it\n only takes into account the first n estimated patterns and it only\n outputs the Recall value of it.\n\n Examples\n --------\n >>> ref_patterns = mir_eval.io.load_patterns(\"ref_pattern.txt\")\n >>> est_patterns = mir_eval.io.load_patterns(\"est_pattern.txt\")\n >>> R = mir_eval.pattern.first_n_target_proportion_R(\n ... ref_patterns, est_patterns, n=5)\n\n Parameters\n ----------\n reference_patterns : list\n The reference patterns in the format returned by\n :func:`mir_eval.io.load_patterns()`\n estimated_patterns : list\n The estimated patterns in the same format\n n : int\n Number of patterns to consider from the estimated results, in\n the order they appear in the matrix.\n (Default value = 5)\n\n Returns\n -------\n recall : float\n The first n target proportion Recall.\n\n "
] |
Please provide a description of the function:def evaluate(ref_patterns, est_patterns, **kwargs):
# Compute all the metrics
scores = collections.OrderedDict()
# Standard scores
scores['F'], scores['P'], scores['R'] = \
util.filter_kwargs(standard_FPR, ref_patterns, est_patterns, **kwargs)
# Establishment scores
scores['F_est'], scores['P_est'], scores['R_est'] = \
util.filter_kwargs(establishment_FPR, ref_patterns, est_patterns,
**kwargs)
# Occurrence scores
    # Force these values for thres (occurrence_FPR's threshold argument)
    kwargs['thres'] = .5
scores['F_occ.5'], scores['P_occ.5'], scores['R_occ.5'] = \
util.filter_kwargs(occurrence_FPR, ref_patterns, est_patterns,
**kwargs)
    kwargs['thres'] = .75
scores['F_occ.75'], scores['P_occ.75'], scores['R_occ.75'] = \
util.filter_kwargs(occurrence_FPR, ref_patterns, est_patterns,
**kwargs)
# Three-layer scores
scores['F_3'], scores['P_3'], scores['R_3'] = \
util.filter_kwargs(three_layer_FPR, ref_patterns, est_patterns,
**kwargs)
# First Five Patterns scores
# Set default value of n
if 'n' not in kwargs:
kwargs['n'] = 5
scores['FFP'] = util.filter_kwargs(first_n_three_layer_P, ref_patterns,
est_patterns, **kwargs)
scores['FFTP_est'] = \
util.filter_kwargs(first_n_target_proportion_R, ref_patterns,
est_patterns, **kwargs)
return scores | [
"Load data and perform the evaluation.\n\n Examples\n --------\n >>> ref_patterns = mir_eval.io.load_patterns(\"ref_pattern.txt\")\n >>> est_patterns = mir_eval.io.load_patterns(\"est_pattern.txt\")\n >>> scores = mir_eval.pattern.evaluate(ref_patterns, est_patterns)\n\n Parameters\n ----------\n ref_patterns : list\n The reference patterns in the format returned by\n :func:`mir_eval.io.load_patterns()`\n est_patterns : list\n The estimated patterns in the same format\n kwargs\n Additional keyword arguments which will be passed to the\n appropriate metric or preprocessing functions.\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n\n "
] |
Please provide a description of the function:def validate(ref_intervals, ref_pitches, ref_velocities, est_intervals,
est_pitches, est_velocities):
transcription.validate(ref_intervals, ref_pitches, est_intervals,
est_pitches)
# Check that velocities have the same length as intervals/pitches
if not ref_velocities.shape[0] == ref_pitches.shape[0]:
raise ValueError('Reference velocities must have the same length as '
'pitches and intervals.')
if not est_velocities.shape[0] == est_pitches.shape[0]:
raise ValueError('Estimated velocities must have the same length as '
'pitches and intervals.')
    # Check that the velocities are non-negative
    if ref_velocities.size > 0 and np.min(ref_velocities) < 0:
        raise ValueError('Reference velocities must be non-negative.')
    if est_velocities.size > 0 and np.min(est_velocities) < 0:
        raise ValueError('Estimated velocities must be non-negative.')
"Checks that the input annotations have valid time intervals, pitches,\n and velocities, and throws helpful errors if not.\n\n Parameters\n ----------\n ref_intervals : np.ndarray, shape=(n,2)\n Array of reference notes time intervals (onset and offset times)\n ref_pitches : np.ndarray, shape=(n,)\n Array of reference pitch values in Hertz\n ref_velocities : np.ndarray, shape=(n,)\n Array of MIDI velocities (i.e. between 0 and 127) of reference notes\n est_intervals : np.ndarray, shape=(m,2)\n Array of estimated notes time intervals (onset and offset times)\n est_pitches : np.ndarray, shape=(m,)\n Array of estimated pitch values in Hertz\n est_velocities : np.ndarray, shape=(m,)\n Array of MIDI velocities (i.e. between 0 and 127) of estimated notes\n "
] |
Please provide a description of the function:def match_notes(
ref_intervals, ref_pitches, ref_velocities, est_intervals, est_pitches,
est_velocities, onset_tolerance=0.05, pitch_tolerance=50.0,
offset_ratio=0.2, offset_min_tolerance=0.05, strict=False,
velocity_tolerance=0.1):
# Compute note matching as usual using standard transcription function
matching = transcription.match_notes(
ref_intervals, ref_pitches, est_intervals, est_pitches,
onset_tolerance, pitch_tolerance, offset_ratio, offset_min_tolerance,
strict)
# Rescale reference velocities to the range [0, 1]
min_velocity, max_velocity = np.min(ref_velocities), np.max(ref_velocities)
# Make the smallest possible range 1 to avoid divide by zero
velocity_range = max(1, max_velocity - min_velocity)
ref_velocities = (ref_velocities - min_velocity)/float(velocity_range)
# Convert matching list-of-tuples to array for fancy indexing
matching = np.array(matching)
# When there is no matching, return an empty list
if matching.size == 0:
return []
# Grab velocities for matched notes
ref_matched_velocities = ref_velocities[matching[:, 0]]
est_matched_velocities = est_velocities[matching[:, 1]]
# Find slope and intercept of line which produces best least-squares fit
# between matched est and ref velocities
    slope, intercept = np.linalg.lstsq(
        np.vstack([est_matched_velocities,
                   np.ones(len(est_matched_velocities))]).T,
        ref_matched_velocities, rcond=None)[0]
# Re-scale est velocities to match ref
est_matched_velocities = slope*est_matched_velocities + intercept
# Compute the absolute error of (rescaled) estimated velocities vs.
# normalized reference velocities. Error will be in [0, 1]
velocity_diff = np.abs(est_matched_velocities - ref_matched_velocities)
# Check whether each error is within the provided tolerance
velocity_within_tolerance = (velocity_diff < velocity_tolerance)
# Only keep matches whose velocity was within the provided tolerance
matching = matching[velocity_within_tolerance]
# Convert back to list-of-tuple format
matching = [tuple(_) for _ in matching]
return matching | [
"Match notes, taking note velocity into consideration.\n\n This function first calls :func:`mir_eval.transcription.match_notes` to\n match notes according to the supplied intervals, pitches, onset, offset,\n and pitch tolerances. The velocities of the matched notes are then used to\n estimate a slope and intercept which can rescale the estimated velocities\n so that they are as close as possible (in L2 sense) to their matched\n reference velocities. Velocities are then normalized to the range [0, 1]. A\n estimated note is then further only considered correct if its velocity is\n within ``velocity_tolerance`` of its matched (according to pitch and\n timing) reference note.\n\n Parameters\n ----------\n ref_intervals : np.ndarray, shape=(n,2)\n Array of reference notes time intervals (onset and offset times)\n ref_pitches : np.ndarray, shape=(n,)\n Array of reference pitch values in Hertz\n ref_velocities : np.ndarray, shape=(n,)\n Array of MIDI velocities (i.e. between 0 and 127) of reference notes\n est_intervals : np.ndarray, shape=(m,2)\n Array of estimated notes time intervals (onset and offset times)\n est_pitches : np.ndarray, shape=(m,)\n Array of estimated pitch values in Hertz\n est_velocities : np.ndarray, shape=(m,)\n Array of MIDI velocities (i.e. between 0 and 127) of estimated notes\n onset_tolerance : float > 0\n The tolerance for an estimated note's onset deviating from the\n reference note's onset, in seconds. Default is 0.05 (50 ms).\n pitch_tolerance : float > 0\n The tolerance for an estimated note's pitch deviating from the\n reference note's pitch, in cents. Default is 50.0 (50 cents).\n offset_ratio : float > 0 or None\n The ratio of the reference note's duration used to define the\n offset_tolerance. Default is 0.2 (20%), meaning the\n ``offset_tolerance`` will equal the ``ref_duration * 0.2``, or 0.05 (50\n ms), whichever is greater. If ``offset_ratio`` is set to ``None``,\n offsets are ignored in the matching.\n offset_min_tolerance : float > 0\n The minimum tolerance for offset matching. See offset_ratio description\n for an explanation of how the offset tolerance is determined. Note:\n this parameter only influences the results if ``offset_ratio`` is not\n ``None``.\n strict : bool\n If ``strict=False`` (the default), threshold checks for onset, offset,\n and pitch matching are performed using ``<=`` (less than or equal). If\n ``strict=True``, the threshold checks are performed using ``<`` (less\n than).\n velocity_tolerance : float > 0\n Estimated notes are considered correct if, after rescaling and\n normalization to [0, 1], they are within ``velocity_tolerance`` of a\n matched reference note.\n\n Returns\n -------\n matching : list of tuples\n A list of matched reference and estimated notes.\n ``matching[i] == (i, j)`` where reference note ``i`` matches estimated\n note ``j``.\n "
] |
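A usage sketch, assuming this function lives in mir_eval.transcription_velocity (values are hypothetical):

import numpy as np
import mir_eval

ref_intervals = np.array([[0., 1.], [1., 2.]])
ref_pitches = np.array([440., 220.])
ref_velocities = np.array([100., 50.])
est_intervals = np.array([[0.01, 0.99], [1.02, 2.]])
est_pitches = np.array([440., 220.])
est_velocities = np.array([90., 45.])
# Both notes match on timing and pitch; the least-squares rescaling maps the
# estimated velocities exactly onto the normalized reference velocities, so
# both pairs survive the velocity_tolerance check.
print(mir_eval.transcription_velocity.match_notes(
    ref_intervals, ref_pitches, ref_velocities,
    est_intervals, est_pitches, est_velocities))  # [(0, 0), (1, 1)]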
Please provide a description of the function:def evaluate(ref_intervals, ref_pitches, ref_velocities, est_intervals,
est_pitches, est_velocities, **kwargs):
# Compute all the metrics
scores = collections.OrderedDict()
# Precision, recall and f-measure taking note offsets into account
kwargs.setdefault('offset_ratio', 0.2)
if kwargs['offset_ratio'] is not None:
(scores['Precision'],
scores['Recall'],
scores['F-measure'],
scores['Average_Overlap_Ratio']) = util.filter_kwargs(
precision_recall_f1_overlap, ref_intervals, ref_pitches,
ref_velocities, est_intervals, est_pitches, est_velocities,
**kwargs)
# Precision, recall and f-measure NOT taking note offsets into account
kwargs['offset_ratio'] = None
(scores['Precision_no_offset'],
scores['Recall_no_offset'],
scores['F-measure_no_offset'],
scores['Average_Overlap_Ratio_no_offset']) = util.filter_kwargs(
precision_recall_f1_overlap, ref_intervals, ref_pitches,
ref_velocities, est_intervals, est_pitches, est_velocities, **kwargs)
return scores | [
"Compute all metrics for the given reference and estimated annotations.\n\n Parameters\n ----------\n ref_intervals : np.ndarray, shape=(n,2)\n Array of reference notes time intervals (onset and offset times)\n ref_pitches : np.ndarray, shape=(n,)\n Array of reference pitch values in Hertz\n ref_velocities : np.ndarray, shape=(n,)\n Array of MIDI velocities (i.e. between 0 and 127) of reference notes\n est_intervals : np.ndarray, shape=(m,2)\n Array of estimated notes time intervals (onset and offset times)\n est_pitches : np.ndarray, shape=(m,)\n Array of estimated pitch values in Hertz\n est_velocities : np.ndarray, shape=(n,)\n Array of MIDI velocities (i.e. between 0 and 127) of estimated notes\n kwargs\n Additional keyword arguments which will be passed to the\n appropriate metric or preprocessing functions.\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n "
] |
Please provide a description of the function:def validate(reference_beats, estimated_beats):
# If reference or estimated beats are empty,
# warn because metric will be 0
if reference_beats.size == 0:
warnings.warn("Reference beats are empty.")
if estimated_beats.size == 0:
warnings.warn("Estimated beats are empty.")
for beats in [reference_beats, estimated_beats]:
util.validate_events(beats, MAX_TIME) | [
"Checks that the input annotations to a metric look like valid beat time\n arrays, and throws helpful errors if not.\n\n Parameters\n ----------\n reference_beats : np.ndarray\n reference beat times, in seconds\n estimated_beats : np.ndarray\n estimated beat times, in seconds\n "
] |
Please provide a description of the function:def _get_reference_beat_variations(reference_beats):
# Create annotations at twice the metric level
interpolated_indices = np.arange(0, reference_beats.shape[0]-.5, .5)
original_indices = np.arange(0, reference_beats.shape[0])
double_reference_beats = np.interp(interpolated_indices,
original_indices,
reference_beats)
# Return metric variations:
# True, off-beat, double tempo, half tempo odd, and half tempo even
return (reference_beats,
double_reference_beats[1::2],
double_reference_beats,
reference_beats[::2],
reference_beats[1::2]) | [
"Return metric variations of the reference beats\n\n Parameters\n ----------\n reference_beats : np.ndarray\n beat locations in seconds\n\n Returns\n -------\n reference_beats : np.ndarray\n Original beat locations\n off_beat : np.ndarray\n 180 degrees out of phase from the original beat locations\n double : np.ndarray\n Beats at 2x the original tempo\n half_odd : np.ndarray\n Half tempo, odd beats\n half_even : np.ndarray\n Half tempo, even beats\n\n "
] |
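A sketch of the interpolation used for the double-tempo variation (beats are hypothetical):

import numpy as np

reference_beats = np.array([1., 2., 3., 4.])
double = np.interp(np.arange(0, reference_beats.shape[0] - .5, .5),
                   np.arange(reference_beats.shape[0]), reference_beats)
print(double)        # [1.  1.5 2.  2.5 3.  3.5 4. ]
print(double[1::2])  # off-beat variation: [1.5 2.5 3.5]
print(reference_beats[::2], reference_beats[1::2])  # half tempo, odd/even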
Please provide a description of the function:def f_measure(reference_beats,
estimated_beats,
f_measure_threshold=0.07):
validate(reference_beats, estimated_beats)
# When estimated beats are empty, no beats are correct; metric is 0
if estimated_beats.size == 0 or reference_beats.size == 0:
return 0.
# Compute the best-case matching between reference and estimated locations
matching = util.match_events(reference_beats,
estimated_beats,
f_measure_threshold)
precision = float(len(matching))/len(estimated_beats)
recall = float(len(matching))/len(reference_beats)
return util.f_measure(precision, recall) | [
"Compute the F-measure of correct vs incorrectly predicted beats.\n \"Correctness\" is determined over a small window.\n\n Examples\n --------\n >>> reference_beats = mir_eval.io.load_events('reference.txt')\n >>> reference_beats = mir_eval.beat.trim_beats(reference_beats)\n >>> estimated_beats = mir_eval.io.load_events('estimated.txt')\n >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)\n >>> f_measure = mir_eval.beat.f_measure(reference_beats,\n estimated_beats)\n\n Parameters\n ----------\n reference_beats : np.ndarray\n reference beat times, in seconds\n estimated_beats : np.ndarray\n estimated beat times, in seconds\n f_measure_threshold : float\n Window size, in seconds\n (Default value = 0.07)\n\n Returns\n -------\n f_score : float\n The computed F-measure score\n\n "
] |
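A quick sketch of the windowed matching (beats are hypothetical):

import numpy as np
import mir_eval

reference_beats = np.array([1., 2., 3., 4.])
estimated_beats = np.array([1.02, 2.1, 3., 4.])
# 2.1 falls outside the 0.07 s window around 2.0, so 3 of 4 beats match:
# precision = recall = 3/4, giving F = 0.75.
print(mir_eval.beat.f_measure(reference_beats, estimated_beats))  # 0.75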
Please provide a description of the function:def cemgil(reference_beats,
estimated_beats,
cemgil_sigma=0.04):
validate(reference_beats, estimated_beats)
# When estimated beats are empty, no beats are correct; metric is 0
if estimated_beats.size == 0 or reference_beats.size == 0:
return 0., 0.
# We'll compute Cemgil's accuracy for each variation
accuracies = []
for reference_beats in _get_reference_beat_variations(reference_beats):
accuracy = 0
# Cycle through beats
for beat in reference_beats:
# Find the error for the closest beat to the reference beat
beat_diff = np.min(np.abs(beat - estimated_beats))
# Add gaussian error into the accuracy
accuracy += np.exp(-(beat_diff**2)/(2.0*cemgil_sigma**2))
# Normalize the accuracy
accuracy /= .5*(estimated_beats.shape[0] + reference_beats.shape[0])
# Add it to our list of accuracy scores
accuracies.append(accuracy)
# Return raw accuracy with non-varied annotations
# and maximal accuracy across all variations
return accuracies[0], np.max(accuracies) | [
"Cemgil's score, computes a gaussian error of each estimated beat.\n Compares against the original beat times and all metrical variations.\n\n Examples\n --------\n >>> reference_beats = mir_eval.io.load_events('reference.txt')\n >>> reference_beats = mir_eval.beat.trim_beats(reference_beats)\n >>> estimated_beats = mir_eval.io.load_events('estimated.txt')\n >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)\n >>> cemgil_score, cemgil_max = mir_eval.beat.cemgil(reference_beats,\n estimated_beats)\n\n Parameters\n ----------\n reference_beats : np.ndarray\n reference beat times, in seconds\n estimated_beats : np.ndarray\n query beat times, in seconds\n cemgil_sigma : float\n Sigma parameter of gaussian error windows\n (Default value = 0.04)\n\n Returns\n -------\n cemgil_score : float\n Cemgil's score for the original reference beats\n cemgil_max : float\n The best Cemgil score for all metrical variations\n "
] |
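A quick sanity check, assuming the function above is mir_eval.beat.cemgil: perfect estimates contribute exp(0) = 1 per beat, and the normalizer .5*(4 + 4) = 4 divides a sum of four ones, so both returned scores are 1.0.

import numpy as np
import mir_eval

reference_beats = np.array([1.0, 2.0, 3.0, 4.0])
estimated_beats = np.array([1.0, 2.0, 3.0, 4.0])
cemgil_score, cemgil_max = mir_eval.beat.cemgil(reference_beats,
                                                estimated_beats)
# cemgil_score == cemgil_max == 1.0 for perfect estimates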
Please provide a description of the function:def goto(reference_beats,
estimated_beats,
goto_threshold=0.35,
goto_mu=0.2,
goto_sigma=0.2):
validate(reference_beats, estimated_beats)
    # When estimated or reference beats are empty, no beats are correct;
    # metric is 0
if estimated_beats.size == 0 or reference_beats.size == 0:
return 0.
# Error for each beat
beat_error = np.ones(reference_beats.shape[0])
# Flag for whether the reference and estimated beats are paired
paired = np.zeros(reference_beats.shape[0])
# Keep track of Goto's three criteria
goto_criteria = 0
for n in range(1, reference_beats.shape[0]-1):
    # Get previous inter-reference-beat-interval
previous_interval = 0.5*(reference_beats[n] - reference_beats[n-1])
# Window start - in the middle of the current beat and the previous
window_min = reference_beats[n] - previous_interval
# Next inter-reference-beat-interval
next_interval = 0.5*(reference_beats[n+1] - reference_beats[n])
# Window end - in the middle of the current beat and the next
window_max = reference_beats[n] + next_interval
# Get estimated beats in the window
beats_in_window = np.logical_and((estimated_beats >= window_min),
(estimated_beats < window_max))
# False negative/positive
if beats_in_window.sum() == 0 or beats_in_window.sum() > 1:
paired[n] = 0
beat_error[n] = 1
else:
# Single beat is paired!
paired[n] = 1
# Get offset of the estimated beat and the reference beat
offset = estimated_beats[beats_in_window] - reference_beats[n]
# Scale by previous or next interval
if offset < 0:
beat_error[n] = offset/previous_interval
else:
beat_error[n] = offset/next_interval
# Get indices of incorrect beats
incorrect_beats = np.flatnonzero(np.abs(beat_error) > goto_threshold)
    # Fewer than three incorrect beats means every interior beat is correct
    # (the first and last errors are never updated from 1, so they are
    # always flagged as incorrect)
if incorrect_beats.shape[0] < 3:
# Get the track of correct beats
track = beat_error[incorrect_beats[0] + 1:incorrect_beats[-1] - 1]
goto_criteria = 1
else:
# Get the track of maximal length
track_len = np.max(np.diff(incorrect_beats))
track_start = np.flatnonzero(np.diff(incorrect_beats) == track_len)[0]
# Is the track length at least 25% of the song?
if track_len - 1 > .25*(reference_beats.shape[0] - 2):
goto_criteria = 1
start_beat = incorrect_beats[track_start]
end_beat = incorrect_beats[track_start + 1]
track = beat_error[start_beat:end_beat + 1]
# If we have a track
if goto_criteria:
# Are mean and std of the track less than the required thresholds?
if np.mean(np.abs(track)) < goto_mu \
and np.std(track, ddof=1) < goto_sigma:
goto_criteria = 3
# If all criteria are met, score is 100%!
return 1.0*(goto_criteria == 3) | [
"Calculate Goto's score, a binary 1 or 0 depending on some specific\n heuristic criteria\n\n Examples\n --------\n >>> reference_beats = mir_eval.io.load_events('reference.txt')\n >>> reference_beats = mir_eval.beat.trim_beats(reference_beats)\n >>> estimated_beats = mir_eval.io.load_events('estimated.txt')\n >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)\n >>> goto_score = mir_eval.beat.goto(reference_beats, estimated_beats)\n\n Parameters\n ----------\n reference_beats : np.ndarray\n reference beat times, in seconds\n estimated_beats : np.ndarray\n query beat times, in seconds\n goto_threshold : float\n Threshold of beat error for a beat to be \"correct\"\n (Default value = 0.35)\n goto_mu : float\n The mean of the beat errors in the continuously correct\n track must be less than this\n (Default value = 0.2)\n goto_sigma : float\n The std of the beat errors in the continuously correct track must\n be less than this\n (Default value = 0.2)\n\n Returns\n -------\n goto_score : float\n Either 1.0 or 0.0 if some specific criteria are met\n "
] |
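A sketch of the metric's all-or-nothing behavior, assuming the function above is mir_eval.beat.goto. A constant 10 ms offset keeps every relative beat error at 0.01/0.25 = 0.04, well under the 0.35 threshold and with zero variance, so all three criteria pass:

import numpy as np
import mir_eval

reference_beats = np.arange(0., 20., 0.5)  # 40 beats at 120 BPM
estimated_beats = reference_beats + 0.01   # constant 10 ms offset
goto_score = mir_eval.beat.goto(reference_beats, estimated_beats)
# goto_score == 1.0; an estimate that drifts or skips beats returns 0.0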
Please provide a description of the function:def p_score(reference_beats,
estimated_beats,
p_score_threshold=0.2):
validate(reference_beats, estimated_beats)
    # When only one beat is provided for either the estimated or reference
    # sequence, beat intervals cannot be computed, so report a warning
if reference_beats.size == 1:
warnings.warn("Only one reference beat was provided, so beat intervals"
" cannot be computed.")
if estimated_beats.size == 1:
warnings.warn("Only one estimated beat was provided, so beat intervals"
" cannot be computed.")
# When estimated or reference beats have <= 1 beats, can't compute the
# metric, so return 0
if estimated_beats.size <= 1 or reference_beats.size <= 1:
return 0.
# Quantize beats to 10ms
sampling_rate = int(1.0/0.010)
# Shift beats so that the minimum in either sequence is zero
offset = min(estimated_beats.min(), reference_beats.min())
estimated_beats = np.array(estimated_beats - offset)
reference_beats = np.array(reference_beats - offset)
# Get the largest time index
    end_point = int(np.ceil(np.max([np.max(estimated_beats),
                                    np.max(reference_beats)])))
# Make impulse trains with impulses at beat locations
reference_train = np.zeros(end_point*sampling_rate + 1)
    beat_indices = np.ceil(reference_beats*sampling_rate).astype(int)
reference_train[beat_indices] = 1.0
estimated_train = np.zeros(end_point*sampling_rate + 1)
    beat_indices = np.ceil(estimated_beats*sampling_rate).astype(int)
estimated_train[beat_indices] = 1.0
# Window size to take the correlation over
# defined as .2*median(inter-annotation-intervals)
annotation_intervals = np.diff(np.flatnonzero(reference_train))
win_size = int(np.round(p_score_threshold*np.median(annotation_intervals)))
# Get full correlation
train_correlation = np.correlate(reference_train, estimated_train, 'full')
# Get the middle element - note we are rounding down on purpose here
middle_lag = train_correlation.shape[0]//2
# Truncate to only valid lags (those corresponding to the window)
start = middle_lag - win_size
end = middle_lag + win_size + 1
train_correlation = train_correlation[start:end]
# Compute and return the P-score
n_beats = np.max([estimated_beats.shape[0], reference_beats.shape[0]])
return np.sum(train_correlation)/n_beats | [
"Get McKinney's P-score.\n Based on the autocorrelation of the reference and estimated beats\n\n Examples\n --------\n >>> reference_beats = mir_eval.io.load_events('reference.txt')\n >>> reference_beats = mir_eval.beat.trim_beats(reference_beats)\n >>> estimated_beats = mir_eval.io.load_events('estimated.txt')\n >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)\n >>> p_score = mir_eval.beat.p_score(reference_beats, estimated_beats)\n\n Parameters\n ----------\n reference_beats : np.ndarray\n reference beat times, in seconds\n estimated_beats : np.ndarray\n query beat times, in seconds\n p_score_threshold : float\n Window size will be\n ``p_score_threshold*np.median(inter_annotation_intervals)``,\n (Default value = 0.2)\n\n Returns\n -------\n correlation : float\n McKinney's P-score\n\n "
] |
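A sketch under the default threshold, assuming the function above is mir_eval.beat.p_score. Identical sequences yield identical impulse trains whose cross-correlation mass sits entirely at lag zero, inside the window, giving a score of exactly 1.0:

import numpy as np
import mir_eval

reference_beats = np.arange(0., 10., 0.5)
estimated_beats = reference_beats.copy()
p = mir_eval.beat.p_score(reference_beats, estimated_beats)
# p == 1.0; shifting the estimates beyond the window lowers the score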
Please provide a description of the function:def continuity(reference_beats,
estimated_beats,
continuity_phase_threshold=0.175,
continuity_period_threshold=0.175):
validate(reference_beats, estimated_beats)
    # When only one beat is provided for either the estimated or reference
    # sequence, beat intervals cannot be computed, so report a warning
if reference_beats.size == 1:
warnings.warn("Only one reference beat was provided, so beat intervals"
" cannot be computed.")
if estimated_beats.size == 1:
warnings.warn("Only one estimated beat was provided, so beat intervals"
" cannot be computed.")
# When estimated or reference beats have <= 1 beats, can't compute the
# metric, so return 0
if estimated_beats.size <= 1 or reference_beats.size <= 1:
return 0., 0., 0., 0.
# Accuracies for each variation
continuous_accuracies = []
total_accuracies = []
# Get accuracy for each variation
for reference_beats in _get_reference_beat_variations(reference_beats):
# Annotations that have been used
n_annotations = np.max([reference_beats.shape[0],
estimated_beats.shape[0]])
used_annotations = np.zeros(n_annotations)
# Whether or not we are continuous at any given point
beat_successes = np.zeros(n_annotations)
for m in range(estimated_beats.shape[0]):
# Is this beat correct?
beat_success = 0
# Get differences for this beat
beat_differences = np.abs(estimated_beats[m] - reference_beats)
# Get nearest annotation index
nearest = np.argmin(beat_differences)
min_difference = beat_differences[nearest]
# Have we already used this annotation?
if used_annotations[nearest] == 0:
# Is this the first beat or first annotation?
# If so, look forward.
if m == 0 or nearest == 0:
# How far is the estimated beat from the reference beat,
# relative to the inter-annotation-interval?
if nearest + 1 < reference_beats.shape[0]:
reference_interval = (reference_beats[nearest + 1] -
reference_beats[nearest])
else:
# Special case when nearest + 1 is too large - use the
# previous interval instead
reference_interval = (reference_beats[nearest] -
reference_beats[nearest - 1])
# Handle this special case when beats are not unique
if reference_interval == 0:
if min_difference == 0:
phase = 1
else:
phase = np.inf
else:
phase = np.abs(min_difference/reference_interval)
# How close is the inter-beat-interval
# to the inter-annotation-interval?
if m + 1 < estimated_beats.shape[0]:
estimated_interval = (estimated_beats[m + 1] -
estimated_beats[m])
else:
# Special case when m + 1 is too large - use the
# previous interval
estimated_interval = (estimated_beats[m] -
estimated_beats[m - 1])
# Handle this special case when beats are not unique
if reference_interval == 0:
if estimated_interval == 0:
period = 0
else:
period = np.inf
else:
period = \
np.abs(1 - estimated_interval/reference_interval)
if phase < continuity_phase_threshold and \
period < continuity_period_threshold:
# Set this annotation as used
used_annotations[nearest] = 1
# This beat is matched
beat_success = 1
# This beat/annotation is not the first
else:
# How far is the estimated beat from the reference beat,
# relative to the inter-annotation-interval?
reference_interval = (reference_beats[nearest] -
reference_beats[nearest - 1])
phase = np.abs(min_difference/reference_interval)
# How close is the inter-beat-interval
# to the inter-annotation-interval?
estimated_interval = (estimated_beats[m] -
estimated_beats[m - 1])
reference_interval = (reference_beats[nearest] -
reference_beats[nearest - 1])
period = np.abs(1 - estimated_interval/reference_interval)
if phase < continuity_phase_threshold and \
period < continuity_period_threshold:
# Set this annotation as used
used_annotations[nearest] = 1
# This beat is matched
beat_success = 1
# Set whether this beat is matched or not
beat_successes[m] = beat_success
        # Add 0s at the beginning and end
# so that we at least find the beginning/end of the estimated beats
beat_successes = np.append(np.append(0, beat_successes), 0)
# Where is the beat not a match?
beat_failures = np.nonzero(beat_successes == 0)[0]
# Take out those zeros we added
beat_successes = beat_successes[1:-1]
# Get the continuous accuracy as the longest track of successful beats
longest_track = np.max(np.diff(beat_failures)) - 1
continuous_accuracy = longest_track/(1.0*beat_successes.shape[0])
continuous_accuracies.append(continuous_accuracy)
# Get the total accuracy - all sequences
total_accuracy = np.sum(beat_successes)/(1.0*beat_successes.shape[0])
total_accuracies.append(total_accuracy)
# Grab accuracy scores
return (continuous_accuracies[0],
total_accuracies[0],
np.max(continuous_accuracies),
np.max(total_accuracies)) | [
"Get metrics based on how much of the estimated beat sequence is\n continually correct.\n\n Examples\n --------\n >>> reference_beats = mir_eval.io.load_events('reference.txt')\n >>> reference_beats = mir_eval.beat.trim_beats(reference_beats)\n >>> estimated_beats = mir_eval.io.load_events('estimated.txt')\n >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)\n >>> CMLc, CMLt, AMLc, AMLt = mir_eval.beat.continuity(reference_beats,\n estimated_beats)\n\n Parameters\n ----------\n reference_beats : np.ndarray\n reference beat times, in seconds\n estimated_beats : np.ndarray\n query beat times, in seconds\n continuity_phase_threshold : float\n Allowable ratio of how far is the estimated beat\n can be from the reference beat\n (Default value = 0.175)\n continuity_period_threshold : float\n Allowable distance between the inter-beat-interval\n and the inter-annotation-interval\n (Default value = 0.175)\n\n Returns\n -------\n CMLc : float\n Correct metric level, continuous accuracy\n CMLt : float\n Correct metric level, total accuracy (continuity not required)\n AMLc : float\n Any metric level, continuous accuracy\n AMLt : float\n Any metric level, total accuracy (continuity not required)\n "
] |
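A sketch contrasting the correct- and any-metric-level scores, assuming the function above is mir_eval.beat.continuity. Estimates at double the reference tempo fail the period criterion at the original metric level but align with the double-tempo variation:

import numpy as np
import mir_eval

reference_beats = np.arange(0., 10., 0.5)   # 120 BPM
estimated_beats = np.arange(0., 10., 0.25)  # 240 BPM, double tempo
CMLc, CMLt, AMLc, AMLt = mir_eval.beat.continuity(reference_beats,
                                                  estimated_beats)
# CMLc == CMLt == 0.0, while AMLc and AMLt come out close to 1.0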