Dataset schema (column name, type, observed value range):

repo               string, 7–55 chars
path               string, 4–127 chars
func_name          string, 1–88 chars
original_string    string, 75–19.8k chars
language           string, 1 distinct value
code               string, 75–19.8k chars
code_tokens        list
docstring          string, 3–17.3k chars
docstring_tokens   list
sha                string, 40 chars
url                string, 87–242 chars
partition          string, 1 distinct value
LucidtechAI/las-sdk-python
las/client.py
Client.put_document
python
def put_document(document_path: str, content_type: str, presigned_url: str) -> str:
    """Convenience method for putting a document to presigned url.

    >>> from las import Client
    >>> client = Client(endpoint='<api endpoint>')
    >>> client.put_document(document_path='document.jpeg', content_type='image/jpeg',
    >>>                     presigned_url='<presigned url>')

    :param document_path: Path to document to upload
    :type document_path: str
    :param content_type: Mime type of document to upload.
        Same as provided to :py:func:`~las.Client.post_documents`
    :type content_type: str
    :param presigned_url: Presigned upload url from :py:func:`~las.Client.post_documents`
    :type presigned_url: str
    :return: Response from put operation
    :rtype: str

    :raises requests.exceptions.RequestException: If error was raised by requests
    """
    body = pathlib.Path(document_path).read_bytes()
    headers = {'Content-Type': content_type}
    put_document_response = requests.put(presigned_url, data=body, headers=headers)
    put_document_response.raise_for_status()
    return put_document_response.content.decode()
[ "def", "put_document", "(", "document_path", ":", "str", ",", "content_type", ":", "str", ",", "presigned_url", ":", "str", ")", "->", "str", ":", "body", "=", "pathlib", ".", "Path", "(", "document_path", ")", ".", "read_bytes", "(", ")", "headers", "=", "{", "'Content-Type'", ":", "content_type", "}", "put_document_response", "=", "requests", ".", "put", "(", "presigned_url", ",", "data", "=", "body", ",", "headers", "=", "headers", ")", "put_document_response", ".", "raise_for_status", "(", ")", "return", "put_document_response", ".", "content", ".", "decode", "(", ")" ]
Convenience method for putting a document to presigned url. >>> from las import Client >>> client = Client(endpoint='<api endpoint>') >>> client.put_document(document_path='document.jpeg', content_type='image/jpeg', >>> presigned_url='<presigned url>') :param document_path: Path to document to upload :type document_path: str :param content_type: Mime type of document to upload. Same as provided to :py:func:`~las.Client.post_documents` :type content_type: str :param presigned_url: Presigned upload url from :py:func:`~las.Client.post_documents` :type presigned_url: str :return: Response from put operation :rtype: str :raises requests.exceptions.RequestException: If error was raised by requests
[ "Convenience", "method", "for", "putting", "a", "document", "to", "presigned", "url", "." ]
5f39dee7983baff28a1deb93c12d36414d835d12
https://github.com/LucidtechAI/las-sdk-python/blob/5f39dee7983baff28a1deb93c12d36414d835d12/las/client.py#L117-L140
train
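A short usage sketch for the record above, sticking to the placeholders its own docstring uses (the endpoint and presigned URL are stand-ins; a real presigned URL would come from post_documents, whose return shape is not shown here):

from las import Client

client = Client(endpoint='<api endpoint>')  # placeholder, as in the docstring
# Obtain a real presigned URL from post_documents before calling this.
result = client.put_document(
    document_path='document.jpeg',
    content_type='image/jpeg',
    presigned_url='<presigned url>',
)
print(result)  # decoded body of the PUT response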
owncloud/pyocclient
owncloud/owncloud.py
ShareInfo.get_expiration
python
def get_expiration(self):
    """Returns the expiration date.

    :returns: expiration date
    :rtype: datetime object
    """
    exp = self._get_int('expiration')
    if exp is not None:
        return datetime.datetime.fromtimestamp(exp)
    return None
[ "def", "get_expiration", "(", "self", ")", ":", "exp", "=", "self", ".", "_get_int", "(", "'expiration'", ")", "if", "exp", "is", "not", "None", ":", "return", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "exp", ")", "return", "None" ]
Returns the expiration date. :returns: expiration date :rtype: datetime object
[ "Returns", "the", "expiration", "date", "." ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L140-L151
train
owncloud/pyocclient
owncloud/owncloud.py
Client.login
python
def login(self, user_id, password):
    """Authenticate to ownCloud.
    This will create a session on the server.

    :param user_id: user id
    :param password: password
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    self._session = requests.session()
    self._session.verify = self._verify_certs
    self._session.auth = (user_id, password)

    try:
        self._update_capabilities()

        url_components = parse.urlparse(self.url)
        if self._dav_endpoint_version == 1:
            self._davpath = url_components.path + 'remote.php/dav/files/' + parse.quote(user_id)
            self._webdav_url = self.url + 'remote.php/dav/files/' + parse.quote(user_id)
        else:
            self._davpath = url_components.path + 'remote.php/webdav'
            self._webdav_url = self.url + 'remote.php/webdav'

    except HTTPResponseError as e:
        self._session.close()
        self._session = None
        raise e
[ "def", "login", "(", "self", ",", "user_id", ",", "password", ")", ":", "self", ".", "_session", "=", "requests", ".", "session", "(", ")", "self", ".", "_session", ".", "verify", "=", "self", ".", "_verify_certs", "self", ".", "_session", ".", "auth", "=", "(", "user_id", ",", "password", ")", "try", ":", "self", ".", "_update_capabilities", "(", ")", "url_components", "=", "parse", ".", "urlparse", "(", "self", ".", "url", ")", "if", "self", ".", "_dav_endpoint_version", "==", "1", ":", "self", ".", "_davpath", "=", "url_components", ".", "path", "+", "'remote.php/dav/files/'", "+", "parse", ".", "quote", "(", "user_id", ")", "self", ".", "_webdav_url", "=", "self", ".", "url", "+", "'remote.php/dav/files/'", "+", "parse", ".", "quote", "(", "user_id", ")", "else", ":", "self", ".", "_davpath", "=", "url_components", ".", "path", "+", "'remote.php/webdav'", "self", ".", "_webdav_url", "=", "self", ".", "url", "+", "'remote.php/webdav'", "except", "HTTPResponseError", "as", "e", ":", "self", ".", "_session", ".", "close", "(", ")", "self", ".", "_session", "=", "None", "raise", "e" ]
Authenticate to ownCloud. This will create a session on the server. :param user_id: user id :param password: password :raises: HTTPResponseError in case an HTTP error status was returned
[ "Authenticate", "to", "ownCloud", ".", "This", "will", "create", "a", "session", "on", "the", "server", "." ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L339-L366
train
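A minimal connection sketch for the login method above, assuming the package is imported as owncloud (as in pyocclient's README) and using a hypothetical server URL and credentials; later sketches in this section reuse this connected client oc:

import owncloud

oc = owncloud.Client('https://cloud.example.org/')  # hypothetical server URL
oc.login('demo_user', 'demo_password')  # builds the session and WebDAV URLs shown above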
owncloud/pyocclient
owncloud/owncloud.py
Client.file_info
python
def file_info(self, path):
    """Returns the file info for the given remote file

    :param path: path to the remote file
    :returns: file info
    :rtype: :class:`FileInfo` object or `None` if file was not found
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    res = self._make_dav_request('PROPFIND', path, headers={'Depth': '0'})
    if res:
        return res[0]
    return None
[ "def", "file_info", "(", "self", ",", "path", ")", ":", "res", "=", "self", ".", "_make_dav_request", "(", "'PROPFIND'", ",", "path", ",", "headers", "=", "{", "'Depth'", ":", "'0'", "}", ")", "if", "res", ":", "return", "res", "[", "0", "]", "return", "None" ]
Returns the file info for the given remote file :param path: path to the remote file :returns: file info :rtype: :class:`FileInfo` object or `None` if file was not found :raises: HTTPResponseError in case an HTTP error status was returned
[ "Returns", "the", "file", "info", "for", "the", "given", "remote", "file" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L378-L390
train
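Continuing with the client oc from the login sketch: file_info doubles as an existence check, since it returns None for missing files (the remote path below is hypothetical):

info = oc.file_info('/Documents/report.odt')  # hypothetical remote path
if info is None:
    print('file not found')
else:
    print(info)  # FileInfo resolved via a Depth: 0 PROPFIND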
owncloud/pyocclient
owncloud/owncloud.py
Client.get_file_contents
python
def get_file_contents(self, path):
    """Returns the contents of a remote file

    :param path: path to the remote file
    :returns: file contents
    :rtype: binary data
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    path = self._normalize_path(path)
    res = self._session.get(
        self._webdav_url + parse.quote(self._encode_string(path))
    )
    if res.status_code == 200:
        return res.content
    elif res.status_code >= 400:
        raise HTTPResponseError(res)
    return False
[ "def", "get_file_contents", "(", "self", ",", "path", ")", ":", "path", "=", "self", ".", "_normalize_path", "(", "path", ")", "res", "=", "self", ".", "_session", ".", "get", "(", "self", ".", "_webdav_url", "+", "parse", ".", "quote", "(", "self", ".", "_encode_string", "(", "path", ")", ")", ")", "if", "res", ".", "status_code", "==", "200", ":", "return", "res", ".", "content", "elif", "res", ".", "status_code", ">=", "400", ":", "raise", "HTTPResponseError", "(", "res", ")", "return", "False" ]
Returns the contents of a remote file :param path: path to the remote file :returns: file contents :rtype: binary data :raises: HTTPResponseError in case an HTTP error status was returned
[ "Returns", "the", "contents", "of", "a", "remote", "file" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L414-L430
train
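A sketch of reading a remote file with the client oc from the login sketch; note the method returns raw bytes, and False for non-error, non-200 responses (path hypothetical):

content = oc.get_file_contents('/Documents/notes.txt')  # hypothetical path
if content is not False:
    print(content.decode('utf-8'))  # decoding is left to the caller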
owncloud/pyocclient
owncloud/owncloud.py
Client.get_directory_as_zip
python
def get_directory_as_zip(self, remote_path, local_file):
    """Downloads a remote directory as zip

    :param remote_path: path to the remote directory to download
    :param local_file: path and name of the target local file
    :returns: True if the operation succeeded, False otherwise
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    remote_path = self._normalize_path(remote_path)
    url = self.url + 'index.php/apps/files/ajax/download.php?dir=' \
        + parse.quote(remote_path)
    res = self._session.get(url, stream=True)
    if res.status_code == 200:
        if local_file is None:
            # use downloaded file name from Content-Disposition
            # targetFile = res.headers['content-disposition']
            local_file = os.path.basename(remote_path)

        file_handle = open(local_file, 'wb', 8192)
        for chunk in res.iter_content(8192):
            file_handle.write(chunk)
        file_handle.close()
        return True
    elif res.status_code >= 400:
        raise HTTPResponseError(res)
    return False
[ "def", "get_directory_as_zip", "(", "self", ",", "remote_path", ",", "local_file", ")", ":", "remote_path", "=", "self", ".", "_normalize_path", "(", "remote_path", ")", "url", "=", "self", ".", "url", "+", "'index.php/apps/files/ajax/download.php?dir='", "+", "parse", ".", "quote", "(", "remote_path", ")", "res", "=", "self", ".", "_session", ".", "get", "(", "url", ",", "stream", "=", "True", ")", "if", "res", ".", "status_code", "==", "200", ":", "if", "local_file", "is", "None", ":", "# use downloaded file name from Content-Disposition", "# targetFile = res.headers['content-disposition']", "local_file", "=", "os", ".", "path", ".", "basename", "(", "remote_path", ")", "file_handle", "=", "open", "(", "local_file", ",", "'wb'", ",", "8192", ")", "for", "chunk", "in", "res", ".", "iter_content", "(", "8192", ")", ":", "file_handle", ".", "write", "(", "chunk", ")", "file_handle", ".", "close", "(", ")", "return", "True", "elif", "res", ".", "status_code", ">=", "400", ":", "raise", "HTTPResponseError", "(", "res", ")", "return", "False" ]
Downloads a remote directory as zip :param remote_path: path to the remote directory to download :param local_file: path and name of the target local file :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned
[ "Downloads", "a", "remote", "directory", "as", "zip" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L461-L486
train
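A sketch of the zip download above using the client oc (paths hypothetical); passing None as the local file name falls back to the remote directory's base name, per the branch in the code:

oc.get_directory_as_zip('/Photos', 'photos.zip')  # hypothetical paths
oc.get_directory_as_zip('/Photos', None)          # saved as 'Photos' in the working directory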
owncloud/pyocclient
owncloud/owncloud.py
Client.put_directory
python
def put_directory(self, target_path, local_directory, **kwargs):
    """Upload a directory with all its contents

    :param target_path: path of the directory to upload into
    :param local_directory: path to the local directory to upload
    :param \*\*kwargs: optional arguments that ``put_file`` accepts
    :returns: True if the operation succeeded, False otherwise
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    target_path = self._normalize_path(target_path)
    if not target_path.endswith('/'):
        target_path += '/'
    gathered_files = []

    if not local_directory.endswith('/'):
        local_directory += '/'

    basedir = os.path.basename(local_directory[0:-1]) + '/'
    # gather files to upload
    for path, _, files in os.walk(local_directory):
        gathered_files.append(
            (path, basedir + path[len(local_directory):], files)
        )

    for path, remote_path, files in gathered_files:
        self.mkdir(target_path + remote_path + '/')
        for name in files:
            if not self.put_file(target_path + remote_path + '/',
                                 path + '/' + name, **kwargs):
                return False
    return True
[ "def", "put_directory", "(", "self", ",", "target_path", ",", "local_directory", ",", "*", "*", "kwargs", ")", ":", "target_path", "=", "self", ".", "_normalize_path", "(", "target_path", ")", "if", "not", "target_path", ".", "endswith", "(", "'/'", ")", ":", "target_path", "+=", "'/'", "gathered_files", "=", "[", "]", "if", "not", "local_directory", ".", "endswith", "(", "'/'", ")", ":", "local_directory", "+=", "'/'", "basedir", "=", "os", ".", "path", ".", "basename", "(", "local_directory", "[", "0", ":", "-", "1", "]", ")", "+", "'/'", "# gather files to upload", "for", "path", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "local_directory", ")", ":", "gathered_files", ".", "append", "(", "(", "path", ",", "basedir", "+", "path", "[", "len", "(", "local_directory", ")", ":", "]", ",", "files", ")", ")", "for", "path", ",", "remote_path", ",", "files", "in", "gathered_files", ":", "self", ".", "mkdir", "(", "target_path", "+", "remote_path", "+", "'/'", ")", "for", "name", "in", "files", ":", "if", "not", "self", ".", "put_file", "(", "target_path", "+", "remote_path", "+", "'/'", ",", "path", "+", "'/'", "+", "name", ",", "*", "*", "kwargs", ")", ":", "return", "False", "return", "True" ]
Upload a directory with all its contents :param target_path: path of the directory to upload into :param local_directory: path to the local directory to upload :param \*\*kwargs: optional arguments that ``put_file`` accepts :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned
[ "Upload", "a", "directory", "with", "all", "its", "contents" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L536-L566
train
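A sketch of uploading a directory tree with the client oc (paths hypothetical); as the basedir logic above shows, the local directory's base name becomes a subdirectory under target_path:

# Uploads /home/demo/project as /Backups/project/... on the server
ok = oc.put_directory('/Backups', '/home/demo/project')  # hypothetical paths
print(ok)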
owncloud/pyocclient
owncloud/owncloud.py
Client._put_file_chunked
python
def _put_file_chunked(self, remote_path, local_source_file, **kwargs):
    """Uploads a file using chunks. If the file is smaller than
    ``chunk_size`` it will be uploaded directly.

    :param remote_path: path to the target file. A target directory can
        also be specified instead by appending a "/"
    :param local_source_file: path to the local file to upload
    :param \*\*kwargs: optional arguments that ``put_file`` accepts
    :returns: True if the operation succeeded, False otherwise
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    chunk_size = kwargs.get('chunk_size', 10 * 1024 * 1024)
    result = True
    transfer_id = int(time.time())

    remote_path = self._normalize_path(remote_path)
    if remote_path.endswith('/'):
        remote_path += os.path.basename(local_source_file)

    stat_result = os.stat(local_source_file)

    file_handle = open(local_source_file, 'rb', 8192)
    file_handle.seek(0, os.SEEK_END)
    size = file_handle.tell()
    file_handle.seek(0)

    headers = {}
    if kwargs.get('keep_mtime', True):
        headers['X-OC-MTIME'] = str(int(stat_result.st_mtime))

    if size == 0:
        return self._make_dav_request(
            'PUT',
            remote_path,
            data='',
            headers=headers
        )

    chunk_count = int(math.ceil(float(size) / float(chunk_size)))

    if chunk_count > 1:
        headers['OC-CHUNKED'] = '1'

    for chunk_index in range(0, int(chunk_count)):
        data = file_handle.read(chunk_size)
        if chunk_count > 1:
            chunk_name = '%s-chunking-%s-%i-%i' % \
                (remote_path, transfer_id, chunk_count, chunk_index)
        else:
            chunk_name = remote_path

        if not self._make_dav_request(
                'PUT',
                chunk_name,
                data=data,
                headers=headers
        ):
            result = False
            break

    file_handle.close()
    return result
[ "def", "_put_file_chunked", "(", "self", ",", "remote_path", ",", "local_source_file", ",", "*", "*", "kwargs", ")", ":", "chunk_size", "=", "kwargs", ".", "get", "(", "'chunk_size'", ",", "10", "*", "1024", "*", "1024", ")", "result", "=", "True", "transfer_id", "=", "int", "(", "time", ".", "time", "(", ")", ")", "remote_path", "=", "self", ".", "_normalize_path", "(", "remote_path", ")", "if", "remote_path", ".", "endswith", "(", "'/'", ")", ":", "remote_path", "+=", "os", ".", "path", ".", "basename", "(", "local_source_file", ")", "stat_result", "=", "os", ".", "stat", "(", "local_source_file", ")", "file_handle", "=", "open", "(", "local_source_file", ",", "'rb'", ",", "8192", ")", "file_handle", ".", "seek", "(", "0", ",", "os", ".", "SEEK_END", ")", "size", "=", "file_handle", ".", "tell", "(", ")", "file_handle", ".", "seek", "(", "0", ")", "headers", "=", "{", "}", "if", "kwargs", ".", "get", "(", "'keep_mtime'", ",", "True", ")", ":", "headers", "[", "'X-OC-MTIME'", "]", "=", "str", "(", "int", "(", "stat_result", ".", "st_mtime", ")", ")", "if", "size", "==", "0", ":", "return", "self", ".", "_make_dav_request", "(", "'PUT'", ",", "remote_path", ",", "data", "=", "''", ",", "headers", "=", "headers", ")", "chunk_count", "=", "int", "(", "math", ".", "ceil", "(", "float", "(", "size", ")", "/", "float", "(", "chunk_size", ")", ")", ")", "if", "chunk_count", ">", "1", ":", "headers", "[", "'OC-CHUNKED'", "]", "=", "'1'", "for", "chunk_index", "in", "range", "(", "0", ",", "int", "(", "chunk_count", ")", ")", ":", "data", "=", "file_handle", ".", "read", "(", "chunk_size", ")", "if", "chunk_count", ">", "1", ":", "chunk_name", "=", "'%s-chunking-%s-%i-%i'", "%", "(", "remote_path", ",", "transfer_id", ",", "chunk_count", ",", "chunk_index", ")", "else", ":", "chunk_name", "=", "remote_path", "if", "not", "self", ".", "_make_dav_request", "(", "'PUT'", ",", "chunk_name", ",", "data", "=", "data", ",", "headers", "=", "headers", ")", ":", "result", "=", "False", "break", "file_handle", ".", "close", "(", ")", "return", "result" ]
Uploads a file using chunks. If the file is smaller than ``chunk_size`` it will be uploaded directly. :param remote_path: path to the target file. A target directory can also be specified instead by appending a "/" :param local_source_file: path to the local file to upload :param \*\*kwargs: optional arguments that ``put_file`` accepts :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned
[ "Uploads", "a", "file", "using", "chunks", ".", "If", "the", "file", "is", "smaller", "than", "chunk_size", "it", "will", "be", "uploaded", "directly", "." ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L568-L630
train
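_put_file_chunked is internal, so the chunking options above are normally reached through put_file's kwargs; a sketch with the client oc, using only the option names visible in the code (the paths are hypothetical):

# chunk_size and keep_mtime are the kwargs read by _put_file_chunked above
oc.put_file('/Backups/video.mkv', 'video.mkv',
            chunk_size=1024 * 1024,  # 1 MiB chunks instead of the 10 MiB default
            keep_mtime=False)        # skip the X-OC-MTIME header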
owncloud/pyocclient
owncloud/owncloud.py
Client.list_open_remote_share
python
def list_open_remote_share(self):
    """List all pending remote shares

    :returns: array of pending remote shares
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    res = self._make_ocs_request(
        'GET',
        self.OCS_SERVICE_SHARE,
        'remote_shares/pending'
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree)
        shares = []
        for element in tree.find('data').iter('element'):
            share_attr = {}
            for child in element:
                key = child.tag
                value = child.text
                share_attr[key] = value
            shares.append(share_attr)
        return shares

    raise HTTPResponseError(res)
[ "def", "list_open_remote_share", "(", "self", ")", ":", "res", "=", "self", ".", "_make_ocs_request", "(", "'GET'", ",", "self", ".", "OCS_SERVICE_SHARE", ",", "'remote_shares/pending'", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ")", "shares", "=", "[", "]", "for", "element", "in", "tree", ".", "find", "(", "'data'", ")", ".", "iter", "(", "'element'", ")", ":", "share_attr", "=", "{", "}", "for", "child", "in", "element", ":", "key", "=", "child", ".", "tag", "value", "=", "child", ".", "text", "share_attr", "[", "key", "]", "=", "value", "shares", ".", "append", "(", "share_attr", ")", "return", "shares", "raise", "HTTPResponseError", "(", "res", ")" ]
List all pending remote shares :returns: array of pending remote shares :raises: HTTPResponseError in case an HTTP error status was returned
[ "List", "all", "pending", "remote", "shares" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L652-L676
train
owncloud/pyocclient
owncloud/owncloud.py
Client.accept_remote_share
python
def accept_remote_share(self, share_id):
    """Accepts a remote share

    :param share_id: Share ID (int)
    :returns: True if the operation succeeded, False otherwise
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    if not isinstance(share_id, int):
        return False

    res = self._make_ocs_request(
        'POST',
        self.OCS_SERVICE_SHARE,
        'remote_shares/pending/' + str(share_id)
    )
    if res.status_code == 200:
        return res

    raise HTTPResponseError(res)
[ "def", "accept_remote_share", "(", "self", ",", "share_id", ")", ":", "if", "not", "isinstance", "(", "share_id", ",", "int", ")", ":", "return", "False", "res", "=", "self", ".", "_make_ocs_request", "(", "'POST'", ",", "self", ".", "OCS_SERVICE_SHARE", ",", "'remote_shares/pending/'", "+", "str", "(", "share_id", ")", ")", "if", "res", ".", "status_code", "==", "200", ":", "return", "res", "raise", "HTTPResponseError", "(", "res", ")" ]
Accepts a remote share :param share_id: Share ID (int) :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned
[ "Accepts", "a", "remote", "share" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L678-L695
train
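A sketch combining the two preceding records with the client oc: list pending remote shares, then accept each one. The 'id' key is an assumption about the OCS payload, since list_open_remote_share returns plain tag/text dicts:

for share in oc.list_open_remote_share():
    share_id = int(share['id'])  # 'id' field name assumed from the OCS schema
    oc.accept_remote_share(share_id)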
owncloud/pyocclient
owncloud/owncloud.py
Client.update_share
python
def update_share(self, share_id, **kwargs):
    """Updates a given share

    :param share_id: (int) Share ID
    :param perms: (int) update permissions (see share_file_with_user() below)
    :param password: (string) updated password for public link Share
    :param public_upload: (boolean) enable/disable public upload for public shares
    :returns: True if the operation succeeded, False otherwise
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    perms = kwargs.get('perms', None)
    password = kwargs.get('password', None)
    public_upload = kwargs.get('public_upload', None)
    if (isinstance(perms, int)) and (perms > self.OCS_PERMISSION_ALL):
        perms = None
    if not (perms or password or (public_upload is not None)):
        return False
    if not isinstance(share_id, int):
        return False

    data = {}
    if perms:
        data['permissions'] = perms
    if isinstance(password, six.string_types):
        data['password'] = password
    if (public_upload is not None) and (isinstance(public_upload, bool)):
        data['publicUpload'] = str(public_upload).lower()

    res = self._make_ocs_request(
        'PUT',
        self.OCS_SERVICE_SHARE,
        'shares/' + str(share_id),
        data=data
    )
    if res.status_code == 200:
        return True
    raise HTTPResponseError(res)
[ "def", "update_share", "(", "self", ",", "share_id", ",", "*", "*", "kwargs", ")", ":", "perms", "=", "kwargs", ".", "get", "(", "'perms'", ",", "None", ")", "password", "=", "kwargs", ".", "get", "(", "'password'", ",", "None", ")", "public_upload", "=", "kwargs", ".", "get", "(", "'public_upload'", ",", "None", ")", "if", "(", "isinstance", "(", "perms", ",", "int", ")", ")", "and", "(", "perms", ">", "self", ".", "OCS_PERMISSION_ALL", ")", ":", "perms", "=", "None", "if", "not", "(", "perms", "or", "password", "or", "(", "public_upload", "is", "not", "None", ")", ")", ":", "return", "False", "if", "not", "isinstance", "(", "share_id", ",", "int", ")", ":", "return", "False", "data", "=", "{", "}", "if", "perms", ":", "data", "[", "'permissions'", "]", "=", "perms", "if", "isinstance", "(", "password", ",", "six", ".", "string_types", ")", ":", "data", "[", "'password'", "]", "=", "password", "if", "(", "public_upload", "is", "not", "None", ")", "and", "(", "isinstance", "(", "public_upload", ",", "bool", ")", ")", ":", "data", "[", "'publicUpload'", "]", "=", "str", "(", "public_upload", ")", ".", "lower", "(", ")", "res", "=", "self", ".", "_make_ocs_request", "(", "'PUT'", ",", "self", ".", "OCS_SERVICE_SHARE", ",", "'shares/'", "+", "str", "(", "share_id", ")", ",", "data", "=", "data", ")", "if", "res", ".", "status_code", "==", "200", ":", "return", "True", "raise", "HTTPResponseError", "(", "res", ")" ]
Updates a given share :param share_id: (int) Share ID :param perms: (int) update permissions (see share_file_with_user() below) :param password: (string) updated password for public link Share :param public_upload: (boolean) enable/disable public upload for public shares :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned
[ "Updates", "a", "given", "share" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L735-L772
train
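A sketch of update_share with the client oc and a hypothetical share id. The permission value assumes ownCloud's usual bitmask (read 1, update 2, create 4, delete 8, share 16), so 31 would be "all" if OCS_PERMISSION_ALL follows that convention:

oc.update_share(42, perms=31)            # hypothetical share id; grant all permissions
oc.update_share(42, password='s3cret')   # set a password on a public link share
oc.update_share(42, public_upload=True)  # serialized as publicUpload=true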
owncloud/pyocclient
owncloud/owncloud.py
Client.share_file_with_link
python
def share_file_with_link(self, path, **kwargs):
    """Shares a remote file with link

    :param path: path to the remote file to share
    :param perms (optional): permission of the shared object
        defaults to read only (1)
    :param public_upload (optional): allows users to upload files or folders
    :param password (optional): sets a password
        http://doc.owncloud.org/server/6.0/admin_manual/sharing_api/index.html
    :returns: instance of :class:`ShareInfo` with the share info
        or False if the operation failed
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    perms = kwargs.get('perms', None)
    public_upload = kwargs.get('public_upload', 'false')
    password = kwargs.get('password', None)

    path = self._normalize_path(path)
    post_data = {
        'shareType': self.OCS_SHARE_TYPE_LINK,
        'path': self._encode_string(path),
    }
    if (public_upload is not None) and (isinstance(public_upload, bool)):
        post_data['publicUpload'] = str(public_upload).lower()
    if isinstance(password, six.string_types):
        post_data['password'] = password
    if perms:
        post_data['permissions'] = perms

    res = self._make_ocs_request(
        'POST',
        self.OCS_SERVICE_SHARE,
        'shares',
        data=post_data
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree)
        data_el = tree.find('data')
        return ShareInfo(
            {
                'id': data_el.find('id').text,
                'path': path,
                'url': data_el.find('url').text,
                'token': data_el.find('token').text
            }
        )
    raise HTTPResponseError(res)
[ "def", "share_file_with_link", "(", "self", ",", "path", ",", "*", "*", "kwargs", ")", ":", "perms", "=", "kwargs", ".", "get", "(", "'perms'", ",", "None", ")", "public_upload", "=", "kwargs", ".", "get", "(", "'public_upload'", ",", "'false'", ")", "password", "=", "kwargs", ".", "get", "(", "'password'", ",", "None", ")", "path", "=", "self", ".", "_normalize_path", "(", "path", ")", "post_data", "=", "{", "'shareType'", ":", "self", ".", "OCS_SHARE_TYPE_LINK", ",", "'path'", ":", "self", ".", "_encode_string", "(", "path", ")", ",", "}", "if", "(", "public_upload", "is", "not", "None", ")", "and", "(", "isinstance", "(", "public_upload", ",", "bool", ")", ")", ":", "post_data", "[", "'publicUpload'", "]", "=", "str", "(", "public_upload", ")", ".", "lower", "(", ")", "if", "isinstance", "(", "password", ",", "six", ".", "string_types", ")", ":", "post_data", "[", "'password'", "]", "=", "password", "if", "perms", ":", "post_data", "[", "'permissions'", "]", "=", "perms", "res", "=", "self", ".", "_make_ocs_request", "(", "'POST'", ",", "self", ".", "OCS_SERVICE_SHARE", ",", "'shares'", ",", "data", "=", "post_data", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ")", "data_el", "=", "tree", ".", "find", "(", "'data'", ")", "return", "ShareInfo", "(", "{", "'id'", ":", "data_el", ".", "find", "(", "'id'", ")", ".", "text", ",", "'path'", ":", "path", ",", "'url'", ":", "data_el", ".", "find", "(", "'url'", ")", ".", "text", ",", "'token'", ":", "data_el", ".", "find", "(", "'token'", ")", ".", "text", "}", ")", "raise", "HTTPResponseError", "(", "res", ")" ]
Shares a remote file with link :param path: path to the remote file to share :param perms (optional): permission of the shared object defaults to read only (1) :param public_upload (optional): allows users to upload files or folders :param password (optional): sets a password http://doc.owncloud.org/server/6.0/admin_manual/sharing_api/index.html :returns: instance of :class:`ShareInfo` with the share info or False if the operation failed :raises: HTTPResponseError in case an HTTP error status was returned
[ "Shares", "a", "remote", "file", "with", "link" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L800-L847
train
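A sketch of creating a public link with the client oc (path and password hypothetical); the returned ShareInfo carries the id, url and token parsed from the OCS response above:

link = oc.share_file_with_link('/Documents/report.pdf', password='s3cret')
print(link)  # ShareInfo built from the id/url/token elements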
owncloud/pyocclient
owncloud/owncloud.py
Client.is_shared
python
def is_shared(self, path):
    """Checks whether a path is already shared

    :param path: path to the share to be checked
    :returns: True if the path is already shared, else False
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    # make sure that the path exists - if not, raise HTTPResponseError
    self.file_info(path)
    try:
        result = self.get_shares(path)
        if result:
            return len(result) > 0
    except OCSResponseError as e:
        if e.status_code != 404:
            raise e
        return False
    return False
[ "def", "is_shared", "(", "self", ",", "path", ")", ":", "# make sure that the path exist - if not, raise HTTPResponseError", "self", ".", "file_info", "(", "path", ")", "try", ":", "result", "=", "self", ".", "get_shares", "(", "path", ")", "if", "result", ":", "return", "len", "(", "result", ")", ">", "0", "except", "OCSResponseError", "as", "e", ":", "if", "e", ".", "status_code", "!=", "404", ":", "raise", "e", "return", "False", "return", "False" ]
Checks whether a path is already shared :param path: path to the share to be checked :returns: True if the path is already shared, else False :raises: HTTPResponseError in case an HTTP error status was returned
[ "Checks", "whether", "a", "path", "is", "already", "shared" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L849-L866
train
owncloud/pyocclient
owncloud/owncloud.py
Client.get_share
python
def get_share(self, share_id):
    """Returns share information about known share

    :param share_id: id of the share to be checked
    :returns: instance of ShareInfo class
    :raises: ResponseError in case an HTTP error status was returned
    """
    if (share_id is None) or not (isinstance(share_id, int)):
        return None

    res = self._make_ocs_request(
        'GET',
        self.OCS_SERVICE_SHARE,
        'shares/' + str(share_id)
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree)
        return self._get_shareinfo(tree.find('data').find('element'))
    raise HTTPResponseError(res)
[ "def", "get_share", "(", "self", ",", "share_id", ")", ":", "if", "(", "share_id", "is", "None", ")", "or", "not", "(", "isinstance", "(", "share_id", ",", "int", ")", ")", ":", "return", "None", "res", "=", "self", ".", "_make_ocs_request", "(", "'GET'", ",", "self", ".", "OCS_SERVICE_SHARE", ",", "'shares/'", "+", "str", "(", "share_id", ")", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ")", "return", "self", ".", "_get_shareinfo", "(", "tree", ".", "find", "(", "'data'", ")", ".", "find", "(", "'element'", ")", ")", "raise", "HTTPResponseError", "(", "res", ")" ]
Returns share information about known share :param share_id: id of the share to be checked :returns: instance of ShareInfo class :raises: ResponseError in case an HTTP error status was returned
[ "Returns", "share", "information", "about", "known", "share" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L868-L887
train
owncloud/pyocclient
owncloud/owncloud.py
Client.get_shares
python
def get_shares(self, path='', **kwargs):
    """Returns array of shares

    :param path: path to the share to be checked
    :param reshares: (optional, boolean) returns not only the shares from
        the current user but all shares from the given file (default: False)
    :param subfiles: (optional, boolean) returns all shares within
        a folder, given that path defines a folder (default: False)
    :param shared_with_me: (optional, boolean) returns all shares which are
        shared with me (default: False)
    :returns: array of shares ShareInfo instances or empty array if the
        operation failed
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    if not (isinstance(path, six.string_types)):
        return None

    data = 'shares'
    if path != '':
        data += '?'
        path = self._encode_string(self._normalize_path(path))
        args = {'path': path}

        reshares = kwargs.get('reshares', False)
        if isinstance(reshares, bool) and reshares:
            args['reshares'] = reshares

        subfiles = kwargs.get('subfiles', False)
        if isinstance(subfiles, bool) and subfiles:
            args['subfiles'] = str(subfiles).lower()

        shared_with_me = kwargs.get('shared_with_me', False)
        if isinstance(shared_with_me, bool) and shared_with_me:
            args['shared_with_me'] = "true"
            del args['path']

        data += parse.urlencode(args)

    res = self._make_ocs_request(
        'GET',
        self.OCS_SERVICE_SHARE,
        data
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree)
        shares = []
        for element in tree.find('data').iter('element'):
            '''share_attr = {}
            for child in element:
                key = child.tag
                value = child.text
                share_attr[key] = value
            shares.append(share_attr)'''
            shares.append(self._get_shareinfo(element))
        return shares
    raise HTTPResponseError(res)
[ "def", "get_shares", "(", "self", ",", "path", "=", "''", ",", "*", "*", "kwargs", ")", ":", "if", "not", "(", "isinstance", "(", "path", ",", "six", ".", "string_types", ")", ")", ":", "return", "None", "data", "=", "'shares'", "if", "path", "!=", "''", ":", "data", "+=", "'?'", "path", "=", "self", ".", "_encode_string", "(", "self", ".", "_normalize_path", "(", "path", ")", ")", "args", "=", "{", "'path'", ":", "path", "}", "reshares", "=", "kwargs", ".", "get", "(", "'reshares'", ",", "False", ")", "if", "isinstance", "(", "reshares", ",", "bool", ")", "and", "reshares", ":", "args", "[", "'reshares'", "]", "=", "reshares", "subfiles", "=", "kwargs", ".", "get", "(", "'subfiles'", ",", "False", ")", "if", "isinstance", "(", "subfiles", ",", "bool", ")", "and", "subfiles", ":", "args", "[", "'subfiles'", "]", "=", "str", "(", "subfiles", ")", ".", "lower", "(", ")", "shared_with_me", "=", "kwargs", ".", "get", "(", "'shared_with_me'", ",", "False", ")", "if", "isinstance", "(", "shared_with_me", ",", "bool", ")", "and", "shared_with_me", ":", "args", "[", "'shared_with_me'", "]", "=", "\"true\"", "del", "args", "[", "'path'", "]", "data", "+=", "parse", ".", "urlencode", "(", "args", ")", "res", "=", "self", ".", "_make_ocs_request", "(", "'GET'", ",", "self", ".", "OCS_SERVICE_SHARE", ",", "data", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ")", "shares", "=", "[", "]", "for", "element", "in", "tree", ".", "find", "(", "'data'", ")", ".", "iter", "(", "'element'", ")", ":", "'''share_attr = {}\n for child in element:\n key = child.tag\n value = child.text\n share_attr[key] = value\n shares.append(share_attr)'''", "shares", ".", "append", "(", "self", ".", "_get_shareinfo", "(", "element", ")", ")", "return", "shares", "raise", "HTTPResponseError", "(", "res", ")" ]
Returns array of shares :param path: path to the share to be checked :param reshares: (optional, boolean) returns not only the shares from the current user but all shares from the given file (default: False) :param subfiles: (optional, boolean) returns all shares within a folder, given that path defines a folder (default: False) :param shared_with_me: (optional, boolean) returns all shares which are shared with me (default: False) :returns: array of shares ShareInfo instances or empty array if the operation failed :raises: HTTPResponseError in case an HTTP error status was returned
[ "Returns", "array", "of", "shares" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L889-L943
train
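A sketch of querying shares with the client oc and the documented keyword flags (paths hypothetical); note that shared_with_me drops the path query parameter entirely, per the del args['path'] above:

shares = oc.get_shares('/Photos', subfiles=True)  # all shares inside the folder
mine = oc.get_shares('/', shared_with_me=True)    # path is dropped when shared_with_me is set
for share in shares:
    print(share)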
owncloud/pyocclient
owncloud/owncloud.py
Client.create_user
python
def create_user(self, user_name, initial_password):
    """Create a new user with an initial password via provisioning API.
    It is not an error, if the user already existed before.
    If you get back an error 999, then the provisioning API is not enabled.

    :param user_name: name of user to be created
    :param initial_password: password for user being created
    :returns: True on success
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    res = self._make_ocs_request(
        'POST',
        self.OCS_SERVICE_CLOUD,
        'users',
        data={'password': initial_password, 'userid': user_name}
    )

    # We get 200 when the user was just created.
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree, [100])
        return True

    raise HTTPResponseError(res)
[ "def", "create_user", "(", "self", ",", "user_name", ",", "initial_password", ")", ":", "res", "=", "self", ".", "_make_ocs_request", "(", "'POST'", ",", "self", ".", "OCS_SERVICE_CLOUD", ",", "'users'", ",", "data", "=", "{", "'password'", ":", "initial_password", ",", "'userid'", ":", "user_name", "}", ")", "# We get 200 when the user was just created.", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ",", "[", "100", "]", ")", "return", "True", "raise", "HTTPResponseError", "(", "res", ")" ]
Create a new user with an initial password via provisioning API. It is not an error, if the user already existed before. If you get back an error 999, then the provisioning API is not enabled. :param user_name: name of user to be created :param initial_password: password for user being created :returns: True on success :raises: HTTPResponseError in case an HTTP error status was returned
[ "Create", "a", "new", "user", "with", "an", "initial", "password", "via", "provisioning", "API", ".", "It", "is", "not", "an", "error", "if", "the", "user", "already", "existed", "before", ".", "If", "you", "get", "back", "an", "error", "999", "then", "the", "provisioning", "API", "is", "not", "enabled", "." ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L945-L969
train
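A provisioning sketch for create_user with the client oc (credentials hypothetical); per the docstring, OCS status 100 means success and error 999 means the provisioning API is disabled:

created = oc.create_user('alice', 'initial-secret')  # hypothetical user and password
print(created)  # True on success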
owncloud/pyocclient
owncloud/owncloud.py
Client.delete_user
python
def delete_user(self, user_name):
    """Deletes a user via provisioning API.
    If you get back an error 999, then the provisioning API is not enabled.

    :param user_name: name of user to be deleted
    :returns: True on success
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    res = self._make_ocs_request(
        'DELETE',
        self.OCS_SERVICE_CLOUD,
        'users/' + user_name
    )

    # We get 200 when the user was deleted.
    if res.status_code == 200:
        return True

    raise HTTPResponseError(res)
[ "def", "delete_user", "(", "self", ",", "user_name", ")", ":", "res", "=", "self", ".", "_make_ocs_request", "(", "'DELETE'", ",", "self", ".", "OCS_SERVICE_CLOUD", ",", "'users/'", "+", "user_name", ")", "# We get 200 when the user was deleted.", "if", "res", ".", "status_code", "==", "200", ":", "return", "True", "raise", "HTTPResponseError", "(", "res", ")" ]
Deletes a user via provisioning API. If you get back an error 999, then the provisioning API is not enabled. :param user_name: name of user to be deleted :returns: True on success :raises: HTTPResponseError in case an HTTP error status was returned
[ "Deletes", "a", "user", "via", "provisioning", "API", ".", "If", "you", "get", "back", "an", "error", "999", "then", "the", "provisioning", "API", "is", "not", "enabled", "." ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L971-L990
train
owncloud/pyocclient
owncloud/owncloud.py
Client.search_users
python
def search_users(self, user_name):
    """Searches for users via provisioning API.
    If you get back an error 999, then the provisioning API is not enabled.

    :param user_name: name of user to be searched for
    :returns: list of usernames that contain user_name as substring
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    action_path = 'users'
    if user_name:
        action_path += '?search={}'.format(user_name)

    res = self._make_ocs_request(
        'GET',
        self.OCS_SERVICE_CLOUD,
        action_path
    )

    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        users = [x.text for x in tree.findall('data/users/element')]
        return users

    raise HTTPResponseError(res)
[ "def", "search_users", "(", "self", ",", "user_name", ")", ":", "action_path", "=", "'users'", "if", "user_name", ":", "action_path", "+=", "'?search={}'", ".", "format", "(", "user_name", ")", "res", "=", "self", ".", "_make_ocs_request", "(", "'GET'", ",", "self", ".", "OCS_SERVICE_CLOUD", ",", "action_path", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "users", "=", "[", "x", ".", "text", "for", "x", "in", "tree", ".", "findall", "(", "'data/users/element'", ")", "]", "return", "users", "raise", "HTTPResponseError", "(", "res", ")" ]
Searches for users via provisioning API. If you get back an error 999, then the provisioning API is not enabled. :param user_name: name of user to be searched for :returns: list of usernames that contain user_name as substring :raises: HTTPResponseError in case an HTTP error status was returned
[ "Searches", "for", "users", "via", "provisioning", "API", ".", "If", "you", "get", "back", "an", "error", "999", "then", "the", "provisioning", "API", "is", "not", "enabled", "." ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1004-L1029
train
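Continuing with the `oc` client from the sketch above (the result names are invented); note that an empty `user_name` skips the `?search=` suffix, so the call lists all users:

>>> oc.search_users('bob')
['bob', 'bobby']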
owncloud/pyocclient
owncloud/owncloud.py
Client.set_user_attribute
def set_user_attribute(self, user_name, key, value): """Sets a user attribute :param user_name: name of user to modify :param key: key of the attribute to set :param value: value to set :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request( 'PUT', self.OCS_SERVICE_CLOUD, 'users/' + parse.quote(user_name), data={'key': self._encode_string(key), 'value': self._encode_string(value)} ) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree, [100]) return True raise HTTPResponseError(res)
python
def set_user_attribute(self, user_name, key, value): """Sets a user attribute :param user_name: name of user to modify :param key: key of the attribute to set :param value: value to set :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request( 'PUT', self.OCS_SERVICE_CLOUD, 'users/' + parse.quote(user_name), data={'key': self._encode_string(key), 'value': self._encode_string(value)} ) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree, [100]) return True raise HTTPResponseError(res)
[ "def", "set_user_attribute", "(", "self", ",", "user_name", ",", "key", ",", "value", ")", ":", "res", "=", "self", ".", "_make_ocs_request", "(", "'PUT'", ",", "self", ".", "OCS_SERVICE_CLOUD", ",", "'users/'", "+", "parse", ".", "quote", "(", "user_name", ")", ",", "data", "=", "{", "'key'", ":", "self", ".", "_encode_string", "(", "key", ")", ",", "'value'", ":", "self", ".", "_encode_string", "(", "value", ")", "}", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ",", "[", "100", "]", ")", "return", "True", "raise", "HTTPResponseError", "(", "res", ")" ]
Sets a user attribute :param user_name: name of user to modify :param key: key of the attribute to set :param value: value to set :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned
[ "Sets", "a", "user", "attribute" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1041-L1063
train
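A sketch with the same hypothetical client; 'email' is one attribute key the provisioning API documents:

>>> oc.set_user_attribute('bob', 'email', '[email protected]')
True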
owncloud/pyocclient
owncloud/owncloud.py
Client.add_user_to_group
def add_user_to_group(self, user_name, group_name): """Adds a user to a group. :param user_name: name of user to be added :param group_name: name of group user is to be added to :returns: True if user added :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request( 'POST', self.OCS_SERVICE_CLOUD, 'users/' + user_name + '/groups', data={'groupid': group_name} ) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree, [100]) return True raise HTTPResponseError(res)
python
def add_user_to_group(self, user_name, group_name): """Adds a user to a group. :param user_name: name of user to be added :param group_name: name of group user is to be added to :returns: True if user added :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request( 'POST', self.OCS_SERVICE_CLOUD, 'users/' + user_name + '/groups', data={'groupid': group_name} ) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree, [100]) return True raise HTTPResponseError(res)
[ "def", "add_user_to_group", "(", "self", ",", "user_name", ",", "group_name", ")", ":", "res", "=", "self", ".", "_make_ocs_request", "(", "'POST'", ",", "self", ".", "OCS_SERVICE_CLOUD", ",", "'users/'", "+", "user_name", "+", "'/groups'", ",", "data", "=", "{", "'groupid'", ":", "group_name", "}", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ",", "[", "100", "]", ")", "return", "True", "raise", "HTTPResponseError", "(", "res", ")" ]
Adds a user to a group. :param user_name: name of user to be added :param group_name: name of group user is to be added to :returns: True if user added :raises: HTTPResponseError in case an HTTP error status was returned
[ "Adds", "a", "user", "to", "a", "group", "." ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1065-L1087
train
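A sketch (the group name is invented); a failing OCS status here surfaces as an OCSResponseError from the status check rather than a False return:

>>> oc.add_user_to_group('bob', 'accounting')
True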
owncloud/pyocclient
owncloud/owncloud.py
Client.get_user_groups
def get_user_groups(self, user_name): """Get a list of groups associated with a user. :param user_name: name of user to list groups :returns: list of groups :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request( 'GET', self.OCS_SERVICE_CLOUD, 'users/' + user_name + '/groups', ) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree, [100]) return [group.text for group in tree.find('data/groups')] raise HTTPResponseError(res)
python
def get_user_groups(self, user_name): """Get a list of groups associated with a user. :param user_name: name of user to list groups :returns: list of groups :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request( 'GET', self.OCS_SERVICE_CLOUD, 'users/' + user_name + '/groups', ) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree, [100]) return [group.text for group in tree.find('data/groups')] raise HTTPResponseError(res)
[ "def", "get_user_groups", "(", "self", ",", "user_name", ")", ":", "res", "=", "self", ".", "_make_ocs_request", "(", "'GET'", ",", "self", ".", "OCS_SERVICE_CLOUD", ",", "'users/'", "+", "user_name", "+", "'/groups'", ",", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ",", "[", "100", "]", ")", "return", "[", "group", ".", "text", "for", "group", "in", "tree", ".", "find", "(", "'data/groups'", ")", "]", "raise", "HTTPResponseError", "(", "res", ")" ]
Get a list of groups associated with a user. :param user_name: name of user to list groups :returns: list of groups :raises: HTTPResponseError in case an HTTP error status was returned
[ "Get", "a", "list", "of", "groups", "associated", "to", "a", "user", "." ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1089-L1109
train
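With the same hypothetical client and data:

>>> oc.get_user_groups('bob')
['accounting', 'staff']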
owncloud/pyocclient
owncloud/owncloud.py
Client.get_user
def get_user(self, user_name): """Retrieves information about a user :param user_name: name of user to query :returns: Dictionary of information about user :raises: OCSResponseError in case an OCS error status was returned """ res = self._make_ocs_request( 'GET', self.OCS_SERVICE_CLOUD, 'users/' + parse.quote(user_name), data={} ) tree = ET.fromstring(res.content) self._check_ocs_status(tree) # <ocs><meta><statuscode>100</statuscode><status>ok</status></meta> # <data> # <email>[email protected]</email><quota>0</quota><enabled>true</enabled> # </data> # </ocs> data_element = tree.find('data') return self._xml_to_dict(data_element)
python
def get_user(self, user_name): """Retrieves information about a user :param user_name: name of user to query :returns: Dictionary of information about user :raises: OCSResponseError in case an OCS error status was returned """ res = self._make_ocs_request( 'GET', self.OCS_SERVICE_CLOUD, 'users/' + parse.quote(user_name), data={} ) tree = ET.fromstring(res.content) self._check_ocs_status(tree) # <ocs><meta><statuscode>100</statuscode><status>ok</status></meta> # <data> # <email>[email protected]</email><quota>0</quota><enabled>true</enabled> # </data> # </ocs> data_element = tree.find('data') return self._xml_to_dict(data_element)
[ "def", "get_user", "(", "self", ",", "user_name", ")", ":", "res", "=", "self", ".", "_make_ocs_request", "(", "'GET'", ",", "self", ".", "OCS_SERVICE_CLOUD", ",", "'users/'", "+", "parse", ".", "quote", "(", "user_name", ")", ",", "data", "=", "{", "}", ")", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ")", "# <ocs><meta><statuscode>100</statuscode><status>ok</status></meta>", "# <data>", "# <email>[email protected]</email><quota>0</quota><enabled>true</enabled>", "# </data>", "# </ocs>", "data_element", "=", "tree", ".", "find", "(", "'data'", ")", "return", "self", ".", "_xml_to_dict", "(", "data_element", ")" ]
Retrieves information about a user :param user_name: name of user to query :returns: Dictionary of information about user :raises: OCSResponseError in case an OCS error status was returned
[ "Retrieves", "information", "about", "a", "user" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1121-L1145
train
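The returned dictionary mirrors the XML sample in the code comment; a sketch:

>>> info = oc.get_user('bob')
>>> info['email']
'[email protected]'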
owncloud/pyocclient
owncloud/owncloud.py
Client.get_user_subadmin_groups
def get_user_subadmin_groups(self, user_name): """Get a list of subadmin groups associated with a user. :param user_name: name of user :returns: list of subadmin groups :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request( 'GET', self.OCS_SERVICE_CLOUD, 'users/' + user_name + '/subadmins', ) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree, [100]) groups = tree.find('data') return groups raise HTTPResponseError(res)
python
def get_user_subadmin_groups(self, user_name): """Get a list of subadmin groups associated with a user. :param user_name: name of user :returns: list of subadmin groups :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request( 'GET', self.OCS_SERVICE_CLOUD, 'users/' + user_name + '/subadmins', ) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree, [100]) groups = tree.find('data') return groups raise HTTPResponseError(res)
[ "def", "get_user_subadmin_groups", "(", "self", ",", "user_name", ")", ":", "res", "=", "self", ".", "_make_ocs_request", "(", "'GET'", ",", "self", ".", "OCS_SERVICE_CLOUD", ",", "'users/'", "+", "user_name", "+", "'/subadmins'", ",", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ",", "[", "100", "]", ")", "groups", "=", "tree", ".", "find", "(", "'data'", ")", "return", "groups", "raise", "HTTPResponseError", "(", "res", ")" ]
Get a list of subadmin groups associated with a user. :param user_name: name of user :returns: list of subadmin groups :raises: HTTPResponseError in case an HTTP error status was returned
[ "Get", "a", "list", "of", "subadmin", "groups", "associated", "to", "a", "user", "." ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1194-L1217
train
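Note that, unlike get_user_groups, this returns the raw 'data' element rather than a list of strings, so callers iterate it themselves; a sketch assuming the usual <element> children:

>>> groups_el = oc.get_user_subadmin_groups('bob')
>>> [g.text for g in groups_el]
['accounting']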
owncloud/pyocclient
owncloud/owncloud.py
Client.share_file_with_user
def share_file_with_user(self, path, user, **kwargs): """Shares a remote file with specified user :param path: path to the remote file to share :param user: name of the user with whom we want to share a file/folder :param perms (optional): permissions of the shared object, defaults to read only (1) http://doc.owncloud.org/server/6.0/admin_manual/sharing_api/index.html :param remote_user (optional): True if it is a federated user, defaults to False if it is a local user :returns: instance of :class:`ShareInfo` with the share info or False if the operation failed :raises: HTTPResponseError in case an HTTP error status was returned """ remote_user = kwargs.get('remote_user', False) perms = kwargs.get('perms', self.OCS_PERMISSION_READ) if (((not isinstance(perms, int)) or (perms > self.OCS_PERMISSION_ALL)) or ((not isinstance(user, six.string_types)) or (user == ''))): return False if remote_user and (not user.endswith('/')): user = user + '/' path = self._normalize_path(path) post_data = { 'shareType': self.OCS_SHARE_TYPE_REMOTE if remote_user else self.OCS_SHARE_TYPE_USER, 'shareWith': user, 'path': self._encode_string(path), 'permissions': perms } res = self._make_ocs_request( 'POST', self.OCS_SERVICE_SHARE, 'shares', data=post_data ) if self._debug: print('OCS share_file request for file %s with permissions %i ' 'returned: %i' % (path, perms, res.status_code)) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree) data_el = tree.find('data') return ShareInfo( { 'id': data_el.find('id').text, 'path': path, 'permissions': perms } ) raise HTTPResponseError(res)
python
def share_file_with_user(self, path, user, **kwargs): """Shares a remote file with specified user :param path: path to the remote file to share :param user: name of the user with whom we want to share a file/folder :param perms (optional): permissions of the shared object, defaults to read only (1) http://doc.owncloud.org/server/6.0/admin_manual/sharing_api/index.html :param remote_user (optional): True if it is a federated user, defaults to False if it is a local user :returns: instance of :class:`ShareInfo` with the share info or False if the operation failed :raises: HTTPResponseError in case an HTTP error status was returned """ remote_user = kwargs.get('remote_user', False) perms = kwargs.get('perms', self.OCS_PERMISSION_READ) if (((not isinstance(perms, int)) or (perms > self.OCS_PERMISSION_ALL)) or ((not isinstance(user, six.string_types)) or (user == ''))): return False if remote_user and (not user.endswith('/')): user = user + '/' path = self._normalize_path(path) post_data = { 'shareType': self.OCS_SHARE_TYPE_REMOTE if remote_user else self.OCS_SHARE_TYPE_USER, 'shareWith': user, 'path': self._encode_string(path), 'permissions': perms } res = self._make_ocs_request( 'POST', self.OCS_SERVICE_SHARE, 'shares', data=post_data ) if self._debug: print('OCS share_file request for file %s with permissions %i ' 'returned: %i' % (path, perms, res.status_code)) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree) data_el = tree.find('data') return ShareInfo( { 'id': data_el.find('id').text, 'path': path, 'permissions': perms } ) raise HTTPResponseError(res)
[ "def", "share_file_with_user", "(", "self", ",", "path", ",", "user", ",", "*", "*", "kwargs", ")", ":", "remote_user", "=", "kwargs", ".", "get", "(", "'remote_user'", ",", "False", ")", "perms", "=", "kwargs", ".", "get", "(", "'perms'", ",", "self", ".", "OCS_PERMISSION_READ", ")", "if", "(", "(", "(", "not", "isinstance", "(", "perms", ",", "int", ")", ")", "or", "(", "perms", ">", "self", ".", "OCS_PERMISSION_ALL", ")", ")", "or", "(", "(", "not", "isinstance", "(", "user", ",", "six", ".", "string_types", ")", ")", "or", "(", "user", "==", "''", ")", ")", ")", ":", "return", "False", "if", "remote_user", "and", "(", "not", "user", ".", "endswith", "(", "'/'", ")", ")", ":", "user", "=", "user", "+", "'/'", "path", "=", "self", ".", "_normalize_path", "(", "path", ")", "post_data", "=", "{", "'shareType'", ":", "self", ".", "OCS_SHARE_TYPE_REMOTE", "if", "remote_user", "else", "self", ".", "OCS_SHARE_TYPE_USER", ",", "'shareWith'", ":", "user", ",", "'path'", ":", "self", ".", "_encode_string", "(", "path", ")", ",", "'permissions'", ":", "perms", "}", "res", "=", "self", ".", "_make_ocs_request", "(", "'POST'", ",", "self", ".", "OCS_SERVICE_SHARE", ",", "'shares'", ",", "data", "=", "post_data", ")", "if", "self", ".", "_debug", ":", "print", "(", "'OCS share_file request for file %s with permissions %i '", "'returned: %i'", "%", "(", "path", ",", "perms", ",", "res", ".", "status_code", ")", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ")", "data_el", "=", "tree", ".", "find", "(", "'data'", ")", "return", "ShareInfo", "(", "{", "'id'", ":", "data_el", ".", "find", "(", "'id'", ")", ".", "text", ",", "'path'", ":", "path", ",", "'permissions'", ":", "perms", "}", ")", "raise", "HTTPResponseError", "(", "res", ")" ]
Shares a remote file with specified user :param path: path to the remote file to share :param user: name of the user with whom we want to share a file/folder :param perms (optional): permissions of the shared object, defaults to read only (1) http://doc.owncloud.org/server/6.0/admin_manual/sharing_api/index.html :param remote_user (optional): True if it is a federated user, defaults to False if it is a local user :returns: instance of :class:`ShareInfo` with the share info or False if the operation failed :raises: HTTPResponseError in case an HTTP error status was returned
[ "Shares", "a", "remote", "file", "with", "specified", "user" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1229-L1281
train
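A sketch combining the permission constants defined on the client class (OCS_PERMISSION_SHARE is assumed to exist alongside the READ/ALL constants used above; the path and user are invented):

>>> perms = oc.OCS_PERMISSION_READ | oc.OCS_PERMISSION_SHARE
>>> share = oc.share_file_with_user('/docs/report.pdf', 'bob', perms=perms)

On success `share` is a ShareInfo instance; invalid perms or an empty user name return False before any request is made.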
owncloud/pyocclient
owncloud/owncloud.py
Client.delete_group
def delete_group(self, group_name): """Delete a group via provisioning API. If you get back an error 999, then the provisioning API is not enabled. :param group_name: name of group to be deleted :returns: True if group deleted :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request( 'DELETE', self.OCS_SERVICE_CLOUD, 'groups/' + group_name ) # We get 200 when the group was just deleted. if res.status_code == 200: return True raise HTTPResponseError(res)
python
def delete_group(self, group_name): """Delete a group via provisioning API. If you get back an error 999, then the provisioning API is not enabled. :param group_name: name of group to be deleted :returns: True if group deleted :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request( 'DELETE', self.OCS_SERVICE_CLOUD, 'groups/' + group_name ) # We get 200 when the group was just deleted. if res.status_code == 200: return True raise HTTPResponseError(res)
[ "def", "delete_group", "(", "self", ",", "group_name", ")", ":", "res", "=", "self", ".", "_make_ocs_request", "(", "'DELETE'", ",", "self", ".", "OCS_SERVICE_CLOUD", ",", "'groups/'", "+", "group_name", ")", "# We get 200 when the group was just deleted.", "if", "res", ".", "status_code", "==", "200", ":", "return", "True", "raise", "HTTPResponseError", "(", "res", ")" ]
Delete a group via provisioning API. If you get back an error 999, then the provisioning API is not enabled. :param group_name: name of group to be deleted :returns: True if group deleted :raises: HTTPResponseError in case an HTTP error status was returned
[ "Delete", "a", "group", "via", "provisioning", "API", ".", "If", "you", "get", "back", "an", "error", "999", "then", "the", "provisioning", "API", "is", "not", "enabled", "." ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1307-L1326
train
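Mirroring delete_user above:

>>> oc.delete_group('accounting')
True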
owncloud/pyocclient
owncloud/owncloud.py
Client.get_groups
def get_groups(self): """Get groups via provisioning API. If you get back an error 999, then the provisioning API is not enabled. :returns: list of groups :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request( 'GET', self.OCS_SERVICE_CLOUD, 'groups' ) if res.status_code == 200: tree = ET.fromstring(res.content) groups = [x.text for x in tree.findall('data/groups/element')] return groups raise HTTPResponseError(res)
python
def get_groups(self): """Get groups via provisioning API. If you get back an error 999, then the provisioning API is not enabled. :returns: list of groups :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request( 'GET', self.OCS_SERVICE_CLOUD, 'groups' ) if res.status_code == 200: tree = ET.fromstring(res.content) groups = [x.text for x in tree.findall('data/groups/element')] return groups raise HTTPResponseError(res)
[ "def", "get_groups", "(", "self", ")", ":", "res", "=", "self", ".", "_make_ocs_request", "(", "'GET'", ",", "self", ".", "OCS_SERVICE_CLOUD", ",", "'groups'", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "groups", "=", "[", "x", ".", "text", "for", "x", "in", "tree", ".", "findall", "(", "'data/groups/element'", ")", "]", "return", "groups", "raise", "HTTPResponseError", "(", "res", ")" ]
Get groups via provisioning API. If you get back an error 999, then the provisioning API is not enabled. :returns: list of groups :raises: HTTPResponseError in case an HTTP error status was returned
[ "Get", "groups", "via", "provisioning", "API", ".", "If", "you", "get", "back", "an", "error", "999", "then", "the", "provisioning", "API", "is", "not", "enabled", "." ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1328-L1348
train
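With the same hypothetical client:

>>> oc.get_groups()
['admin', 'staff']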
owncloud/pyocclient
owncloud/owncloud.py
Client.get_group_members
def get_group_members(self, group_name): """Get group members via provisioning API. If you get back an error 999, then the provisioning API is not enabled. :param group_name: name of group to list members :returns: list of group members :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request( 'GET', self.OCS_SERVICE_CLOUD, 'groups/' + group_name ) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree, [100]) return [group.text for group in tree.find('data/users')] raise HTTPResponseError(res)
python
def get_group_members(self, group_name): """Get group members via provisioning API. If you get back an error 999, then the provisioning API is not enabled. :param group_name: name of group to list members :returns: list of group members :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request( 'GET', self.OCS_SERVICE_CLOUD, 'groups/' + group_name ) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree, [100]) return [group.text for group in tree.find('data/users')] raise HTTPResponseError(res)
[ "def", "get_group_members", "(", "self", ",", "group_name", ")", ":", "res", "=", "self", ".", "_make_ocs_request", "(", "'GET'", ",", "self", ".", "OCS_SERVICE_CLOUD", ",", "'groups/'", "+", "group_name", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ",", "[", "100", "]", ")", "return", "[", "group", ".", "text", "for", "group", "in", "tree", ".", "find", "(", "'data/users'", ")", "]", "raise", "HTTPResponseError", "(", "res", ")" ]
Get group members via provisioning API. If you get back an error 999, then the provisioning API is not enabled. :param group_name: name of group to list members :returns: list of group members :raises: HTTPResponseError in case an HTTP error status was returned
[ "Get", "group", "members", "via", "provisioning", "API", ".", "If", "you", "get", "back", "an", "error", "999", "then", "the", "provisioning", "API", "is", "not", "enabled", "." ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1350-L1370
train
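A sketch (the member names are invented):

>>> oc.get_group_members('staff')
['alice', 'bob']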
owncloud/pyocclient
owncloud/owncloud.py
Client.group_exists
def group_exists(self, group_name): """Checks a group via provisioning API. If you get back an error 999, then the provisioning API is not enabled. :param group_name: name of group to be checked :returns: True if group exists :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request( 'GET', self.OCS_SERVICE_CLOUD, 'groups?search=' + group_name ) if res.status_code == 200: tree = ET.fromstring(res.content) for code_el in tree.findall('data/groups/element'): if code_el is not None and code_el.text == group_name: return True return False raise HTTPResponseError(res)
python
def group_exists(self, group_name): """Checks a group via provisioning API. If you get back an error 999, then the provisioning API is not enabled. :param group_name: name of group to be checked :returns: True if group exists :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request( 'GET', self.OCS_SERVICE_CLOUD, 'groups?search=' + group_name ) if res.status_code == 200: tree = ET.fromstring(res.content) for code_el in tree.findall('data/groups/element'): if code_el is not None and code_el.text == group_name: return True return False raise HTTPResponseError(res)
[ "def", "group_exists", "(", "self", ",", "group_name", ")", ":", "res", "=", "self", ".", "_make_ocs_request", "(", "'GET'", ",", "self", ".", "OCS_SERVICE_CLOUD", ",", "'groups?search='", "+", "group_name", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "for", "code_el", "in", "tree", ".", "findall", "(", "'data/groups/element'", ")", ":", "if", "code_el", "is", "not", "None", "and", "code_el", ".", "text", "==", "group_name", ":", "return", "True", "return", "False", "raise", "HTTPResponseError", "(", "res", ")" ]
Checks a group via provisioning API. If you get back an error 999, then the provisioning API is not enabled. :param group_name: name of group to be checked :returns: True if group exists :raises: HTTPResponseError in case an HTTP error status was returned
[ "Checks", "a", "group", "via", "provisioning", "API", ".", "If", "you", "get", "back", "an", "error", "999", "then", "the", "provisioning", "API", "is", "not", "enabled", "." ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1372-L1396
train
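Because the search endpoint can return partial matches, the loop compares each result for an exact match; a sketch:

>>> oc.group_exists('staff')
True
>>> oc.group_exists('staf')
False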
owncloud/pyocclient
owncloud/owncloud.py
Client.share_file_with_group
def share_file_with_group(self, path, group, **kwargs): """Shares a remote file with specified group :param path: path to the remote file to share :param group: name of the group with which we want to share a file/folder :param perms (optional): permissions of the shared object, defaults to read only (1) http://doc.owncloud.org/server/6.0/admin_manual/sharing_api/index.html :returns: instance of :class:`ShareInfo` with the share info or False if the operation failed :raises: HTTPResponseError in case an HTTP error status was returned """ perms = kwargs.get('perms', self.OCS_PERMISSION_READ) if (((not isinstance(perms, int)) or (perms > self.OCS_PERMISSION_ALL)) or ((not isinstance(group, six.string_types)) or (group == ''))): return False path = self._normalize_path(path) post_data = {'shareType': self.OCS_SHARE_TYPE_GROUP, 'shareWith': group, 'path': path, 'permissions': perms} res = self._make_ocs_request( 'POST', self.OCS_SERVICE_SHARE, 'shares', data=post_data ) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree) data_el = tree.find('data') return ShareInfo( { 'id': data_el.find('id').text, 'path': path, 'permissions': perms } ) raise HTTPResponseError(res)
python
def share_file_with_group(self, path, group, **kwargs): """Shares a remote file with specified group :param path: path to the remote file to share :param group: name of the group with which we want to share a file/folder :param perms (optional): permissions of the shared object, defaults to read only (1) http://doc.owncloud.org/server/6.0/admin_manual/sharing_api/index.html :returns: instance of :class:`ShareInfo` with the share info or False if the operation failed :raises: HTTPResponseError in case an HTTP error status was returned """ perms = kwargs.get('perms', self.OCS_PERMISSION_READ) if (((not isinstance(perms, int)) or (perms > self.OCS_PERMISSION_ALL)) or ((not isinstance(group, six.string_types)) or (group == ''))): return False path = self._normalize_path(path) post_data = {'shareType': self.OCS_SHARE_TYPE_GROUP, 'shareWith': group, 'path': path, 'permissions': perms} res = self._make_ocs_request( 'POST', self.OCS_SERVICE_SHARE, 'shares', data=post_data ) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree) data_el = tree.find('data') return ShareInfo( { 'id': data_el.find('id').text, 'path': path, 'permissions': perms } ) raise HTTPResponseError(res)
[ "def", "share_file_with_group", "(", "self", ",", "path", ",", "group", ",", "*", "*", "kwargs", ")", ":", "perms", "=", "kwargs", ".", "get", "(", "'perms'", ",", "self", ".", "OCS_PERMISSION_READ", ")", "if", "(", "(", "(", "not", "isinstance", "(", "perms", ",", "int", ")", ")", "or", "(", "perms", ">", "self", ".", "OCS_PERMISSION_ALL", ")", ")", "or", "(", "(", "not", "isinstance", "(", "group", ",", "six", ".", "string_types", ")", ")", "or", "(", "group", "==", "''", ")", ")", ")", ":", "return", "False", "path", "=", "self", ".", "_normalize_path", "(", "path", ")", "post_data", "=", "{", "'shareType'", ":", "self", ".", "OCS_SHARE_TYPE_GROUP", ",", "'shareWith'", ":", "group", ",", "'path'", ":", "path", ",", "'permissions'", ":", "perms", "}", "res", "=", "self", ".", "_make_ocs_request", "(", "'POST'", ",", "self", ".", "OCS_SERVICE_SHARE", ",", "'shares'", ",", "data", "=", "post_data", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ")", "data_el", "=", "tree", ".", "find", "(", "'data'", ")", "return", "ShareInfo", "(", "{", "'id'", ":", "data_el", ".", "find", "(", "'id'", ")", ".", "text", ",", "'path'", ":", "path", ",", "'permissions'", ":", "perms", "}", ")", "raise", "HTTPResponseError", "(", "res", ")" ]
Shares a remote file with specified group :param path: path to the remote file to share :param group: name of the group with which we want to share a file/folder :param perms (optional): permissions of the shared object, defaults to read only (1) http://doc.owncloud.org/server/6.0/admin_manual/sharing_api/index.html :returns: instance of :class:`ShareInfo` with the share info or False if the operation failed :raises: HTTPResponseError in case an HTTP error status was returned
[ "Shares", "a", "remote", "file", "with", "specified", "group" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1398-L1438
train
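A sketch using the OCS_PERMISSION_ALL constant referenced in the guard above (path and group are invented):

>>> share = oc.share_file_with_group('/docs', 'staff', perms=oc.OCS_PERMISSION_ALL)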
owncloud/pyocclient
owncloud/owncloud.py
Client.get_attribute
def get_attribute(self, app=None, key=None): """Returns an application attribute :param app: application id :param key: attribute key or None to retrieve all values for the given application :returns: attribute value if key was specified, or an array of tuples (key, value) for each attribute of the given application, or tuples (app, key, value) across all applications when no app is specified :raises: HTTPResponseError in case an HTTP error status was returned """ path = 'getattribute' if app is not None: path += '/' + parse.quote(app, '') if key is not None: path += '/' + parse.quote(self._encode_string(key), '') res = self._make_ocs_request( 'GET', self.OCS_SERVICE_PRIVATEDATA, path ) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree) values = [] for element in tree.find('data').iter('element'): app_text = element.find('app').text key_text = element.find('key').text value_text = element.find('value').text or '' if key is None: if app is None: values.append((app_text, key_text, value_text)) else: values.append((key_text, value_text)) else: return value_text if len(values) == 0 and key is not None: return None return values raise HTTPResponseError(res)
python
def get_attribute(self, app=None, key=None): """Returns an application attribute :param app: application id :param key: attribute key or None to retrieve all values for the given application :returns: attribute value if key was specified, or an array of tuples (key, value) for each attribute of the given application, or tuples (app, key, value) across all applications when no app is specified :raises: HTTPResponseError in case an HTTP error status was returned """ path = 'getattribute' if app is not None: path += '/' + parse.quote(app, '') if key is not None: path += '/' + parse.quote(self._encode_string(key), '') res = self._make_ocs_request( 'GET', self.OCS_SERVICE_PRIVATEDATA, path ) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree) values = [] for element in tree.find('data').iter('element'): app_text = element.find('app').text key_text = element.find('key').text value_text = element.find('value').text or '' if key is None: if app is None: values.append((app_text, key_text, value_text)) else: values.append((key_text, value_text)) else: return value_text if len(values) == 0 and key is not None: return None return values raise HTTPResponseError(res)
[ "def", "get_attribute", "(", "self", ",", "app", "=", "None", ",", "key", "=", "None", ")", ":", "path", "=", "'getattribute'", "if", "app", "is", "not", "None", ":", "path", "+=", "'/'", "+", "parse", ".", "quote", "(", "app", ",", "''", ")", "if", "key", "is", "not", "None", ":", "path", "+=", "'/'", "+", "parse", ".", "quote", "(", "self", ".", "_encode_string", "(", "key", ")", ",", "''", ")", "res", "=", "self", ".", "_make_ocs_request", "(", "'GET'", ",", "self", ".", "OCS_SERVICE_PRIVATEDATA", ",", "path", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ")", "values", "=", "[", "]", "for", "element", "in", "tree", ".", "find", "(", "'data'", ")", ".", "iter", "(", "'element'", ")", ":", "app_text", "=", "element", ".", "find", "(", "'app'", ")", ".", "text", "key_text", "=", "element", ".", "find", "(", "'key'", ")", ".", "text", "value_text", "=", "element", ".", "find", "(", "'value'", ")", ".", "text", "or", "''", "if", "key", "is", "None", ":", "if", "app", "is", "None", ":", "values", ".", "append", "(", "(", "app_text", ",", "key_text", ",", "value_text", ")", ")", "else", ":", "values", ".", "append", "(", "(", "key_text", ",", "value_text", ")", ")", "else", ":", "return", "value_text", "if", "len", "(", "values", ")", "==", "0", "and", "key", "is", "not", "None", ":", "return", "None", "return", "values", "raise", "HTTPResponseError", "(", "res", ")" ]
Returns an application attribute :param app: application id :param key: attribute key or None to retrieve all values for the given application :returns: attribute value if key was specified, or an array of tuples (key, value) for each attribute of the given application, or tuples (app, key, value) across all applications when no app is specified :raises: HTTPResponseError in case an HTTP error status was returned
[ "Returns", "an", "application", "attribute" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1469-L1508
train
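The three call shapes map onto the three return forms; a sketch with invented app and key names:

>>> oc.get_attribute('myapp', 'color')
'blue'
>>> oc.get_attribute('myapp')
[('color', 'blue')]
>>> oc.get_attribute()
[('myapp', 'color', 'blue')]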
owncloud/pyocclient
owncloud/owncloud.py
Client.set_attribute
def set_attribute(self, app, key, value): """Sets an application attribute :param app: application id :param key: key of the attribute to set :param value: value to set :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned """ path = 'setattribute/' + parse.quote(app, '') + '/' + parse.quote( self._encode_string(key), '') res = self._make_ocs_request( 'POST', self.OCS_SERVICE_PRIVATEDATA, path, data={'value': self._encode_string(value)} ) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree) return True raise HTTPResponseError(res)
python
def set_attribute(self, app, key, value): """Sets an application attribute :param app: application id :param key: key of the attribute to set :param value: value to set :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned """ path = 'setattribute/' + parse.quote(app, '') + '/' + parse.quote( self._encode_string(key), '') res = self._make_ocs_request( 'POST', self.OCS_SERVICE_PRIVATEDATA, path, data={'value': self._encode_string(value)} ) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree) return True raise HTTPResponseError(res)
[ "def", "set_attribute", "(", "self", ",", "app", ",", "key", ",", "value", ")", ":", "path", "=", "'setattribute/'", "+", "parse", ".", "quote", "(", "app", ",", "''", ")", "+", "'/'", "+", "parse", ".", "quote", "(", "self", ".", "_encode_string", "(", "key", ")", ",", "''", ")", "res", "=", "self", ".", "_make_ocs_request", "(", "'POST'", ",", "self", ".", "OCS_SERVICE_PRIVATEDATA", ",", "path", ",", "data", "=", "{", "'value'", ":", "self", ".", "_encode_string", "(", "value", ")", "}", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ")", "return", "True", "raise", "HTTPResponseError", "(", "res", ")" ]
Sets an application attribute :param app: application id :param key: key of the attribute to set :param value: value to set :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned
[ "Sets", "an", "application", "attribute" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1510-L1531
train
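Round-tripping with get_attribute from the previous record (values invented):

>>> oc.set_attribute('myapp', 'color', 'blue')
True
>>> oc.get_attribute('myapp', 'color')
'blue'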
owncloud/pyocclient
owncloud/owncloud.py
Client.get_apps
def get_apps(self): """ List all apps and their enabled state through the provisioning API. :returns: a dict of apps, with values True/False, representing the enabled state. :raises: HTTPResponseError in case an HTTP error status was returned """ ena_apps = {} res = self._make_ocs_request('GET', self.OCS_SERVICE_CLOUD, 'apps') if res.status_code != 200: raise HTTPResponseError(res) tree = ET.fromstring(res.content) self._check_ocs_status(tree) # <data><apps><element>files</element><element>activity</element> ... for el in tree.findall('data/apps/element'): ena_apps[el.text] = False res = self._make_ocs_request('GET', self.OCS_SERVICE_CLOUD, 'apps?filter=enabled') if res.status_code != 200: raise HTTPResponseError(res) tree = ET.fromstring(res.content) self._check_ocs_status(tree) for el in tree.findall('data/apps/element'): ena_apps[el.text] = True return ena_apps
python
def get_apps(self): """ List all apps and their enabled state through the provisioning API. :returns: a dict of apps, with values True/False, representing the enabled state. :raises: HTTPResponseError in case an HTTP error status was returned """ ena_apps = {} res = self._make_ocs_request('GET', self.OCS_SERVICE_CLOUD, 'apps') if res.status_code != 200: raise HTTPResponseError(res) tree = ET.fromstring(res.content) self._check_ocs_status(tree) # <data><apps><element>files</element><element>activity</element> ... for el in tree.findall('data/apps/element'): ena_apps[el.text] = False res = self._make_ocs_request('GET', self.OCS_SERVICE_CLOUD, 'apps?filter=enabled') if res.status_code != 200: raise HTTPResponseError(res) tree = ET.fromstring(res.content) self._check_ocs_status(tree) for el in tree.findall('data/apps/element'): ena_apps[el.text] = True return ena_apps
[ "def", "get_apps", "(", "self", ")", ":", "ena_apps", "=", "{", "}", "res", "=", "self", ".", "_make_ocs_request", "(", "'GET'", ",", "self", ".", "OCS_SERVICE_CLOUD", ",", "'apps'", ")", "if", "res", ".", "status_code", "!=", "200", ":", "raise", "HTTPResponseError", "(", "res", ")", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ")", "# <data><apps><element>files</element><element>activity</element> ...", "for", "el", "in", "tree", ".", "findall", "(", "'data/apps/element'", ")", ":", "ena_apps", "[", "el", ".", "text", "]", "=", "False", "res", "=", "self", ".", "_make_ocs_request", "(", "'GET'", ",", "self", ".", "OCS_SERVICE_CLOUD", ",", "'apps?filter=enabled'", ")", "if", "res", ".", "status_code", "!=", "200", ":", "raise", "HTTPResponseError", "(", "res", ")", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ")", "for", "el", "in", "tree", ".", "findall", "(", "'data/apps/element'", ")", ":", "ena_apps", "[", "el", ".", "text", "]", "=", "True", "return", "ena_apps" ]
List all apps and their enabled state through the provisioning API. :returns: a dict of apps, with values True/False, representing the enabled state. :raises: HTTPResponseError in case an HTTP error status was returned
[ "List", "all", "enabled", "apps", "through", "the", "provisioning", "api", "." ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1554-L1580
train
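The dict is built from two requests, all apps and then the enabled filter, so every known app appears with a boolean; a sketch:

>>> apps = oc.get_apps()
>>> apps['files']
True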
owncloud/pyocclient
owncloud/owncloud.py
Client.enable_app
def enable_app(self, appname): """Enable an app through the provisioning API :param appname: Name of app to be enabled :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request('POST', self.OCS_SERVICE_CLOUD, 'apps/' + appname) if res.status_code == 200: return True raise HTTPResponseError(res)
python
def enable_app(self, appname): """Enable an app through the provisioning API :param appname: Name of app to be enabled :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request('POST', self.OCS_SERVICE_CLOUD, 'apps/' + appname) if res.status_code == 200: return True raise HTTPResponseError(res)
[ "def", "enable_app", "(", "self", ",", "appname", ")", ":", "res", "=", "self", ".", "_make_ocs_request", "(", "'POST'", ",", "self", ".", "OCS_SERVICE_CLOUD", ",", "'apps/'", "+", "appname", ")", "if", "res", ".", "status_code", "==", "200", ":", "return", "True", "raise", "HTTPResponseError", "(", "res", ")" ]
Enable an app through the provisioning API :param appname: Name of app to be enabled :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned
[ "Enable", "an", "app", "through", "provisioning_api" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1601-L1614
train
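A sketch (the app name is invented and must match an installed app):

>>> oc.enable_app('calendar')
True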
owncloud/pyocclient
owncloud/owncloud.py
Client._encode_string
def _encode_string(s): """Encodes a unicode instance to utf-8. If a str is passed, it will simply be returned :param s: str or unicode to encode :returns: encoded output as str """ if six.PY2 and isinstance(s, unicode): return s.encode('utf-8') return s
python
def _encode_string(s): """Encodes a unicode instance to utf-8. If a str is passed, it will simply be returned :param s: str or unicode to encode :returns: encoded output as str """ if six.PY2 and isinstance(s, unicode): return s.encode('utf-8') return s
[ "def", "_encode_string", "(", "s", ")", ":", "if", "six", ".", "PY2", "and", "isinstance", "(", "s", ",", "unicode", ")", ":", "return", "s", ".", "encode", "(", "'utf-8'", ")", "return", "s" ]
Encodes a unicode instance to utf-8. If a str is passed, it will simply be returned :param s: str or unicode to encode :returns: encoded output as str
[ "Encodes", "a", "unicode", "instance", "to", "utf", "-", "8", ".", "If", "a", "str", "is", "passed", "it", "will", "simply", "be", "returned" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1644-L1653
train
owncloud/pyocclient
owncloud/owncloud.py
Client._check_ocs_status
def _check_ocs_status(tree, accepted_codes=[100]): """Checks the status code of an OCS request :param tree: response parsed with elementtree :param accepted_codes: list of status codes we consider good. E.g. [100,102] can be used to accept a POST returning an 'already exists' condition :raises: HTTPResponseError if the http status is not 200, or OCSResponseError if the OCS status is not one of the accepted_codes. """ code_el = tree.find('meta/statuscode') if code_el is not None and int(code_el.text) not in accepted_codes: r = requests.Response() msg_el = tree.find('meta/message') if msg_el is None: msg_el = tree # fallback to the entire ocs response, if we find no message. r._content = ET.tostring(msg_el) r.status_code = int(code_el.text) raise OCSResponseError(r)
python
def _check_ocs_status(tree, accepted_codes=[100]): """Checks the status code of an OCS request :param tree: response parsed with elementtree :param accepted_codes: list of status codes we consider good. E.g. [100,102] can be used to accept a POST returning an 'already exists' condition :raises: HTTPResponseError if the http status is not 200, or OCSResponseError if the OCS status is not one of the accepted_codes. """ code_el = tree.find('meta/statuscode') if code_el is not None and int(code_el.text) not in accepted_codes: r = requests.Response() msg_el = tree.find('meta/message') if msg_el is None: msg_el = tree # fallback to the entire ocs response, if we find no message. r._content = ET.tostring(msg_el) r.status_code = int(code_el.text) raise OCSResponseError(r)
[ "def", "_check_ocs_status", "(", "tree", ",", "accepted_codes", "=", "[", "100", "]", ")", ":", "code_el", "=", "tree", ".", "find", "(", "'meta/statuscode'", ")", "if", "code_el", "is", "not", "None", "and", "int", "(", "code_el", ".", "text", ")", "not", "in", "accepted_codes", ":", "r", "=", "requests", ".", "Response", "(", ")", "msg_el", "=", "tree", ".", "find", "(", "'meta/message'", ")", "if", "msg_el", "is", "None", ":", "msg_el", "=", "tree", "# fallback to the entire ocs response, if we find no message.", "r", ".", "_content", "=", "ET", ".", "tostring", "(", "msg_el", ")", "r", ".", "status_code", "=", "int", "(", "code_el", ".", "text", ")", "raise", "OCSResponseError", "(", "r", ")" ]
Checks the status code of an OCS request :param tree: response parsed with elementtree :param accepted_codes: list of status codes we consider good. E.g. [100,102] can be used to accept a POST returning an 'already exists' condition :raises: HTTPResponseError if the http status is not 200, or OCSResponseError if the OCS status is not one of the accepted_codes.
[ "Checks", "the", "status", "code", "of", "an", "OCS", "request" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1656-L1672
train
owncloud/pyocclient
owncloud/owncloud.py
Client.make_ocs_request
def make_ocs_request(self, method, service, action, **kwargs): """Makes an OCS API request and analyses the response :param method: HTTP method :param service: service name :param action: action path :param \*\*kwargs: optional arguments that ``requests.Request.request`` accepts :returns: :class:`requests.Response` instance """ accepted_codes = kwargs.pop('accepted_codes', [100]) res = self._make_ocs_request(method, service, action, **kwargs) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree, accepted_codes=accepted_codes) return res raise OCSResponseError(res)
python
def make_ocs_request(self, method, service, action, **kwargs): """Makes an OCS API request and analyses the response :param method: HTTP method :param service: service name :param action: action path :param \*\*kwargs: optional arguments that ``requests.Request.request`` accepts :returns: :class:`requests.Response` instance """ accepted_codes = kwargs.pop('accepted_codes', [100]) res = self._make_ocs_request(method, service, action, **kwargs) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree, accepted_codes=accepted_codes) return res raise OCSResponseError(res)
[ "def", "make_ocs_request", "(", "self", ",", "method", ",", "service", ",", "action", ",", "*", "*", "kwargs", ")", ":", "accepted_codes", "=", "kwargs", ".", "pop", "(", "'accepted_codes'", ",", "[", "100", "]", ")", "res", "=", "self", ".", "_make_ocs_request", "(", "method", ",", "service", ",", "action", ",", "*", "*", "kwargs", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ",", "accepted_codes", "=", "accepted_codes", ")", "return", "res", "raise", "OCSResponseError", "(", "res", ")" ]
Makes an OCS API request and analyses the response :param method: HTTP method :param service: service name :param action: action path :param \*\*kwargs: optional arguments that ``requests.Request.request`` accepts :returns: :class:`requests.Response` instance
[ "Makes", "a", "OCS", "API", "request", "and", "analyses", "the", "response" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1674-L1692
train
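For raw OCS access, a sketch reusing the OCS_SERVICE_CLOUD constant seen throughout these records (the action path and accepted codes are illustrative):

>>> res = oc.make_ocs_request('GET', oc.OCS_SERVICE_CLOUD, 'users?search=bob', accepted_codes=[100])
>>> res.status_code
200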
owncloud/pyocclient
owncloud/owncloud.py
Client._make_ocs_request
def _make_ocs_request(self, method, service, action, **kwargs): """Makes an OCS API request :param method: HTTP method :param service: service name :param action: action path :param \*\*kwargs: optional arguments that ``requests.Request.request`` accepts :returns: :class:`requests.Response` instance """ slash = '' if service: slash = '/' path = self.OCS_BASEPATH + service + slash + action attributes = kwargs.copy() if 'headers' not in attributes: attributes['headers'] = {} attributes['headers']['OCS-APIREQUEST'] = 'true' if self._debug: print('OCS request: %s %s %s' % (method, self.url + path, attributes)) res = self._session.request(method, self.url + path, **attributes) return res
python
def _make_ocs_request(self, method, service, action, **kwargs): """Makes an OCS API request :param method: HTTP method :param service: service name :param action: action path :param \*\*kwargs: optional arguments that ``requests.Request.request`` accepts :returns: :class:`requests.Response` instance """ slash = '' if service: slash = '/' path = self.OCS_BASEPATH + service + slash + action attributes = kwargs.copy() if 'headers' not in attributes: attributes['headers'] = {} attributes['headers']['OCS-APIREQUEST'] = 'true' if self._debug: print('OCS request: %s %s %s' % (method, self.url + path, attributes)) res = self._session.request(method, self.url + path, **attributes) return res
[ "def", "_make_ocs_request", "(", "self", ",", "method", ",", "service", ",", "action", ",", "*", "*", "kwargs", ")", ":", "slash", "=", "''", "if", "service", ":", "slash", "=", "'/'", "path", "=", "self", ".", "OCS_BASEPATH", "+", "service", "+", "slash", "+", "action", "attributes", "=", "kwargs", ".", "copy", "(", ")", "if", "'headers'", "not", "in", "attributes", ":", "attributes", "[", "'headers'", "]", "=", "{", "}", "attributes", "[", "'headers'", "]", "[", "'OCS-APIREQUEST'", "]", "=", "'true'", "if", "self", ".", "_debug", ":", "print", "(", "'OCS request: %s %s %s'", "%", "(", "method", ",", "self", ".", "url", "+", "path", ",", "attributes", ")", ")", "res", "=", "self", ".", "_session", ".", "request", "(", "method", ",", "self", ".", "url", "+", "path", ",", "*", "*", "attributes", ")", "return", "res" ]
Makes an OCS API request :param method: HTTP method :param service: service name :param action: action path :param \*\*kwargs: optional arguments that ``requests.Request.request`` accepts :returns: :class:`requests.Response` instance
[ "Makes", "a", "OCS", "API", "request" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1694-L1720
train
owncloud/pyocclient
owncloud/owncloud.py
Client._make_dav_request
def _make_dav_request(self, method, path, **kwargs): """Makes a WebDAV request :param method: HTTP method :param path: remote path of the targeted file :param \*\*kwargs: optional arguments that ``requests.Request.request`` accepts :returns: array of :class:`FileInfo` if the response contains it, or True if the operation succeeded, False if it didn't """ if self._debug: print('DAV request: %s %s' % (method, path)) if kwargs.get('headers'): print('Headers: ', kwargs.get('headers')) path = self._normalize_path(path) res = self._session.request( method, self._webdav_url + parse.quote(self._encode_string(path)), **kwargs ) if self._debug: print('DAV status: %i' % res.status_code) if res.status_code in [200, 207]: return self._parse_dav_response(res) if res.status_code in [204, 201]: return True raise HTTPResponseError(res)
python
def _make_dav_request(self, method, path, **kwargs): """Makes a WebDAV request :param method: HTTP method :param path: remote path of the targeted file :param \*\*kwargs: optional arguments that ``requests.Request.request`` accepts :returns: array of :class:`FileInfo` if the response contains it, or True if the operation succeeded, False if it didn't """ if self._debug: print('DAV request: %s %s' % (method, path)) if kwargs.get('headers'): print('Headers: ', kwargs.get('headers')) path = self._normalize_path(path) res = self._session.request( method, self._webdav_url + parse.quote(self._encode_string(path)), **kwargs ) if self._debug: print('DAV status: %i' % res.status_code) if res.status_code in [200, 207]: return self._parse_dav_response(res) if res.status_code in [204, 201]: return True raise HTTPResponseError(res)
[ "def", "_make_dav_request", "(", "self", ",", "method", ",", "path", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_debug", ":", "print", "(", "'DAV request: %s %s'", "%", "(", "method", ",", "path", ")", ")", "if", "kwargs", ".", "get", "(", "'headers'", ")", ":", "print", "(", "'Headers: '", ",", "kwargs", ".", "get", "(", "'headers'", ")", ")", "path", "=", "self", ".", "_normalize_path", "(", "path", ")", "res", "=", "self", ".", "_session", ".", "request", "(", "method", ",", "self", ".", "_webdav_url", "+", "parse", ".", "quote", "(", "self", ".", "_encode_string", "(", "path", ")", ")", ",", "*", "*", "kwargs", ")", "if", "self", ".", "_debug", ":", "print", "(", "'DAV status: %i'", "%", "res", ".", "status_code", ")", "if", "res", ".", "status_code", "in", "[", "200", ",", "207", "]", ":", "return", "self", ".", "_parse_dav_response", "(", "res", ")", "if", "res", ".", "status_code", "in", "[", "204", ",", "201", "]", ":", "return", "True", "raise", "HTTPResponseError", "(", "res", ")" ]
Makes a WebDAV request :param method: HTTP method :param path: remote path of the targeted file :param \*\*kwargs: optional arguments that ``requests.Request.request`` accepts :returns: array of :class:`FileInfo` if the response contains it, or True if the operation succeeded, False if it didn't
[ "Makes", "a", "WebDAV", "request" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1722-L1749
train
owncloud/pyocclient
owncloud/owncloud.py
Client._parse_dav_response
def _parse_dav_response(self, res): """Parses the DAV responses from a multi-status response :param res: DAV response :returns: array of :class:`FileInfo` or False if the operation did not succeed """ if res.status_code == 207: tree = ET.fromstring(res.content) items = [] for child in tree: items.append(self._parse_dav_element(child)) return items return False
python
def _parse_dav_response(self, res): """Parses the DAV responses from a multi-status response :param res: DAV response :returns: array of :class:`FileInfo` or False if the operation did not succeed """ if res.status_code == 207: tree = ET.fromstring(res.content) items = [] for child in tree: items.append(self._parse_dav_element(child)) return items return False
[ "def", "_parse_dav_response", "(", "self", ",", "res", ")", ":", "if", "res", ".", "status_code", "==", "207", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "items", "=", "[", "]", "for", "child", "in", "tree", ":", "items", ".", "append", "(", "self", ".", "_parse_dav_element", "(", "child", ")", ")", "return", "items", "return", "False" ]
Parses the DAV responses from a multi-status response :param res: DAV response :returns array of :class:`FileInfo` or False if the operation did not succeed
[ "Parses", "the", "DAV", "responses", "from", "a", "multi", "-", "status", "response" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1751-L1764
train
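To make the multi-status parsing above concrete, here is a self-contained sketch that feeds a tiny hand-written 207 body through ElementTree and iterates the response children the same way; the payload is hypothetical.

import xml.etree.ElementTree as ET

MULTISTATUS = b"""<?xml version="1.0"?>
<d:multistatus xmlns:d="DAV:">
  <d:response><d:href>/remote.php/webdav/notes.txt</d:href></d:response>
  <d:response><d:href>/remote.php/webdav/photos/</d:href></d:response>
</d:multistatus>"""

tree = ET.fromstring(MULTISTATUS)
for child in tree:  # one <d:response> element per resource
    print(child.find('{DAV:}href').text)
# /remote.php/webdav/notes.txt
# /remote.php/webdav/photos/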
owncloud/pyocclient
owncloud/owncloud.py
Client._parse_dav_element
def _parse_dav_element(self, dav_response): """Parses a single DAV element :param dav_response: DAV response :returns :class:`FileInfo` """ href = parse.unquote( self._strip_dav_path(dav_response.find('{DAV:}href').text) ) if six.PY2: href = href.decode('utf-8') file_type = 'file' if href[-1] == '/': file_type = 'dir' file_attrs = {} attrs = dav_response.find('{DAV:}propstat') attrs = attrs.find('{DAV:}prop') for attr in attrs: file_attrs[attr.tag] = attr.text return FileInfo(href, file_type, file_attrs)
python
def _parse_dav_element(self, dav_response): """Parses a single DAV element :param dav_response: DAV response :returns :class:`FileInfo` """ href = parse.unquote( self._strip_dav_path(dav_response.find('{DAV:}href').text) ) if six.PY2: href = href.decode('utf-8') file_type = 'file' if href[-1] == '/': file_type = 'dir' file_attrs = {} attrs = dav_response.find('{DAV:}propstat') attrs = attrs.find('{DAV:}prop') for attr in attrs: file_attrs[attr.tag] = attr.text return FileInfo(href, file_type, file_attrs)
[ "def", "_parse_dav_element", "(", "self", ",", "dav_response", ")", ":", "href", "=", "parse", ".", "unquote", "(", "self", ".", "_strip_dav_path", "(", "dav_response", ".", "find", "(", "'{DAV:}href'", ")", ".", "text", ")", ")", "if", "six", ".", "PY2", ":", "href", "=", "href", ".", "decode", "(", "'utf-8'", ")", "file_type", "=", "'file'", "if", "href", "[", "-", "1", "]", "==", "'/'", ":", "file_type", "=", "'dir'", "file_attrs", "=", "{", "}", "attrs", "=", "dav_response", ".", "find", "(", "'{DAV:}propstat'", ")", "attrs", "=", "attrs", ".", "find", "(", "'{DAV:}prop'", ")", "for", "attr", "in", "attrs", ":", "file_attrs", "[", "attr", ".", "tag", "]", "=", "attr", ".", "text", "return", "FileInfo", "(", "href", ",", "file_type", ",", "file_attrs", ")" ]
Parses a single DAV element :param dav_response: DAV response :returns :class:`FileInfo`
[ "Parses", "a", "single", "DAV", "element" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1766-L1789
train
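And a matching sketch for the per-element step: extract the href, infer the file type from the trailing slash, and collect the propstat/prop children into a dict (a stand-in for the FileInfo construction; the payload is again hypothetical).

import xml.etree.ElementTree as ET

RESPONSE = """<d:response xmlns:d="DAV:">
  <d:href>/remote.php/webdav/photos/</d:href>
  <d:propstat>
    <d:prop><d:getetag>"abc123"</d:getetag></d:prop>
  </d:propstat>
</d:response>"""

el = ET.fromstring(RESPONSE)
href = el.find('{DAV:}href').text
file_type = 'dir' if href.endswith('/') else 'file'  # trailing slash marks a collection
props = {p.tag: p.text for p in el.find('{DAV:}propstat').find('{DAV:}prop')}
print(href, file_type, props)
# /remote.php/webdav/photos/ dir {'{DAV:}getetag': '"abc123"'}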
owncloud/pyocclient
owncloud/owncloud.py
Client._webdav_move_copy
def _webdav_move_copy(self, remote_path_source, remote_path_target, operation): """Copies or moves a remote file or directory :param remote_path_source: source file or folder to copy / move :param remote_path_target: target file to which to copy / move :param operation: MOVE or COPY :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned """ if operation != "MOVE" and operation != "COPY": return False if remote_path_target[-1] == '/': remote_path_target += os.path.basename(remote_path_source) if not (remote_path_target[0] == '/'): remote_path_target = '/' + remote_path_target remote_path_source = self._normalize_path(remote_path_source) headers = { 'Destination': self._webdav_url + parse.quote( self._encode_string(remote_path_target)) } return self._make_dav_request( operation, remote_path_source, headers=headers )
python
def _webdav_move_copy(self, remote_path_source, remote_path_target, operation): """Copies or moves a remote file or directory :param remote_path_source: source file or folder to copy / move :param remote_path_target: target file to which to copy / move :param operation: MOVE or COPY :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned """ if operation != "MOVE" and operation != "COPY": return False if remote_path_target[-1] == '/': remote_path_target += os.path.basename(remote_path_source) if not (remote_path_target[0] == '/'): remote_path_target = '/' + remote_path_target remote_path_source = self._normalize_path(remote_path_source) headers = { 'Destination': self._webdav_url + parse.quote( self._encode_string(remote_path_target)) } return self._make_dav_request( operation, remote_path_source, headers=headers )
[ "def", "_webdav_move_copy", "(", "self", ",", "remote_path_source", ",", "remote_path_target", ",", "operation", ")", ":", "if", "operation", "!=", "\"MOVE\"", "and", "operation", "!=", "\"COPY\"", ":", "return", "False", "if", "remote_path_target", "[", "-", "1", "]", "==", "'/'", ":", "remote_path_target", "+=", "os", ".", "path", ".", "basename", "(", "remote_path_source", ")", "if", "not", "(", "remote_path_target", "[", "0", "]", "==", "'/'", ")", ":", "remote_path_target", "=", "'/'", "+", "remote_path_target", "remote_path_source", "=", "self", ".", "_normalize_path", "(", "remote_path_source", ")", "headers", "=", "{", "'Destination'", ":", "self", ".", "_webdav_url", "+", "parse", ".", "quote", "(", "self", ".", "_encode_string", "(", "remote_path_target", ")", ")", "}", "return", "self", ".", "_make_dav_request", "(", "operation", ",", "remote_path_source", ",", "headers", "=", "headers", ")" ]
Copies or moves a remote file or directory :param remote_path_source: source file or folder to copy / move :param remote_path_target: target file to which to copy / move :param operation: MOVE or COPY :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned
[ "Copies", "or", "moves", "a", "remote", "file", "or", "directory" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1801-L1832
train
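The interesting part of _webdav_move_copy is how it builds the Destination header; a small standalone sketch of those path fix-ups, with a placeholder base URL:

import os
from urllib.parse import quote

def dav_destination(base_url, source, target):
    # A trailing slash means "into this directory": append the source's basename.
    if target.endswith('/'):
        target += os.path.basename(source)
    if not target.startswith('/'):
        target = '/' + target
    return {'Destination': base_url + quote(target)}

print(dav_destination('https://example.com/remote.php/webdav',
                      '/docs/report.txt', '/archive/'))
# {'Destination': 'https://example.com/remote.php/webdav/archive/report.txt'}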
owncloud/pyocclient
owncloud/owncloud.py
Client._xml_to_dict
def _xml_to_dict(self, element): """ Take an XML element, iterate over it and build a dict :param element: An xml.etree.ElementTree.Element, or a list of the same :returns: A dictionary """ return_dict = {} for el in element: return_dict[el.tag] = None children = el.getchildren() if children: return_dict[el.tag] = self._xml_to_dict(children) else: return_dict[el.tag] = el.text return return_dict
python
def _xml_to_dict(self, element): """ Take an XML element, iterate over it and build a dict :param element: An xml.etree.ElementTree.Element, or a list of the same :returns: A dictionary """ return_dict = {} for el in element: return_dict[el.tag] = None children = el.getchildren() if children: return_dict[el.tag] = self._xml_to_dict(children) else: return_dict[el.tag] = el.text return return_dict
[ "def", "_xml_to_dict", "(", "self", ",", "element", ")", ":", "return_dict", "=", "{", "}", "for", "el", "in", "element", ":", "return_dict", "[", "el", ".", "tag", "]", "=", "None", "children", "=", "el", ".", "getchildren", "(", ")", "if", "children", ":", "return_dict", "[", "el", ".", "tag", "]", "=", "self", ".", "_xml_to_dict", "(", "children", ")", "else", ":", "return_dict", "[", "el", ".", "tag", "]", "=", "el", ".", "text", "return", "return_dict" ]
Take an XML element, iterate over it and build a dict :param element: An xml.etree.ElementTree.Element, or a list of the same :returns: A dictionary
[ "Take", "an", "XML", "element", "iterate", "over", "it", "and", "build", "a", "dict" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1834-L1849
train
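The recursion in _xml_to_dict is easy to check on a toy document. A standalone sketch of the same shape, using list(el) instead of the long-deprecated getchildren() so it runs on current Python 3:

import xml.etree.ElementTree as ET

def xml_to_dict(element):
    # Recurse into elements that have children; leaves map tag -> text.
    out = {}
    for el in element:
        children = list(el)
        out[el.tag] = xml_to_dict(children) if children else el.text
    return out

doc = ET.fromstring('<data><id>42</id><owner><name>alice</name></owner></data>')
print(xml_to_dict(doc))
# {'id': '42', 'owner': {'name': 'alice'}}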
owncloud/pyocclient
owncloud/owncloud.py
Client._get_shareinfo
def _get_shareinfo(self, data_el): """Simple helper which returns instance of ShareInfo class :param data_el: 'data' element extracted from _make_ocs_request :returns: instance of ShareInfo class """ if (data_el is None) or not (isinstance(data_el, ET.Element)): return None return ShareInfo(self._xml_to_dict(data_el))
python
def _get_shareinfo(self, data_el): """Simple helper which returns instance of ShareInfo class :param data_el: 'data' element extracted from _make_ocs_request :returns: instance of ShareInfo class """ if (data_el is None) or not (isinstance(data_el, ET.Element)): return None return ShareInfo(self._xml_to_dict(data_el))
[ "def", "_get_shareinfo", "(", "self", ",", "data_el", ")", ":", "if", "(", "data_el", "is", "None", ")", "or", "not", "(", "isinstance", "(", "data_el", ",", "ET", ".", "Element", ")", ")", ":", "return", "None", "return", "ShareInfo", "(", "self", ".", "_xml_to_dict", "(", "data_el", ")", ")" ]
Simple helper which returns instance of ShareInfo class :param data_el: 'data' element extracted from _make_ocs_request :returns: instance of ShareInfo class
[ "Simple", "helper", "which", "returns", "instance", "of", "ShareInfo", "class" ]
b9e1f04cdbde74588e86f1bebb6144571d82966c
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1851-L1859
train
dgovil/PySignal
PySignal.py
Signal.emit
def emit(self, *args, **kwargs): """ Calls all the connected slots with the provided args and kwargs unless block is activated """ if self._block: return for slot in self._slots: if not slot: continue elif isinstance(slot, partial): slot() elif isinstance(slot, weakref.WeakKeyDictionary): # For class methods, get the class object and call the method accordingly. for obj, method in slot.items(): method(obj, *args, **kwargs) elif isinstance(slot, weakref.ref): # If it's a weakref, call the ref to get the instance and then call the func # Don't wrap in try/except so we don't risk masking exceptions from the actual func call if (slot() is not None): slot()(*args, **kwargs) else: # Else call it in a standard way. Should be just lambdas at this point slot(*args, **kwargs)
python
def emit(self, *args, **kwargs): """ Calls all the connected slots with the provided args and kwargs unless block is activated """ if self._block: return for slot in self._slots: if not slot: continue elif isinstance(slot, partial): slot() elif isinstance(slot, weakref.WeakKeyDictionary): # For class methods, get the class object and call the method accordingly. for obj, method in slot.items(): method(obj, *args, **kwargs) elif isinstance(slot, weakref.ref): # If it's a weakref, call the ref to get the instance and then call the func # Don't wrap in try/except so we don't risk masking exceptions from the actual func call if (slot() is not None): slot()(*args, **kwargs) else: # Else call it in a standard way. Should be just lambdas at this point slot(*args, **kwargs)
[ "def", "emit", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_block", ":", "return", "for", "slot", "in", "self", ".", "_slots", ":", "if", "not", "slot", ":", "continue", "elif", "isinstance", "(", "slot", ",", "partial", ")", ":", "slot", "(", ")", "elif", "isinstance", "(", "slot", ",", "weakref", ".", "WeakKeyDictionary", ")", ":", "# For class methods, get the class object and call the method accordingly.", "for", "obj", ",", "method", "in", "slot", ".", "items", "(", ")", ":", "method", "(", "obj", ",", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "isinstance", "(", "slot", ",", "weakref", ".", "ref", ")", ":", "# If it's a weakref, call the ref to get the instance and then call the func", "# Don't wrap in try/except so we don't risk masking exceptions from the actual func call", "if", "(", "slot", "(", ")", "is", "not", "None", ")", ":", "slot", "(", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "# Else call it in a standard way. Should be just lambdas at this point", "slot", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Calls all the connected slots with the provided args and kwargs unless block is activated
[ "Calls", "all", "the", "connected", "slots", "with", "the", "provided", "args", "and", "kwargs", "unless", "block", "is", "activated" ]
72f4ced949f81e5438bd8f15247ef7890e8cc5ff
https://github.com/dgovil/PySignal/blob/72f4ced949f81e5438bd8f15247ef7890e8cc5ff/PySignal.py#L25-L49
train
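A quick usage sketch for the emit path above, assuming Signal is importable from the PySignal module and takes no constructor arguments; a bound method lands in the WeakKeyDictionary branch, so emit calls it with the instance plus the forwarded arguments.

from PySignal import Signal  # assumed import; the class is defined in PySignal.py

class Counter(object):
    def __init__(self):
        self.total = 0

    def add(self, n):
        self.total += n

c = Counter()
sig = Signal()
sig.connect(c.add)  # bound method: stored via a WeakKeyDictionary
sig.emit(5)         # calls Counter.add(c, 5)
print(c.total)      # -> 5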
dgovil/PySignal
PySignal.py
Signal.connect
def connect(self, slot): """ Connects the signal to any callable object """ if not callable(slot): raise ValueError("Connection to non-callable '%s' object failed" % slot.__class__.__name__) if (isinstance(slot, partial) or '<' in slot.__name__): # If it's a partial or a lambda. The '<' check is the only py2 and py3 compatible way I could find if slot not in self._slots: self._slots.append(slot) elif inspect.ismethod(slot): # Check if it's an instance method and store it with the instance as the key slotSelf = slot.__self__ slotDict = weakref.WeakKeyDictionary() slotDict[slotSelf] = slot.__func__ if slotDict not in self._slots: self._slots.append(slotDict) else: # If it's just a function then just store it as a weakref. newSlotRef = weakref.ref(slot) if newSlotRef not in self._slots: self._slots.append(newSlotRef)
python
def connect(self, slot): """ Connects the signal to any callable object """ if not callable(slot): raise ValueError("Connection to non-callable '%s' object failed" % slot.__class__.__name__) if (isinstance(slot, partial) or '<' in slot.__name__): # If it's a partial or a lambda. The '<' check is the only py2 and py3 compatible way I could find if slot not in self._slots: self._slots.append(slot) elif inspect.ismethod(slot): # Check if it's an instance method and store it with the instance as the key slotSelf = slot.__self__ slotDict = weakref.WeakKeyDictionary() slotDict[slotSelf] = slot.__func__ if slotDict not in self._slots: self._slots.append(slotDict) else: # If it's just a function then just store it as a weakref. newSlotRef = weakref.ref(slot) if newSlotRef not in self._slots: self._slots.append(newSlotRef)
[ "def", "connect", "(", "self", ",", "slot", ")", ":", "if", "not", "callable", "(", "slot", ")", ":", "raise", "ValueError", "(", "\"Connection to non-callable '%s' object failed\"", "%", "slot", ".", "__class__", ".", "__name__", ")", "if", "(", "isinstance", "(", "slot", ",", "partial", ")", "or", "'<'", "in", "slot", ".", "__name__", ")", ":", "# If it's a partial or a lambda. The '<' check is the only py2 and py3 compatible way I could find", "if", "slot", "not", "in", "self", ".", "_slots", ":", "self", ".", "_slots", ".", "append", "(", "slot", ")", "elif", "inspect", ".", "ismethod", "(", "slot", ")", ":", "# Check if it's an instance method and store it with the instance as the key", "slotSelf", "=", "slot", ".", "__self__", "slotDict", "=", "weakref", ".", "WeakKeyDictionary", "(", ")", "slotDict", "[", "slotSelf", "]", "=", "slot", ".", "__func__", "if", "slotDict", "not", "in", "self", ".", "_slots", ":", "self", ".", "_slots", ".", "append", "(", "slotDict", ")", "else", ":", "# If it's just a function then just store it as a weakref.", "newSlotRef", "=", "weakref", ".", "ref", "(", "slot", ")", "if", "newSlotRef", "not", "in", "self", ".", "_slots", ":", "self", ".", "_slots", ".", "append", "(", "newSlotRef", ")" ]
Connects the signal to any callable object
[ "Connects", "the", "signal", "to", "any", "callable", "object" ]
72f4ced949f81e5438bd8f15247ef7890e8cc5ff
https://github.com/dgovil/PySignal/blob/72f4ced949f81e5438bd8f15247ef7890e8cc5ff/PySignal.py#L51-L73
train
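connect's branches can be exercised directly: lambdas (caught by the '<' name check) are stored once, and non-callables are rejected. A sketch under the same import assumption:

from PySignal import Signal  # assumed import path

sig = Signal()
hello = lambda: print('hi')
sig.connect(hello)
sig.connect(hello)  # duplicate: already in _slots, so this is a no-op
sig.emit()          # prints 'hi' exactly once

try:
    sig.connect(42)  # not callable
except ValueError as err:
    print(err)       # Connection to non-callable 'int' object failed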
dgovil/PySignal
PySignal.py
Signal.disconnect
def disconnect(self, slot): """ Disconnects the slot from the signal """ if not callable(slot): return if inspect.ismethod(slot): # If it's a method, then find it by its instance slotSelf = slot.__self__ for s in self._slots: if isinstance(s, weakref.WeakKeyDictionary) and (slotSelf in s) and (s[slotSelf] is slot.__func__): self._slots.remove(s) break elif isinstance(slot, partial) or '<' in slot.__name__: # If it's a partial or lambda, try to remove directly try: self._slots.remove(slot) except ValueError: pass else: # It's probably a function, so try to remove by weakref try: self._slots.remove(weakref.ref(slot)) except ValueError: pass
python
def disconnect(self, slot): """ Disconnects the slot from the signal """ if not callable(slot): return if inspect.ismethod(slot): # If it's a method, then find it by its instance slotSelf = slot.__self__ for s in self._slots: if isinstance(s, weakref.WeakKeyDictionary) and (slotSelf in s) and (s[slotSelf] is slot.__func__): self._slots.remove(s) break elif isinstance(slot, partial) or '<' in slot.__name__: # If it's a partial or lambda, try to remove directly try: self._slots.remove(slot) except ValueError: pass else: # It's probably a function, so try to remove by weakref try: self._slots.remove(weakref.ref(slot)) except ValueError: pass
[ "def", "disconnect", "(", "self", ",", "slot", ")", ":", "if", "not", "callable", "(", "slot", ")", ":", "return", "if", "inspect", ".", "ismethod", "(", "slot", ")", ":", "# If it's a method, then find it by its instance", "slotSelf", "=", "slot", ".", "__self__", "for", "s", "in", "self", ".", "_slots", ":", "if", "isinstance", "(", "s", ",", "weakref", ".", "WeakKeyDictionary", ")", "and", "(", "slotSelf", "in", "s", ")", "and", "(", "s", "[", "slotSelf", "]", "is", "slot", ".", "__func__", ")", ":", "self", ".", "_slots", ".", "remove", "(", "s", ")", "break", "elif", "isinstance", "(", "slot", ",", "partial", ")", "or", "'<'", "in", "slot", ".", "__name__", ":", "# If it's a partial or lambda, try to remove directly", "try", ":", "self", ".", "_slots", ".", "remove", "(", "slot", ")", "except", "ValueError", ":", "pass", "else", ":", "# It's probably a function, so try to remove by weakref", "try", ":", "self", ".", "_slots", ".", "remove", "(", "weakref", ".", "ref", "(", "slot", ")", ")", "except", "ValueError", ":", "pass" ]
Disconnects the slot from the signal
[ "Disconnects", "the", "slot", "from", "the", "signal" ]
72f4ced949f81e5438bd8f15247ef7890e8cc5ff
https://github.com/dgovil/PySignal/blob/72f4ced949f81e5438bd8f15247ef7890e8cc5ff/PySignal.py#L75-L100
train
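Disconnecting a plain function works because weakref.ref instances compare equal when they point at the same live object, so remove(weakref.ref(slot)) finds the stored reference. A short sketch, again assuming the PySignal import:

from PySignal import Signal  # assumed import path

def handler(msg):
    print('handler:', msg)

sig = Signal()
sig.connect(handler)
sig.disconnect(handler)  # weakref.ref compares by referent, so removal succeeds
sig.emit('ignored')      # nothing printed: no slots remain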
dgovil/PySignal
PySignal.py
SignalFactory.block
def block(self, signals=None, isBlocked=True): """ Sets the block on any provided signals, or to all signals :param signals: defaults to all signals. Accepts either a single string or a list of strings :param isBlocked: the state to set the signal to """ if signals: try: if isinstance(signals, basestring): signals = [signals] except NameError: if isinstance(signals, str): signals = [signals] signals = signals or self.keys() for signal in signals: if signal not in self: raise RuntimeError("Could not find signal matching %s" % signal) self[signal].block(isBlocked)
python
def block(self, signals=None, isBlocked=True): """ Sets the block on any provided signals, or to all signals :param signals: defaults to all signals. Accepts either a single string or a list of strings :param isBlocked: the state to set the signal to """ if signals: try: if isinstance(signals, basestring): signals = [signals] except NameError: if isinstance(signals, str): signals = [signals] signals = signals or self.keys() for signal in signals: if signal not in self: raise RuntimeError("Could not find signal matching %s" % signal) self[signal].block(isBlocked)
[ "def", "block", "(", "self", ",", "signals", "=", "None", ",", "isBlocked", "=", "True", ")", ":", "if", "signals", ":", "try", ":", "if", "isinstance", "(", "signals", ",", "basestring", ")", ":", "signals", "=", "[", "signals", "]", "except", "NameError", ":", "if", "isinstance", "(", "signals", ",", "str", ")", ":", "signals", "=", "[", "signals", "]", "signals", "=", "signals", "or", "self", ".", "keys", "(", ")", "for", "signal", "in", "signals", ":", "if", "signal", "not", "in", "self", ":", "raise", "RuntimeError", "(", "\"Could not find signal matching %s\"", "%", "signal", ")", "self", "[", "signal", "]", ".", "block", "(", "isBlocked", ")" ]
Sets the block on any provided signals, or to all signals :param signals: defaults to all signals. Accepts either a single string or a list of strings :param isBlocked: the state to set the signal to
[ "Sets", "the", "block", "on", "any", "provided", "signals", "or", "to", "all", "signals" ]
72f4ced949f81e5438bd8f15247ef7890e8cc5ff
https://github.com/dgovil/PySignal/blob/72f4ced949f81e5438bd8f15247ef7890e8cc5ff/PySignal.py#L167-L187
train
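block accepts either one signal name or a list, normalizing a bare string into a list before iterating. The sketch below assumes SignalFactory behaves as a dict of named Signal objects (only block is shown here) and that Signal exposes the block(isBlocked) setter this method calls; both are assumptions about parts of the module not reproduced above.

from PySignal import Signal, SignalFactory  # assumed import path

factory = SignalFactory()
factory['saved'] = Signal()  # assumes dict-style registration
factory['saved'].connect(lambda: print('saved!'))

factory.block('saved')                    # a single name is wrapped into a list
factory['saved'].emit()                   # blocked: nothing printed
factory.block('saved', isBlocked=False)
factory['saved'].emit()                   # -> saved!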
craffel/mir_eval
mir_eval/io.py
_open
def _open(file_or_str, **kwargs): '''Either open a file handle, or use an existing file-like object. This will behave as the `open` function if `file_or_str` is a string. If `file_or_str` has the `read` attribute, it will return `file_or_str`. Otherwise, an `IOError` is raised. ''' if hasattr(file_or_str, 'read'): yield file_or_str elif isinstance(file_or_str, six.string_types): with open(file_or_str, **kwargs) as file_desc: yield file_desc else: raise IOError('Invalid file-or-str object: {}'.format(file_or_str))
python
def _open(file_or_str, **kwargs): '''Either open a file handle, or use an existing file-like object. This will behave as the `open` function if `file_or_str` is a string. If `file_or_str` has the `read` attribute, it will return `file_or_str`. Otherwise, an `IOError` is raised. ''' if hasattr(file_or_str, 'read'): yield file_or_str elif isinstance(file_or_str, six.string_types): with open(file_or_str, **kwargs) as file_desc: yield file_desc else: raise IOError('Invalid file-or-str object: {}'.format(file_or_str))
[ "def", "_open", "(", "file_or_str", ",", "*", "*", "kwargs", ")", ":", "if", "hasattr", "(", "file_or_str", ",", "'read'", ")", ":", "yield", "file_or_str", "elif", "isinstance", "(", "file_or_str", ",", "six", ".", "string_types", ")", ":", "with", "open", "(", "file_or_str", ",", "*", "*", "kwargs", ")", "as", "file_desc", ":", "yield", "file_desc", "else", ":", "raise", "IOError", "(", "'Invalid file-or-str object: {}'", ".", "format", "(", "file_or_str", ")", ")" ]
Either open a file handle, or use an existing file-like object. This will behave as the `open` function if `file_or_str` is a string. If `file_or_str` has the `read` attribute, it will return `file_or_str`. Otherwise, an `IOError` is raised.
[ "Either", "open", "a", "file", "handle", "or", "use", "an", "existing", "file", "-", "like", "object", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/io.py#L18-L33
train
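_open is written as a generator, which suggests it is wrapped with contextlib.contextmanager in the real module (the decorator is not shown in this record). A self-contained sketch of the same pattern, simplified to Python 3's plain str check:

import contextlib
import io

@contextlib.contextmanager
def open_or_passthrough(file_or_str, **kwargs):
    # Wrap open() only when given a path string; hand file-likes through untouched.
    if hasattr(file_or_str, 'read'):
        yield file_or_str
    elif isinstance(file_or_str, str):
        with open(file_or_str, **kwargs) as fh:
            yield fh
    else:
        raise IOError('Invalid file-or-str object: {}'.format(file_or_str))

with open_or_passthrough(io.StringIO('0.5\n1.0\n')) as fh:
    print(fh.read().split())  # -> ['0.5', '1.0']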
craffel/mir_eval
mir_eval/io.py
load_delimited
def load_delimited(filename, converters, delimiter=r'\s+'): r"""Utility function for loading in data from an annotation file where columns are delimited. The number of columns is inferred from the length of the provided converters list. Examples -------- >>> # Load in a one-column list of event times (floats) >>> load_delimited('events.txt', [float]) >>> # Load in a list of labeled events, separated by commas >>> load_delimited('labeled_events.csv', [float, str], ',') Parameters ---------- filename : str Path to the annotation file converters : list of functions Each entry in column ``n`` of the file will be cast by the function ``converters[n]``. delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- columns : tuple of lists Each list in this tuple corresponds to values in one of the columns in the file. """ # Initialize list of empty lists n_columns = len(converters) columns = tuple(list() for _ in range(n_columns)) # Create re object for splitting lines splitter = re.compile(delimiter) # Note: we do io manually here for two reasons. # 1. The csv module has difficulties with unicode, which may lead # to failures on certain annotation strings # # 2. numpy's text loader does not handle non-numeric data # with _open(filename, mode='r') as input_file: for row, line in enumerate(input_file, 1): # Split each line using the supplied delimiter data = splitter.split(line.strip(), n_columns - 1) # Throw a helpful error if we got an unexpected # of columns if n_columns != len(data): raise ValueError('Expected {} columns, got {} at ' '{}:{:d}:\n\t{}'.format(n_columns, len(data), filename, row, line)) for value, column, converter in zip(data, columns, converters): # Try converting the value, throw a helpful error on failure try: converted_value = converter(value) except: raise ValueError("Couldn't convert value {} using {} " "found at {}:{:d}:\n\t{}".format( value, converter.__name__, filename, row, line)) column.append(converted_value) # Sane output if n_columns == 1: return columns[0] else: return columns
python
def load_delimited(filename, converters, delimiter=r'\s+'): r"""Utility function for loading in data from an annotation file where columns are delimited. The number of columns is inferred from the length of the provided converters list. Examples -------- >>> # Load in a one-column list of event times (floats) >>> load_delimited('events.txt', [float]) >>> # Load in a list of labeled events, separated by commas >>> load_delimited('labeled_events.csv', [float, str], ',') Parameters ---------- filename : str Path to the annotation file converters : list of functions Each entry in column ``n`` of the file will be cast by the function ``converters[n]``. delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- columns : tuple of lists Each list in this tuple corresponds to values in one of the columns in the file. """ # Initialize list of empty lists n_columns = len(converters) columns = tuple(list() for _ in range(n_columns)) # Create re object for splitting lines splitter = re.compile(delimiter) # Note: we do io manually here for two reasons. # 1. The csv module has difficulties with unicode, which may lead # to failures on certain annotation strings # # 2. numpy's text loader does not handle non-numeric data # with _open(filename, mode='r') as input_file: for row, line in enumerate(input_file, 1): # Split each line using the supplied delimiter data = splitter.split(line.strip(), n_columns - 1) # Throw a helpful error if we got an unexpected # of columns if n_columns != len(data): raise ValueError('Expected {} columns, got {} at ' '{}:{:d}:\n\t{}'.format(n_columns, len(data), filename, row, line)) for value, column, converter in zip(data, columns, converters): # Try converting the value, throw a helpful error on failure try: converted_value = converter(value) except: raise ValueError("Couldn't convert value {} using {} " "found at {}:{:d}:\n\t{}".format( value, converter.__name__, filename, row, line)) column.append(converted_value) # Sane output if n_columns == 1: return columns[0] else: return columns
[ "def", "load_delimited", "(", "filename", ",", "converters", ",", "delimiter", "=", "r'\\s+'", ")", ":", "# Initialize list of empty lists", "n_columns", "=", "len", "(", "converters", ")", "columns", "=", "tuple", "(", "list", "(", ")", "for", "_", "in", "range", "(", "n_columns", ")", ")", "# Create re object for splitting lines", "splitter", "=", "re", ".", "compile", "(", "delimiter", ")", "# Note: we do io manually here for two reasons.", "# 1. The csv module has difficulties with unicode, which may lead", "# to failures on certain annotation strings", "#", "# 2. numpy's text loader does not handle non-numeric data", "#", "with", "_open", "(", "filename", ",", "mode", "=", "'r'", ")", "as", "input_file", ":", "for", "row", ",", "line", "in", "enumerate", "(", "input_file", ",", "1", ")", ":", "# Split each line using the supplied delimiter", "data", "=", "splitter", ".", "split", "(", "line", ".", "strip", "(", ")", ",", "n_columns", "-", "1", ")", "# Throw a helpful error if we got an unexpected # of columns", "if", "n_columns", "!=", "len", "(", "data", ")", ":", "raise", "ValueError", "(", "'Expected {} columns, got {} at '", "'{}:{:d}:\\n\\t{}'", ".", "format", "(", "n_columns", ",", "len", "(", "data", ")", ",", "filename", ",", "row", ",", "line", ")", ")", "for", "value", ",", "column", ",", "converter", "in", "zip", "(", "data", ",", "columns", ",", "converters", ")", ":", "# Try converting the value, throw a helpful error on failure", "try", ":", "converted_value", "=", "converter", "(", "value", ")", "except", ":", "raise", "ValueError", "(", "\"Couldn't convert value {} using {} \"", "\"found at {}:{:d}:\\n\\t{}\"", ".", "format", "(", "value", ",", "converter", ".", "__name__", ",", "filename", ",", "row", ",", "line", ")", ")", "column", ".", "append", "(", "converted_value", ")", "# Sane output", "if", "n_columns", "==", "1", ":", "return", "columns", "[", "0", "]", "else", ":", "return", "columns" ]
r"""Utility function for loading in data from an annotation file where columns are delimited. The number of columns is inferred from the length of the provided converters list. Examples -------- >>> # Load in a one-column list of event times (floats) >>> load_delimited('events.txt', [float]) >>> # Load in a list of labeled events, separated by commas >>> load_delimited('labeled_events.csv', [float, str], ',') Parameters ---------- filename : str Path to the annotation file converters : list of functions Each entry in column ``n`` of the file will be cast by the function ``converters[n]``. delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- columns : tuple of lists Each list in this tuple corresponds to values in one of the columns in the file.
[ "r", "Utility", "function", "for", "loading", "in", "data", "from", "an", "annotation", "file", "where", "columns", "are", "delimited", ".", "The", "number", "of", "columns", "is", "inferred", "from", "the", "length", "of", "the", "provided", "converters", "list", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/io.py#L36-L105
train
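Because _open passes file-like objects straight through, load_delimited can be tried on an in-memory buffer; a usage sketch assuming mir_eval is installed:

import io
from mir_eval.io import load_delimited  # the function defined above

data = io.StringIO('0.5,kick\n1.0,snare\n')
times, labels = load_delimited(data, [float, str], ',')
print(times)   # [0.5, 1.0]   (plain lists; callers wrap in np.array as needed)
print(labels)  # ['kick', 'snare']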
craffel/mir_eval
mir_eval/io.py
load_events
def load_events(filename, delimiter=r'\s+'): r"""Import time-stamp events from an annotation file. The file should consist of a single column of numeric values corresponding to the event times. This is primarily useful for processing events which lack duration, such as beats or onsets. Parameters ---------- filename : str Path to the annotation file delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- event_times : np.ndarray array of event times (float) """ # Use our universal function to load in the events events = load_delimited(filename, [float], delimiter) events = np.array(events) # Validate them, but throw a warning in place of an error try: util.validate_events(events) except ValueError as error: warnings.warn(error.args[0]) return events
python
def load_events(filename, delimiter=r'\s+'): r"""Import time-stamp events from an annotation file. The file should consist of a single column of numeric values corresponding to the event times. This is primarily useful for processing events which lack duration, such as beats or onsets. Parameters ---------- filename : str Path to the annotation file delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- event_times : np.ndarray array of event times (float) """ # Use our universal function to load in the events events = load_delimited(filename, [float], delimiter) events = np.array(events) # Validate them, but throw a warning in place of an error try: util.validate_events(events) except ValueError as error: warnings.warn(error.args[0]) return events
[ "def", "load_events", "(", "filename", ",", "delimiter", "=", "r'\\s+'", ")", ":", "# Use our universal function to load in the events", "events", "=", "load_delimited", "(", "filename", ",", "[", "float", "]", ",", "delimiter", ")", "events", "=", "np", ".", "array", "(", "events", ")", "# Validate them, but throw a warning in place of an error", "try", ":", "util", ".", "validate_events", "(", "events", ")", "except", "ValueError", "as", "error", ":", "warnings", ".", "warn", "(", "error", ".", "args", "[", "0", "]", ")", "return", "events" ]
r"""Import time-stamp events from an annotation file. The file should consist of a single column of numeric values corresponding to the event times. This is primarily useful for processing events which lack duration, such as beats or onsets. Parameters ---------- filename : str Path to the annotation file delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- event_times : np.ndarray array of event times (float)
[ "r", "Import", "time", "-", "stamp", "events", "from", "an", "annotation", "file", ".", "The", "file", "should", "consist", "of", "a", "single", "column", "of", "numeric", "values", "corresponding", "to", "the", "event", "times", ".", "This", "is", "primarily", "useful", "for", "processing", "events", "which", "lack", "duration", "such", "as", "beats", "or", "onsets", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/io.py#L108-L137
train
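load_events is then a thin wrapper that casts the single column to a numpy array and downgrades validation failures to warnings; for example (again on an in-memory buffer, assuming mir_eval is installed):

import io
from mir_eval.io import load_events

beats = load_events(io.StringIO('0.5\n1.0\n1.5\n'))
print(beats)  # [0.5 1.  1.5]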
craffel/mir_eval
mir_eval/io.py
load_labeled_events
def load_labeled_events(filename, delimiter=r'\s+'): r"""Import labeled time-stamp events from an annotation file. The file should consist of two columns; the first having numeric values corresponding to the event times and the second having string labels for each event. This is primarily useful for processing labeled events which lack duration, such as beats with metric beat number or onsets with an instrument label. Parameters ---------- filename : str Path to the annotation file delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- event_times : np.ndarray array of event times (float) labels : list of str list of labels """ # Use our universal function to load in the events events, labels = load_delimited(filename, [float, str], delimiter) events = np.array(events) # Validate them, but throw a warning in place of an error try: util.validate_events(events) except ValueError as error: warnings.warn(error.args[0]) return events, labels
python
def load_labeled_events(filename, delimiter=r'\s+'): r"""Import labeled time-stamp events from an annotation file. The file should consist of two columns; the first having numeric values corresponding to the event times and the second having string labels for each event. This is primarily useful for processing labeled events which lack duration, such as beats with metric beat number or onsets with an instrument label. Parameters ---------- filename : str Path to the annotation file delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- event_times : np.ndarray array of event times (float) labels : list of str list of labels """ # Use our universal function to load in the events events, labels = load_delimited(filename, [float, str], delimiter) events = np.array(events) # Validate them, but throw a warning in place of an error try: util.validate_events(events) except ValueError as error: warnings.warn(error.args[0]) return events, labels
[ "def", "load_labeled_events", "(", "filename", ",", "delimiter", "=", "r'\\s+'", ")", ":", "# Use our universal function to load in the events", "events", ",", "labels", "=", "load_delimited", "(", "filename", ",", "[", "float", ",", "str", "]", ",", "delimiter", ")", "events", "=", "np", ".", "array", "(", "events", ")", "# Validate them, but throw a warning in place of an error", "try", ":", "util", ".", "validate_events", "(", "events", ")", "except", "ValueError", "as", "error", ":", "warnings", ".", "warn", "(", "error", ".", "args", "[", "0", "]", ")", "return", "events", ",", "labels" ]
r"""Import labeled time-stamp events from an annotation file. The file should consist of two columns; the first having numeric values corresponding to the event times and the second having string labels for each event. This is primarily useful for processing labeled events which lack duration, such as beats with metric beat number or onsets with an instrument label. Parameters ---------- filename : str Path to the annotation file delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- event_times : np.ndarray array of event times (float) labels : list of str list of labels
[ "r", "Import", "labeled", "time", "-", "stamp", "events", "from", "an", "annotation", "file", ".", "The", "file", "should", "consist", "of", "two", "columns", ";", "the", "first", "having", "numeric", "values", "corresponding", "to", "the", "event", "times", "and", "the", "second", "having", "string", "labels", "for", "each", "event", ".", "This", "is", "primarily", "useful", "for", "processing", "labeled", "events", "which", "lack", "duration", "such", "as", "beats", "with", "metric", "beat", "number", "or", "onsets", "with", "an", "instrument", "label", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/io.py#L140-L172
train
craffel/mir_eval
mir_eval/io.py
load_time_series
def load_time_series(filename, delimiter=r'\s+'): r"""Import a time series from an annotation file. The file should consist of two columns of numeric values corresponding to the time and value of each sample of the time series. Parameters ---------- filename : str Path to the annotation file delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- times : np.ndarray array of timestamps (float) values : np.ndarray array of corresponding numeric values (float) """ # Use our universal function to load in the events times, values = load_delimited(filename, [float, float], delimiter) times = np.array(times) values = np.array(values) return times, values
python
def load_time_series(filename, delimiter=r'\s+'): r"""Import a time series from an annotation file. The file should consist of two columns of numeric values corresponding to the time and value of each sample of the time series. Parameters ---------- filename : str Path to the annotation file delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- times : np.ndarray array of timestamps (float) values : np.ndarray array of corresponding numeric values (float) """ # Use our universal function to load in the events times, values = load_delimited(filename, [float, float], delimiter) times = np.array(times) values = np.array(values) return times, values
[ "def", "load_time_series", "(", "filename", ",", "delimiter", "=", "r'\\s+'", ")", ":", "# Use our universal function to load in the events", "times", ",", "values", "=", "load_delimited", "(", "filename", ",", "[", "float", ",", "float", "]", ",", "delimiter", ")", "times", "=", "np", ".", "array", "(", "times", ")", "values", "=", "np", ".", "array", "(", "values", ")", "return", "times", ",", "values" ]
r"""Import a time series from an annotation file. The file should consist of two columns of numeric values corresponding to the time and value of each sample of the time series. Parameters ---------- filename : str Path to the annotation file delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- times : np.ndarray array of timestamps (float) values : np.ndarray array of corresponding numeric values (float)
[ "r", "Import", "a", "time", "series", "from", "an", "annotation", "file", ".", "The", "file", "should", "consist", "of", "two", "columns", "of", "numeric", "values", "corresponding", "to", "the", "time", "and", "value", "of", "each", "sample", "of", "the", "time", "series", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/io.py#L245-L271
train
craffel/mir_eval
mir_eval/io.py
load_wav
def load_wav(path, mono=True): """Loads a .wav file as a numpy array using ``scipy.io.wavfile``. Parameters ---------- path : str Path to a .wav file mono : bool If the provided .wav has more than one channel, it will be converted to mono if ``mono=True``. (Default value = True) Returns ------- audio_data : np.ndarray Array of audio samples, normalized to the range [-1., 1.] fs : int Sampling rate of the audio data """ fs, audio_data = scipy.io.wavfile.read(path) # Make float in range [-1, 1] if audio_data.dtype == 'int8': audio_data = audio_data/float(2**8) elif audio_data.dtype == 'int16': audio_data = audio_data/float(2**16) elif audio_data.dtype == 'int32': audio_data = audio_data/float(2**24) else: raise ValueError('Got unexpected .wav data type ' '{}'.format(audio_data.dtype)) # Optionally convert to mono if mono and audio_data.ndim != 1: audio_data = audio_data.mean(axis=1) return audio_data, fs
python
def load_wav(path, mono=True): """Loads a .wav file as a numpy array using ``scipy.io.wavfile``. Parameters ---------- path : str Path to a .wav file mono : bool If the provided .wav has more than one channel, it will be converted to mono if ``mono=True``. (Default value = True) Returns ------- audio_data : np.ndarray Array of audio samples, normalized to the range [-1., 1.] fs : int Sampling rate of the audio data """ fs, audio_data = scipy.io.wavfile.read(path) # Make float in range [-1, 1] if audio_data.dtype == 'int8': audio_data = audio_data/float(2**8) elif audio_data.dtype == 'int16': audio_data = audio_data/float(2**16) elif audio_data.dtype == 'int32': audio_data = audio_data/float(2**24) else: raise ValueError('Got unexpected .wav data type ' '{}'.format(audio_data.dtype)) # Optionally convert to mono if mono and audio_data.ndim != 1: audio_data = audio_data.mean(axis=1) return audio_data, fs
[ "def", "load_wav", "(", "path", ",", "mono", "=", "True", ")", ":", "fs", ",", "audio_data", "=", "scipy", ".", "io", ".", "wavfile", ".", "read", "(", "path", ")", "# Make float in range [-1, 1]", "if", "audio_data", ".", "dtype", "==", "'int8'", ":", "audio_data", "=", "audio_data", "/", "float", "(", "2", "**", "8", ")", "elif", "audio_data", ".", "dtype", "==", "'int16'", ":", "audio_data", "=", "audio_data", "/", "float", "(", "2", "**", "16", ")", "elif", "audio_data", ".", "dtype", "==", "'int32'", ":", "audio_data", "=", "audio_data", "/", "float", "(", "2", "**", "24", ")", "else", ":", "raise", "ValueError", "(", "'Got unexpected .wav data type '", "'{}'", ".", "format", "(", "audio_data", ".", "dtype", ")", ")", "# Optionally convert to mono", "if", "mono", "and", "audio_data", ".", "ndim", "!=", "1", ":", "audio_data", "=", "audio_data", ".", "mean", "(", "axis", "=", "1", ")", "return", "audio_data", ",", "fs" ]
Loads a .wav file as a numpy array using ``scipy.io.wavfile``. Parameters ---------- path : str Path to a .wav file mono : bool If the provided .wav has more than one channel, it will be converted to mono if ``mono=True``. (Default value = True) Returns ------- audio_data : np.ndarray Array of audio samples, normalized to the range [-1., 1.] fs : int Sampling rate of the audio data
[ "Loads", "a", ".", "wav", "file", "as", "a", "numpy", "array", "using", "scipy", ".", "io", ".", "wavfile", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/io.py#L353-L387
train
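A round-trip sketch for load_wav: synthesize a short int16 tone with scipy, write it, and load it back. The filename is a placeholder; note that the int16 branch shown above divides by 2**16, so a half-scale signal comes back with peaks around 0.25.

import numpy as np
import scipy.io.wavfile
from mir_eval.io import load_wav

fs = 8000
t = np.arange(fs) / float(fs)
tone = (0.5 * np.sin(2 * np.pi * 440 * t) * 2**15).astype(np.int16)
scipy.io.wavfile.write('tone.wav', fs, tone)  # placeholder filename

audio, rate = load_wav('tone.wav')
print(rate)                       # 8000
print(audio.min(), audio.max())   # roughly -0.25 .. 0.25 after the int16 scaling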
craffel/mir_eval
mir_eval/io.py
load_ragged_time_series
def load_ragged_time_series(filename, dtype=float, delimiter=r'\s+', header=False): r"""Utility function for loading in data from a delimited time series annotation file with a variable number of columns. Assumes that column 0 contains time stamps and columns 1 through n contain values. n may be variable from time stamp to time stamp. Examples -------- >>> # Load a ragged list of tab-delimited multi-f0 midi notes >>> times, vals = load_ragged_time_series('multif0.txt', dtype=int, delimiter='\t') >>> # Load a ragged list of space delimited multi-f0 values with a header >>> times, vals = load_ragged_time_series('labeled_events.csv', header=True) Parameters ---------- filename : str Path to the annotation file dtype : function Data type to apply to values columns. delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. header : bool Indicates whether a header row is present or not. By default, assumes no header is present. Returns ------- times : np.ndarray array of timestamps (float) values : list of np.ndarray list of arrays of corresponding values """ # Initialize empty lists times = [] values = [] # Create re object for splitting lines splitter = re.compile(delimiter) if header: start_row = 1 else: start_row = 0 with _open(filename, mode='r') as input_file: for row, line in enumerate(input_file, start_row): # Split each line using the supplied delimiter data = splitter.split(line.strip()) try: converted_time = float(data[0]) except (TypeError, ValueError) as exe: six.raise_from(ValueError("Couldn't convert value {} using {} " "found at {}:{:d}:\n\t{}".format( data[0], float.__name__, filename, row, line)), exe) times.append(converted_time) # cast values to a numpy array. time stamps with no values are cast # to an empty array. try: converted_value = np.array(data[1:], dtype=dtype) except (TypeError, ValueError) as exe: six.raise_from(ValueError("Couldn't convert value {} using {} " "found at {}:{:d}:\n\t{}".format( data[1:], dtype.__name__, filename, row, line)), exe) values.append(converted_value) return np.array(times), values
python
def load_ragged_time_series(filename, dtype=float, delimiter=r'\s+', header=False): r"""Utility function for loading in data from a delimited time series annotation file with a variable number of columns. Assumes that column 0 contains time stamps and columns 1 through n contain values. n may be variable from time stamp to time stamp. Examples -------- >>> # Load a ragged list of tab-delimited multi-f0 midi notes >>> times, vals = load_ragged_time_series('multif0.txt', dtype=int, delimiter='\t') >>> # Load a ragged list of space delimited multi-f0 values with a header >>> times, vals = load_ragged_time_series('labeled_events.csv', header=True) Parameters ---------- filename : str Path to the annotation file dtype : function Data type to apply to values columns. delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. header : bool Indicates whether a header row is present or not. By default, assumes no header is present. Returns ------- times : np.ndarray array of timestamps (float) values : list of np.ndarray list of arrays of corresponding values """ # Initialize empty lists times = [] values = [] # Create re object for splitting lines splitter = re.compile(delimiter) if header: start_row = 1 else: start_row = 0 with _open(filename, mode='r') as input_file: for row, line in enumerate(input_file, start_row): # Split each line using the supplied delimiter data = splitter.split(line.strip()) try: converted_time = float(data[0]) except (TypeError, ValueError) as exe: six.raise_from(ValueError("Couldn't convert value {} using {} " "found at {}:{:d}:\n\t{}".format( data[0], float.__name__, filename, row, line)), exe) times.append(converted_time) # cast values to a numpy array. time stamps with no values are cast # to an empty array. try: converted_value = np.array(data[1:], dtype=dtype) except (TypeError, ValueError) as exe: six.raise_from(ValueError("Couldn't convert value {} using {} " "found at {}:{:d}:\n\t{}".format( data[1:], dtype.__name__, filename, row, line)), exe) values.append(converted_value) return np.array(times), values
[ "def", "load_ragged_time_series", "(", "filename", ",", "dtype", "=", "float", ",", "delimiter", "=", "r'\\s+'", ",", "header", "=", "False", ")", ":", "# Initialize empty lists", "times", "=", "[", "]", "values", "=", "[", "]", "# Create re object for splitting lines", "splitter", "=", "re", ".", "compile", "(", "delimiter", ")", "if", "header", ":", "start_row", "=", "1", "else", ":", "start_row", "=", "0", "with", "_open", "(", "filename", ",", "mode", "=", "'r'", ")", "as", "input_file", ":", "for", "row", ",", "line", "in", "enumerate", "(", "input_file", ",", "start_row", ")", ":", "# Split each line using the supplied delimiter", "data", "=", "splitter", ".", "split", "(", "line", ".", "strip", "(", ")", ")", "try", ":", "converted_time", "=", "float", "(", "data", "[", "0", "]", ")", "except", "(", "TypeError", ",", "ValueError", ")", "as", "exe", ":", "six", ".", "raise_from", "(", "ValueError", "(", "\"Couldn't convert value {} using {} \"", "\"found at {}:{:d}:\\n\\t{}\"", ".", "format", "(", "data", "[", "0", "]", ",", "float", ".", "__name__", ",", "filename", ",", "row", ",", "line", ")", ")", ",", "exe", ")", "times", ".", "append", "(", "converted_time", ")", "# cast values to a numpy array. time stamps with no values are cast", "# to an empty array.", "try", ":", "converted_value", "=", "np", ".", "array", "(", "data", "[", "1", ":", "]", ",", "dtype", "=", "dtype", ")", "except", "(", "TypeError", ",", "ValueError", ")", "as", "exe", ":", "six", ".", "raise_from", "(", "ValueError", "(", "\"Couldn't convert value {} using {} \"", "\"found at {}:{:d}:\\n\\t{}\"", ".", "format", "(", "data", "[", "1", ":", "]", ",", "dtype", ".", "__name__", ",", "filename", ",", "row", ",", "line", ")", ")", ",", "exe", ")", "values", ".", "append", "(", "converted_value", ")", "return", "np", ".", "array", "(", "times", ")", ",", "values" ]
r"""Utility function for loading in data from a delimited time series annotation file with a variable number of columns. Assumes that column 0 contains time stamps and columns 1 through n contain values. n may be variable from time stamp to time stamp. Examples -------- >>> # Load a ragged list of tab-delimited multi-f0 midi notes >>> times, vals = load_ragged_time_series('multif0.txt', dtype=int, delimiter='\t') >>> # Load a ragged list of space delimited multi-f0 values with a header >>> times, vals = load_ragged_time_series('labeled_events.csv', header=True) Parameters ---------- filename : str Path to the annotation file dtype : function Data type to apply to values columns. delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. header : bool Indicates whether a header row is present or not. By default, assumes no header is present. Returns ------- times : np.ndarray array of timestamps (float) values : list of np.ndarray list of arrays of corresponding values
[ "r", "Utility", "function", "for", "loading", "in", "data", "from", "a", "delimited", "time", "series", "annotation", "file", "with", "a", "variable", "number", "of", "columns", ".", "Assumes", "that", "column", "0", "contains", "time", "stamps", "and", "columns", "1", "through", "n", "contain", "values", ".", "n", "may", "be", "variable", "from", "time", "stamp", "to", "time", "stamp", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/io.py#L511-L583
train
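A ragged example makes the behavior clear: each line may carry a different number of values, and a timestamp with no values yields an empty array. Sketch on an in-memory buffer, assuming mir_eval is installed:

import io
from mir_eval.io import load_ragged_time_series

src = io.StringIO('0.00 440.0\n0.10 440.0 220.0\n0.20\n')
times, freqs = load_ragged_time_series(src)
print(times)                        # [0.  0.1 0.2]
print([f.tolist() for f in freqs])  # [[440.0], [440.0, 220.0], []]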
craffel/mir_eval
mir_eval/chord.py
pitch_class_to_semitone
def pitch_class_to_semitone(pitch_class): r'''Convert a pitch class to semitone. Parameters ---------- pitch_class : str Spelling of a given pitch class, e.g. 'C#', 'Gbb' Returns ------- semitone : int Semitone value of the pitch class. ''' semitone = 0 for idx, char in enumerate(pitch_class): if char == '#' and idx > 0: semitone += 1 elif char == 'b' and idx > 0: semitone -= 1 elif idx == 0: semitone = PITCH_CLASSES.get(char) else: raise InvalidChordException( "Pitch class improperly formed: %s" % pitch_class) return semitone % 12
python
def pitch_class_to_semitone(pitch_class): r'''Convert a pitch class to semitone. Parameters ---------- pitch_class : str Spelling of a given pitch class, e.g. 'C#', 'Gbb' Returns ------- semitone : int Semitone value of the pitch class. ''' semitone = 0 for idx, char in enumerate(pitch_class): if char == '#' and idx > 0: semitone += 1 elif char == 'b' and idx > 0: semitone -= 1 elif idx == 0: semitone = PITCH_CLASSES.get(char) else: raise InvalidChordException( "Pitch class improperly formed: %s" % pitch_class) return semitone % 12
[ "def", "pitch_class_to_semitone", "(", "pitch_class", ")", ":", "semitone", "=", "0", "for", "idx", ",", "char", "in", "enumerate", "(", "pitch_class", ")", ":", "if", "char", "==", "'#'", "and", "idx", ">", "0", ":", "semitone", "+=", "1", "elif", "char", "==", "'b'", "and", "idx", ">", "0", ":", "semitone", "-=", "1", "elif", "idx", "==", "0", ":", "semitone", "=", "PITCH_CLASSES", ".", "get", "(", "char", ")", "else", ":", "raise", "InvalidChordException", "(", "\"Pitch class improperly formed: %s\"", "%", "pitch_class", ")", "return", "semitone", "%", "12" ]
r'''Convert a pitch class to semitone. Parameters ---------- pitch_class : str Spelling of a given pitch class, e.g. 'C#', 'Gbb' Returns ------- semitone : int Semitone value of the pitch class.
[ "r", "Convert", "a", "pitch", "class", "to", "semitone", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L143-L168
train
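PITCH_CLASSES itself is not reproduced in this record; assuming the conventional mapping (C=0, D=2, ..., G=7, ..., B=11), the accidental arithmetic and the final modulo behave like this:

from mir_eval.chord import pitch_class_to_semitone

print(pitch_class_to_semitone('C#'))   # 1
print(pitch_class_to_semitone('Gbb'))  # 5   (G=7, two flats)
print(pitch_class_to_semitone('Cb'))   # 11  (C=0, one flat, wrapped modulo 12)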
craffel/mir_eval
mir_eval/chord.py
scale_degree_to_semitone
def scale_degree_to_semitone(scale_degree): r"""Convert a scale degree to semitone. Parameters ---------- scale_degree : str Spelling of a relative scale degree, e.g. 'b3', '7', '#5' Returns ------- semitone : int Relative semitone of the scale degree, wrapped to a single octave Raises ------ InvalidChordException if `scale_degree` is invalid. """ semitone = 0 offset = 0 if scale_degree.startswith("#"): offset = scale_degree.count("#") scale_degree = scale_degree.strip("#") elif scale_degree.startswith('b'): offset = -1 * scale_degree.count("b") scale_degree = scale_degree.strip("b") semitone = SCALE_DEGREES.get(scale_degree, None) if semitone is None: raise InvalidChordException( "Scale degree improperly formed: {}, expected one of {}." .format(scale_degree, list(SCALE_DEGREES.keys()))) return semitone + offset
python
def scale_degree_to_semitone(scale_degree): r"""Convert a scale degree to semitone. Parameters ---------- scale_degree : str Spelling of a relative scale degree, e.g. 'b3', '7', '#5' Returns ------- semitone : int Relative semitone of the scale degree, wrapped to a single octave Raises ------ InvalidChordException if `scale_degree` is invalid. """ semitone = 0 offset = 0 if scale_degree.startswith("#"): offset = scale_degree.count("#") scale_degree = scale_degree.strip("#") elif scale_degree.startswith('b'): offset = -1 * scale_degree.count("b") scale_degree = scale_degree.strip("b") semitone = SCALE_DEGREES.get(scale_degree, None) if semitone is None: raise InvalidChordException( "Scale degree improperly formed: {}, expected one of {}." .format(scale_degree, list(SCALE_DEGREES.keys()))) return semitone + offset
[ "def", "scale_degree_to_semitone", "(", "scale_degree", ")", ":", "semitone", "=", "0", "offset", "=", "0", "if", "scale_degree", ".", "startswith", "(", "\"#\"", ")", ":", "offset", "=", "scale_degree", ".", "count", "(", "\"#\"", ")", "scale_degree", "=", "scale_degree", ".", "strip", "(", "\"#\"", ")", "elif", "scale_degree", ".", "startswith", "(", "'b'", ")", ":", "offset", "=", "-", "1", "*", "scale_degree", ".", "count", "(", "\"b\"", ")", "scale_degree", "=", "scale_degree", ".", "strip", "(", "\"b\"", ")", "semitone", "=", "SCALE_DEGREES", ".", "get", "(", "scale_degree", ",", "None", ")", "if", "semitone", "is", "None", ":", "raise", "InvalidChordException", "(", "\"Scale degree improperly formed: {}, expected one of {}.\"", ".", "format", "(", "scale_degree", ",", "list", "(", "SCALE_DEGREES", ".", "keys", "(", ")", ")", ")", ")", "return", "semitone", "+", "offset" ]
r"""Convert a scale degree to semitone. Parameters ---------- scale_degree : str Spelling of a relative scale degree, e.g. 'b3', '7', '#5' Returns ------- semitone : int Relative semitone of the scale degree, wrapped to a single octave Raises ------ InvalidChordException if `scale_degree` is invalid.
[ "r", "Convert", "a", "scale", "degree", "to", "semitone", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L175-L206
train
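Likewise for scale degrees, assuming the conventional degree-to-semitone table (1=0, 3=4, 5=7, 7=11, which SCALE_DEGREES presumably encodes): leading accidentals are counted and stripped before the lookup, then added back as an offset.

from mir_eval.chord import scale_degree_to_semitone

print(scale_degree_to_semitone('b3'))  # 3   (4 - 1: a minor third)
print(scale_degree_to_semitone('#5'))  # 8   (7 + 1: an augmented fifth)
print(scale_degree_to_semitone('7'))   # 11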
craffel/mir_eval
mir_eval/chord.py
scale_degree_to_bitmap
def scale_degree_to_bitmap(scale_degree, modulo=False, length=BITMAP_LENGTH): """Create a bitmap representation of a scale degree. Note that values in the bitmap may be negative, indicating that the semitone is to be removed. Parameters ---------- scale_degree : str Spelling of a relative scale degree, e.g. 'b3', '7', '#5' modulo : bool, default=True If a scale degree exceeds the length of the bit-vector, modulo the scale degree back into the bit-vector; otherwise it is discarded. length : int, default=12 Length of the bit-vector to produce Returns ------- bitmap : np.ndarray, in [-1, 0, 1], len=`length` Bitmap representation of this scale degree. """ sign = 1 if scale_degree.startswith("*"): sign = -1 scale_degree = scale_degree.strip("*") edit_map = [0] * length sd_idx = scale_degree_to_semitone(scale_degree) if sd_idx < length or modulo: edit_map[sd_idx % length] = sign return np.array(edit_map)
python
def scale_degree_to_bitmap(scale_degree, modulo=False, length=BITMAP_LENGTH):
    """Create a bitmap representation of a scale degree.

    Note that values in the bitmap may be negative, indicating that the
    semitone is to be removed.

    Parameters
    ----------
    scale_degree : str
        Spelling of a relative scale degree, e.g. 'b3', '7', '#5'
    modulo : bool, default=False
        If a scale degree exceeds the length of the bit-vector, modulo the
        scale degree back into the bit-vector; otherwise it is discarded.
    length : int, default=12
        Length of the bit-vector to produce

    Returns
    -------
    bitmap : np.ndarray, in [-1, 0, 1], len=`length`
        Bitmap representation of this scale degree.
    """
    sign = 1
    if scale_degree.startswith("*"):
        sign = -1
        scale_degree = scale_degree.strip("*")
    edit_map = [0] * length
    sd_idx = scale_degree_to_semitone(scale_degree)
    if sd_idx < length or modulo:
        edit_map[sd_idx % length] = sign
    return np.array(edit_map)
[ "def", "scale_degree_to_bitmap", "(", "scale_degree", ",", "modulo", "=", "False", ",", "length", "=", "BITMAP_LENGTH", ")", ":", "sign", "=", "1", "if", "scale_degree", ".", "startswith", "(", "\"*\"", ")", ":", "sign", "=", "-", "1", "scale_degree", "=", "scale_degree", ".", "strip", "(", "\"*\"", ")", "edit_map", "=", "[", "0", "]", "*", "length", "sd_idx", "=", "scale_degree_to_semitone", "(", "scale_degree", ")", "if", "sd_idx", "<", "length", "or", "modulo", ":", "edit_map", "[", "sd_idx", "%", "length", "]", "=", "sign", "return", "np", ".", "array", "(", "edit_map", ")" ]
Create a bitmap representation of a scale degree.

Note that values in the bitmap may be negative, indicating that the
semitone is to be removed.

Parameters
----------
scale_degree : str
    Spelling of a relative scale degree, e.g. 'b3', '7', '#5'
modulo : bool, default=False
    If a scale degree exceeds the length of the bit-vector, modulo the
    scale degree back into the bit-vector; otherwise it is discarded.
length : int, default=12
    Length of the bit-vector to produce

Returns
-------
bitmap : np.ndarray, in [-1, 0, 1], len=`length`
    Bitmap representation of this scale degree.
[ "Create", "a", "bitmap", "representation", "of", "a", "scale", "degree", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L209-L238
train
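A short sketch of the removal and modulo behaviors, assuming mir_eval is installed and BITMAP_LENGTH is the default 12:

import mir_eval.chord as chord

print(chord.scale_degree_to_bitmap('b3'))              # 1 at index 3
print(chord.scale_degree_to_bitmap('*3'))              # -1 at index 4: flagged for removal
print(chord.scale_degree_to_bitmap('9'))               # all zeros: semitone 14 >= 12 and modulo=False
print(chord.scale_degree_to_bitmap('9', modulo=True))  # wraps 14 to index 2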
craffel/mir_eval
mir_eval/chord.py
quality_to_bitmap
def quality_to_bitmap(quality):
    """Return the bitmap for a given quality.

    Parameters
    ----------
    quality : str
        Chord quality name.

    Returns
    -------
    bitmap : np.ndarray
        Bitmap representation of this quality (12-dim).
    """
    if quality not in QUALITIES:
        raise InvalidChordException(
            "Unsupported chord quality shorthand: '%s' "
            "Did you mean to reduce extended chords?" % quality)
    return np.array(QUALITIES[quality])
python
def quality_to_bitmap(quality):
    """Return the bitmap for a given quality.

    Parameters
    ----------
    quality : str
        Chord quality name.

    Returns
    -------
    bitmap : np.ndarray
        Bitmap representation of this quality (12-dim).
    """
    if quality not in QUALITIES:
        raise InvalidChordException(
            "Unsupported chord quality shorthand: '%s' "
            "Did you mean to reduce extended chords?" % quality)
    return np.array(QUALITIES[quality])
[ "def", "quality_to_bitmap", "(", "quality", ")", ":", "if", "quality", "not", "in", "QUALITIES", ":", "raise", "InvalidChordException", "(", "\"Unsupported chord quality shorthand: '%s' \"", "\"Did you mean to reduce extended chords?\"", "%", "quality", ")", "return", "np", ".", "array", "(", "QUALITIES", "[", "quality", "]", ")" ]
Return the bitmap for a given quality.

Parameters
----------
quality : str
    Chord quality name.

Returns
-------
bitmap : np.ndarray
    Bitmap representation of this quality (12-dim).
[ "Return", "the", "bitmap", "for", "a", "given", "quality", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L276-L294
train
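A usage sketch, assuming mir_eval is installed; 'maj' yields the familiar root/third/fifth template, while an unrecognized shorthand raises:

import mir_eval.chord as chord

print(chord.quality_to_bitmap('maj'))   # [1 0 0 0 1 0 0 1 0 0 0 0]
try:
    chord.quality_to_bitmap('major')    # not a recognized shorthand
except chord.InvalidChordException as exc:
    print(exc)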
craffel/mir_eval
mir_eval/chord.py
validate_chord_label
def validate_chord_label(chord_label):
    """Test for well-formedness of a chord label.

    Parameters
    ----------
    chord_label : str
        Chord label to validate.

    """
    # This monster regexp is pulled from the JAMS chord namespace,
    # which is in turn derived from the context-free grammar of
    # Harte et al., 2005.
    pattern = re.compile(r'''^((N|X)|(([A-G](b*|#*))((:(maj|min|dim|aug|1|5|sus2|sus4|maj6|min6|7|maj7|min7|dim7|hdim7|minmaj7|aug7|9|maj9|min9|11|maj11|min11|13|maj13|min13)(\((\*?((b*|#*)([1-9]|1[0-3]?))(,\*?((b*|#*)([1-9]|1[0-3]?)))*)\))?)|(:\((\*?((b*|#*)([1-9]|1[0-3]?))(,\*?((b*|#*)([1-9]|1[0-3]?)))*)\)))?((/((b*|#*)([1-9]|1[0-3]?)))?)?))$''')  # nopep8

    if not pattern.match(chord_label):
        raise InvalidChordException('Invalid chord label: '
                                    '{}'.format(chord_label))
    pass
python
def validate_chord_label(chord_label):
    """Test for well-formedness of a chord label.

    Parameters
    ----------
    chord_label : str
        Chord label to validate.

    """
    # This monster regexp is pulled from the JAMS chord namespace,
    # which is in turn derived from the context-free grammar of
    # Harte et al., 2005.
    pattern = re.compile(r'''^((N|X)|(([A-G](b*|#*))((:(maj|min|dim|aug|1|5|sus2|sus4|maj6|min6|7|maj7|min7|dim7|hdim7|minmaj7|aug7|9|maj9|min9|11|maj11|min11|13|maj13|min13)(\((\*?((b*|#*)([1-9]|1[0-3]?))(,\*?((b*|#*)([1-9]|1[0-3]?)))*)\))?)|(:\((\*?((b*|#*)([1-9]|1[0-3]?))(,\*?((b*|#*)([1-9]|1[0-3]?)))*)\)))?((/((b*|#*)([1-9]|1[0-3]?)))?)?))$''')  # nopep8

    if not pattern.match(chord_label):
        raise InvalidChordException('Invalid chord label: '
                                    '{}'.format(chord_label))
    pass
[ "def", "validate_chord_label", "(", "chord_label", ")", ":", "# This monster regexp is pulled from the JAMS chord namespace,", "# which is in turn derived from the context-free grammar of", "# Harte et al., 2005.", "pattern", "=", "re", ".", "compile", "(", "r'''^((N|X)|(([A-G](b*|#*))((:(maj|min|dim|aug|1|5|sus2|sus4|maj6|min6|7|maj7|min7|dim7|hdim7|minmaj7|aug7|9|maj9|min9|11|maj11|min11|13|maj13|min13)(\\((\\*?((b*|#*)([1-9]|1[0-3]?))(,\\*?((b*|#*)([1-9]|1[0-3]?)))*)\\))?)|(:\\((\\*?((b*|#*)([1-9]|1[0-3]?))(,\\*?((b*|#*)([1-9]|1[0-3]?)))*)\\)))?((/((b*|#*)([1-9]|1[0-3]?)))?)?))$'''", ")", "# nopep8", "if", "not", "pattern", ".", "match", "(", "chord_label", ")", ":", "raise", "InvalidChordException", "(", "'Invalid chord label: '", "'{}'", ".", "format", "(", "chord_label", ")", ")", "pass" ]
Test for well-formedness of a chord label.

Parameters
----------
chord_label : str
    Chord label to validate.
[ "Test", "for", "well", "-", "formedness", "of", "a", "chord", "label", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L338-L357
train
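A quick check of the grammar, assuming mir_eval is installed; a well-formed label passes silently and a malformed one raises:

import mir_eval.chord as chord

chord.validate_chord_label('Eb:min7/b7')    # passes silently
try:
    chord.validate_chord_label('Eb:minor')  # 'minor' is not in the grammar
except chord.InvalidChordException as exc:
    print(exc)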
craffel/mir_eval
mir_eval/chord.py
join
def join(chord_root, quality='', extensions=None, bass=''):
    r"""Join the parts of a chord into a complete chord label.

    Parameters
    ----------
    chord_root : str
        Root pitch class of the chord, e.g. 'C', 'Eb'
    quality : str
        Quality of the chord, e.g. 'maj', 'hdim7'
        (Default value = '')
    extensions : list
        Any added or absent scale degrees for this chord, e.g. ['4', '\*3']
        (Default value = None)
    bass : str
        Scale degree of the bass note, e.g. '5'.
        (Default value = '')

    Returns
    -------
    chord_label : str
        A complete chord label.
    """
    chord_label = chord_root
    if quality or extensions:
        chord_label += ":%s" % quality
    if extensions:
        chord_label += "(%s)" % ",".join(extensions)
    if bass and bass != '1':
        chord_label += "/%s" % bass
    validate_chord_label(chord_label)
    return chord_label
python
def join(chord_root, quality='', extensions=None, bass=''):
    r"""Join the parts of a chord into a complete chord label.

    Parameters
    ----------
    chord_root : str
        Root pitch class of the chord, e.g. 'C', 'Eb'
    quality : str
        Quality of the chord, e.g. 'maj', 'hdim7'
        (Default value = '')
    extensions : list
        Any added or absent scale degrees for this chord, e.g. ['4', '\*3']
        (Default value = None)
    bass : str
        Scale degree of the bass note, e.g. '5'.
        (Default value = '')

    Returns
    -------
    chord_label : str
        A complete chord label.
    """
    chord_label = chord_root
    if quality or extensions:
        chord_label += ":%s" % quality
    if extensions:
        chord_label += "(%s)" % ",".join(extensions)
    if bass and bass != '1':
        chord_label += "/%s" % bass
    validate_chord_label(chord_label)
    return chord_label
[ "def", "join", "(", "chord_root", ",", "quality", "=", "''", ",", "extensions", "=", "None", ",", "bass", "=", "''", ")", ":", "chord_label", "=", "chord_root", "if", "quality", "or", "extensions", ":", "chord_label", "+=", "\":%s\"", "%", "quality", "if", "extensions", ":", "chord_label", "+=", "\"(%s)\"", "%", "\",\"", ".", "join", "(", "extensions", ")", "if", "bass", "and", "bass", "!=", "'1'", ":", "chord_label", "+=", "\"/%s\"", "%", "bass", "validate_chord_label", "(", "chord_label", ")", "return", "chord_label" ]
r"""Join the parts of a chord into a complete chord label. Parameters ---------- chord_root : str Root pitch class of the chord, e.g. 'C', 'Eb' quality : str Quality of the chord, e.g. 'maj', 'hdim7' (Default value = '') extensions : list Any added or absent scaled degrees for this chord, e.g. ['4', '\*3'] (Default value = None) bass : str Scale degree of the bass note, e.g. '5'. (Default value = '') Returns ------- chord_label : str A complete chord label.
[ "r", "Join", "the", "parts", "of", "a", "chord", "into", "a", "complete", "chord", "label", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L434-L465
train
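A sketch of how the pieces are assembled (and re-validated), assuming mir_eval is installed:

import mir_eval.chord as chord

print(chord.join('F#'))                                            # 'F#'
print(chord.join('Bb', quality='hdim7', bass='b7'))                # 'Bb:hdim7/b7'
print(chord.join('C', quality='maj', extensions=['9'], bass='3'))  # 'C:maj(9)/3'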
craffel/mir_eval
mir_eval/chord.py
encode
def encode(chord_label, reduce_extended_chords=False,
           strict_bass_intervals=False):
    """Translate a chord label to numerical representations for evaluation.

    Parameters
    ----------
    chord_label : str
        Chord label to encode.
    reduce_extended_chords : bool
        Whether to map the upper voicings of extended chords (9's, 11's,
        13's) to semitone extensions.
        (Default value = False)
    strict_bass_intervals : bool
        Whether to require that the bass scale degree is present in the
        chord.
        (Default value = False)

    Returns
    -------
    root_number : int
        Absolute semitone of the chord's root.
    semitone_bitmap : np.ndarray, dtype=int
        12-dim vector of relative semitones in the chord spelling.
    bass_number : int
        Relative semitone of the chord's bass note, e.g. 0=root, 7=fifth,
        etc.
    """
    if chord_label == NO_CHORD:
        return NO_CHORD_ENCODED

    if chord_label == X_CHORD:
        return X_CHORD_ENCODED

    chord_root, quality, scale_degrees, bass = split(
        chord_label, reduce_extended_chords=reduce_extended_chords)

    root_number = pitch_class_to_semitone(chord_root)
    bass_number = scale_degree_to_semitone(bass) % 12

    semitone_bitmap = quality_to_bitmap(quality)
    semitone_bitmap[0] = 1

    for scale_degree in scale_degrees:
        semitone_bitmap += scale_degree_to_bitmap(scale_degree,
                                                  reduce_extended_chords)

    semitone_bitmap = (semitone_bitmap > 0).astype(np.int)
    if not semitone_bitmap[bass_number] and strict_bass_intervals:
        raise InvalidChordException(
            "Given bass scale degree is absent from this chord: "
            "%s" % chord_label, chord_label)
    else:
        semitone_bitmap[bass_number] = 1
    return root_number, semitone_bitmap, bass_number
python
def encode(chord_label, reduce_extended_chords=False,
           strict_bass_intervals=False):
    """Translate a chord label to numerical representations for evaluation.

    Parameters
    ----------
    chord_label : str
        Chord label to encode.
    reduce_extended_chords : bool
        Whether to map the upper voicings of extended chords (9's, 11's,
        13's) to semitone extensions.
        (Default value = False)
    strict_bass_intervals : bool
        Whether to require that the bass scale degree is present in the
        chord.
        (Default value = False)

    Returns
    -------
    root_number : int
        Absolute semitone of the chord's root.
    semitone_bitmap : np.ndarray, dtype=int
        12-dim vector of relative semitones in the chord spelling.
    bass_number : int
        Relative semitone of the chord's bass note, e.g. 0=root, 7=fifth,
        etc.
    """
    if chord_label == NO_CHORD:
        return NO_CHORD_ENCODED

    if chord_label == X_CHORD:
        return X_CHORD_ENCODED

    chord_root, quality, scale_degrees, bass = split(
        chord_label, reduce_extended_chords=reduce_extended_chords)

    root_number = pitch_class_to_semitone(chord_root)
    bass_number = scale_degree_to_semitone(bass) % 12

    semitone_bitmap = quality_to_bitmap(quality)
    semitone_bitmap[0] = 1

    for scale_degree in scale_degrees:
        semitone_bitmap += scale_degree_to_bitmap(scale_degree,
                                                  reduce_extended_chords)

    semitone_bitmap = (semitone_bitmap > 0).astype(np.int)
    if not semitone_bitmap[bass_number] and strict_bass_intervals:
        raise InvalidChordException(
            "Given bass scale degree is absent from this chord: "
            "%s" % chord_label, chord_label)
    else:
        semitone_bitmap[bass_number] = 1
    return root_number, semitone_bitmap, bass_number
[ "def", "encode", "(", "chord_label", ",", "reduce_extended_chords", "=", "False", ",", "strict_bass_intervals", "=", "False", ")", ":", "if", "chord_label", "==", "NO_CHORD", ":", "return", "NO_CHORD_ENCODED", "if", "chord_label", "==", "X_CHORD", ":", "return", "X_CHORD_ENCODED", "chord_root", ",", "quality", ",", "scale_degrees", ",", "bass", "=", "split", "(", "chord_label", ",", "reduce_extended_chords", "=", "reduce_extended_chords", ")", "root_number", "=", "pitch_class_to_semitone", "(", "chord_root", ")", "bass_number", "=", "scale_degree_to_semitone", "(", "bass", ")", "%", "12", "semitone_bitmap", "=", "quality_to_bitmap", "(", "quality", ")", "semitone_bitmap", "[", "0", "]", "=", "1", "for", "scale_degree", "in", "scale_degrees", ":", "semitone_bitmap", "+=", "scale_degree_to_bitmap", "(", "scale_degree", ",", "reduce_extended_chords", ")", "semitone_bitmap", "=", "(", "semitone_bitmap", ">", "0", ")", ".", "astype", "(", "np", ".", "int", ")", "if", "not", "semitone_bitmap", "[", "bass_number", "]", "and", "strict_bass_intervals", ":", "raise", "InvalidChordException", "(", "\"Given bass scale degree is absent from this chord: \"", "\"%s\"", "%", "chord_label", ",", "chord_label", ")", "else", ":", "semitone_bitmap", "[", "bass_number", "]", "=", "1", "return", "root_number", ",", "semitone_bitmap", ",", "bass_number" ]
Translate a chord label to numerical representations for evaluation.

Parameters
----------
chord_label : str
    Chord label to encode.
reduce_extended_chords : bool
    Whether to map the upper voicings of extended chords (9's, 11's,
    13's) to semitone extensions.
    (Default value = False)
strict_bass_intervals : bool
    Whether to require that the bass scale degree is present in the
    chord.
    (Default value = False)

Returns
-------
root_number : int
    Absolute semitone of the chord's root.
semitone_bitmap : np.ndarray, dtype=int
    12-dim vector of relative semitones in the chord spelling.
bass_number : int
    Relative semitone of the chord's bass note, e.g. 0=root, 7=fifth,
    etc.
[ "Translate", "a", "chord", "label", "to", "numerical", "representations", "for", "evaluation", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L469-L520
train
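A decoding sketch, assuming mir_eval is installed; note that this snapshot uses the np.int alias, which NumPy removed in 1.24, so it needs an older NumPy to run as-is:

import mir_eval.chord as chord

root_number, semitone_bitmap, bass_number = chord.encode('G:min7/b7')
print(root_number)      # 7: G is seven semitones above C
print(semitone_bitmap)  # [1 0 0 1 0 0 0 1 0 0 1 0]: 1, b3, 5, b7 relative to the root
print(bass_number)      # 10: the b7 bass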
craffel/mir_eval
mir_eval/chord.py
encode_many
def encode_many(chord_labels, reduce_extended_chords=False):
    """Translate a set of chord labels to numerical representations for sane
    evaluation.

    Parameters
    ----------
    chord_labels : list
        Set of chord labels to encode.
    reduce_extended_chords : bool
        Whether to map the upper voicings of extended chords (9's, 11's,
        13's) to semitone extensions.
        (Default value = False)

    Returns
    -------
    root_number : np.ndarray, dtype=int
        Absolute semitone of the chord's root.
    interval_bitmap : np.ndarray, dtype=int
        12-dim vector of relative semitones in the given chord quality.
    bass_number : np.ndarray, dtype=int
        Relative semitones of the chord's bass notes.
    """
    num_items = len(chord_labels)
    roots, basses = np.zeros([2, num_items], dtype=np.int)
    semitones = np.zeros([num_items, 12], dtype=np.int)
    local_cache = dict()
    for i, label in enumerate(chord_labels):
        result = local_cache.get(label, None)
        if result is None:
            result = encode(label, reduce_extended_chords)
            local_cache[label] = result
        roots[i], semitones[i], basses[i] = result
    return roots, semitones, basses
python
def encode_many(chord_labels, reduce_extended_chords=False):
    """Translate a set of chord labels to numerical representations for sane
    evaluation.

    Parameters
    ----------
    chord_labels : list
        Set of chord labels to encode.
    reduce_extended_chords : bool
        Whether to map the upper voicings of extended chords (9's, 11's,
        13's) to semitone extensions.
        (Default value = False)

    Returns
    -------
    root_number : np.ndarray, dtype=int
        Absolute semitone of the chord's root.
    interval_bitmap : np.ndarray, dtype=int
        12-dim vector of relative semitones in the given chord quality.
    bass_number : np.ndarray, dtype=int
        Relative semitones of the chord's bass notes.
    """
    num_items = len(chord_labels)
    roots, basses = np.zeros([2, num_items], dtype=np.int)
    semitones = np.zeros([num_items, 12], dtype=np.int)
    local_cache = dict()
    for i, label in enumerate(chord_labels):
        result = local_cache.get(label, None)
        if result is None:
            result = encode(label, reduce_extended_chords)
            local_cache[label] = result
        roots[i], semitones[i], basses[i] = result
    return roots, semitones, basses
[ "def", "encode_many", "(", "chord_labels", ",", "reduce_extended_chords", "=", "False", ")", ":", "num_items", "=", "len", "(", "chord_labels", ")", "roots", ",", "basses", "=", "np", ".", "zeros", "(", "[", "2", ",", "num_items", "]", ",", "dtype", "=", "np", ".", "int", ")", "semitones", "=", "np", ".", "zeros", "(", "[", "num_items", ",", "12", "]", ",", "dtype", "=", "np", ".", "int", ")", "local_cache", "=", "dict", "(", ")", "for", "i", ",", "label", "in", "enumerate", "(", "chord_labels", ")", ":", "result", "=", "local_cache", ".", "get", "(", "label", ",", "None", ")", "if", "result", "is", "None", ":", "result", "=", "encode", "(", "label", ",", "reduce_extended_chords", ")", "local_cache", "[", "label", "]", "=", "result", "roots", "[", "i", "]", ",", "semitones", "[", "i", "]", ",", "basses", "[", "i", "]", "=", "result", "return", "roots", ",", "semitones", ",", "basses" ]
Translate a set of chord labels to numerical representations for sane
evaluation.

Parameters
----------
chord_labels : list
    Set of chord labels to encode.
reduce_extended_chords : bool
    Whether to map the upper voicings of extended chords (9's, 11's,
    13's) to semitone extensions.
    (Default value = False)

Returns
-------
root_number : np.ndarray, dtype=int
    Absolute semitone of the chord's root.
interval_bitmap : np.ndarray, dtype=int
    12-dim vector of relative semitones in the given chord quality.
bass_number : np.ndarray, dtype=int
    Relative semitones of the chord's bass notes.
[ "Translate", "a", "set", "of", "chord", "labels", "to", "numerical", "representations", "for", "sane", "evaluation", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L523-L556
train
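A sketch of the vectorized form, assuming mir_eval is installed; 'N' encodes to the all-minus-one/zero placeholders, and repeated labels hit the local cache:

import mir_eval.chord as chord

labels = ['N', 'C:maj', 'C:maj', 'A:min/b3']
roots, semitones, basses = chord.encode_many(labels)
print(roots)   # [-1  0  0  9]
print(basses)  # [-1  0  0  3]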
craffel/mir_eval
mir_eval/chord.py
rotate_bitmap_to_root
def rotate_bitmap_to_root(bitmap, chord_root):
    """Circularly shift a relative bitmap to its absolute pitch classes.

    For clarity, the best explanation is an example. Given 'G:Maj', the root
    and quality map are as follows::

        root=7
        quality=[1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0]  # Relative chord shape

    After rotating to the root, the resulting bitmap becomes::

        abs_quality = [0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1]  # G, B, and D

    Parameters
    ----------
    bitmap : np.ndarray, shape=(12,)
        Bitmap of active notes, relative to the given root.
    chord_root : int
        Absolute pitch class number.

    Returns
    -------
    bitmap : np.ndarray, shape=(12,)
        Absolute bitmap of active pitch classes.
    """
    bitmap = np.asarray(bitmap)
    assert bitmap.ndim == 1, "Currently only 1D bitmaps are supported."
    idxs = list(np.nonzero(bitmap))
    idxs[-1] = (idxs[-1] + chord_root) % 12
    abs_bitmap = np.zeros_like(bitmap)
    abs_bitmap[tuple(idxs)] = 1
    return abs_bitmap
python
def rotate_bitmap_to_root(bitmap, chord_root):
    """Circularly shift a relative bitmap to its absolute pitch classes.

    For clarity, the best explanation is an example. Given 'G:Maj', the root
    and quality map are as follows::

        root=7
        quality=[1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0]  # Relative chord shape

    After rotating to the root, the resulting bitmap becomes::

        abs_quality = [0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1]  # G, B, and D

    Parameters
    ----------
    bitmap : np.ndarray, shape=(12,)
        Bitmap of active notes, relative to the given root.
    chord_root : int
        Absolute pitch class number.

    Returns
    -------
    bitmap : np.ndarray, shape=(12,)
        Absolute bitmap of active pitch classes.
    """
    bitmap = np.asarray(bitmap)
    assert bitmap.ndim == 1, "Currently only 1D bitmaps are supported."
    idxs = list(np.nonzero(bitmap))
    idxs[-1] = (idxs[-1] + chord_root) % 12
    abs_bitmap = np.zeros_like(bitmap)
    abs_bitmap[tuple(idxs)] = 1
    return abs_bitmap
[ "def", "rotate_bitmap_to_root", "(", "bitmap", ",", "chord_root", ")", ":", "bitmap", "=", "np", ".", "asarray", "(", "bitmap", ")", "assert", "bitmap", ".", "ndim", "==", "1", ",", "\"Currently only 1D bitmaps are supported.\"", "idxs", "=", "list", "(", "np", ".", "nonzero", "(", "bitmap", ")", ")", "idxs", "[", "-", "1", "]", "=", "(", "idxs", "[", "-", "1", "]", "+", "chord_root", ")", "%", "12", "abs_bitmap", "=", "np", ".", "zeros_like", "(", "bitmap", ")", "abs_bitmap", "[", "tuple", "(", "idxs", ")", "]", "=", "1", "return", "abs_bitmap" ]
Circularly shift a relative bitmap to its absolute pitch classes.

For clarity, the best explanation is an example. Given 'G:Maj', the root
and quality map are as follows::

    root=7
    quality=[1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0]  # Relative chord shape

After rotating to the root, the resulting bitmap becomes::

    abs_quality = [0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1]  # G, B, and D

Parameters
----------
bitmap : np.ndarray, shape=(12,)
    Bitmap of active notes, relative to the given root.
chord_root : int
    Absolute pitch class number.

Returns
-------
bitmap : np.ndarray, shape=(12,)
    Absolute bitmap of active pitch classes.
[ "Circularly", "shift", "a", "relative", "bitmap", "to", "its", "asbolute", "pitch", "classes", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L559-L591
train
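Reproducing the docstring's G major example as runnable code, assuming mir_eval is installed:

import mir_eval.chord as chord

maj = chord.quality_to_bitmap('maj')        # relative shape: bits 0, 4, 7
print(chord.rotate_bitmap_to_root(maj, 7))  # absolute G major: bits 2, 7, 11 (D, G, B)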
craffel/mir_eval
mir_eval/chord.py
rotate_bitmaps_to_roots
def rotate_bitmaps_to_roots(bitmaps, roots):
    """Circularly shift relative bitmaps to absolute pitch classes.

    See :func:`rotate_bitmap_to_root` for more information.

    Parameters
    ----------
    bitmaps : np.ndarray, shape=(N, 12)
        Bitmaps of active notes, relative to the given roots.
    roots : np.ndarray, shape=(N,)
        Absolute pitch class numbers.

    Returns
    -------
    bitmaps : np.ndarray, shape=(N, 12)
        Absolute bitmaps of active pitch classes.
    """
    abs_bitmaps = []
    for bitmap, chord_root in zip(bitmaps, roots):
        abs_bitmaps.append(rotate_bitmap_to_root(bitmap, chord_root))
    return np.asarray(abs_bitmaps)
python
def rotate_bitmaps_to_roots(bitmaps, roots):
    """Circularly shift relative bitmaps to absolute pitch classes.

    See :func:`rotate_bitmap_to_root` for more information.

    Parameters
    ----------
    bitmaps : np.ndarray, shape=(N, 12)
        Bitmaps of active notes, relative to the given roots.
    roots : np.ndarray, shape=(N,)
        Absolute pitch class numbers.

    Returns
    -------
    bitmaps : np.ndarray, shape=(N, 12)
        Absolute bitmaps of active pitch classes.
    """
    abs_bitmaps = []
    for bitmap, chord_root in zip(bitmaps, roots):
        abs_bitmaps.append(rotate_bitmap_to_root(bitmap, chord_root))
    return np.asarray(abs_bitmaps)
[ "def", "rotate_bitmaps_to_roots", "(", "bitmaps", ",", "roots", ")", ":", "abs_bitmaps", "=", "[", "]", "for", "bitmap", ",", "chord_root", "in", "zip", "(", "bitmaps", ",", "roots", ")", ":", "abs_bitmaps", ".", "append", "(", "rotate_bitmap_to_root", "(", "bitmap", ",", "chord_root", ")", ")", "return", "np", ".", "asarray", "(", "abs_bitmaps", ")" ]
Circularly shift relative bitmaps to absolute pitch classes.

See :func:`rotate_bitmap_to_root` for more information.

Parameters
----------
bitmaps : np.ndarray, shape=(N, 12)
    Bitmaps of active notes, relative to the given roots.
roots : np.ndarray, shape=(N,)
    Absolute pitch class numbers.

Returns
-------
bitmaps : np.ndarray, shape=(N, 12)
    Absolute bitmaps of active pitch classes.
[ "Circularly", "shift", "a", "relative", "bitmaps", "to", "asbolute", "pitch", "classes", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L594-L615
train
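The batched counterpart in use, assuming mir_eval is installed:

import mir_eval.chord as chord

roots, semitones, _ = chord.encode_many(['C:maj', 'G:maj'])
print(chord.rotate_bitmaps_to_roots(semitones, roots))
# row 0: bits 0, 4, 7 (C, E, G); row 1: bits 2, 7, 11 (D, G, B)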
craffel/mir_eval
mir_eval/chord.py
validate
def validate(reference_labels, estimated_labels):
    """Checks that the input annotations to a comparison function look like
    valid chord labels.

    Parameters
    ----------
    reference_labels : list, len=n
        Reference chord labels to score against.
    estimated_labels : list, len=n
        Estimated chord labels to score against.

    """
    N = len(reference_labels)
    M = len(estimated_labels)
    if N != M:
        raise ValueError(
            "Chord comparison received different length lists: "
            "len(reference)=%d\tlen(estimates)=%d" % (N, M))
    for labels in [reference_labels, estimated_labels]:
        for chord_label in labels:
            validate_chord_label(chord_label)
    # When either label list is empty, warn the user
    if len(reference_labels) == 0:
        warnings.warn('Reference labels are empty')
    if len(estimated_labels) == 0:
        warnings.warn('Estimated labels are empty')
python
def validate(reference_labels, estimated_labels):
    """Checks that the input annotations to a comparison function look like
    valid chord labels.

    Parameters
    ----------
    reference_labels : list, len=n
        Reference chord labels to score against.
    estimated_labels : list, len=n
        Estimated chord labels to score against.

    """
    N = len(reference_labels)
    M = len(estimated_labels)
    if N != M:
        raise ValueError(
            "Chord comparison received different length lists: "
            "len(reference)=%d\tlen(estimates)=%d" % (N, M))
    for labels in [reference_labels, estimated_labels]:
        for chord_label in labels:
            validate_chord_label(chord_label)
    # When either label list is empty, warn the user
    if len(reference_labels) == 0:
        warnings.warn('Reference labels are empty')
    if len(estimated_labels) == 0:
        warnings.warn('Estimated labels are empty')
[ "def", "validate", "(", "reference_labels", ",", "estimated_labels", ")", ":", "N", "=", "len", "(", "reference_labels", ")", "M", "=", "len", "(", "estimated_labels", ")", "if", "N", "!=", "M", ":", "raise", "ValueError", "(", "\"Chord comparison received different length lists: \"", "\"len(reference)=%d\\tlen(estimates)=%d\"", "%", "(", "N", ",", "M", ")", ")", "for", "labels", "in", "[", "reference_labels", ",", "estimated_labels", "]", ":", "for", "chord_label", "in", "labels", ":", "validate_chord_label", "(", "chord_label", ")", "# When either label list is empty, warn the user", "if", "len", "(", "reference_labels", ")", "==", "0", ":", "warnings", ".", "warn", "(", "'Reference labels are empty'", ")", "if", "len", "(", "estimated_labels", ")", "==", "0", ":", "warnings", ".", "warn", "(", "'Estimated labels are empty'", ")" ]
Checks that the input annotations to a comparison function look like
valid chord labels.

Parameters
----------
reference_labels : list, len=n
    Reference chord labels to score against.
estimated_labels : list, len=n
    Estimated chord labels to score against.
[ "Checks", "that", "the", "input", "annotations", "to", "a", "comparison", "function", "look", "like", "valid", "chord", "labels", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L619-L644
train
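A sketch of both failure modes, assuming mir_eval is installed:

import mir_eval.chord as chord

chord.validate(['C:maj', 'N'], ['C:maj', 'A:min'])  # passes silently
try:
    chord.validate(['C:maj'], ['C:maj', 'A:min'])   # length mismatch
except ValueError as exc:
    print(exc)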
craffel/mir_eval
mir_eval/chord.py
weighted_accuracy
def weighted_accuracy(comparisons, weights):
    """Compute the weighted accuracy of a list of chord comparisons.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, ref_intervals.min(),
    ...     ref_intervals.max(), mir_eval.chord.NO_CHORD,
    ...     mir_eval.chord.NO_CHORD)
    >>> (intervals,
    ...  ref_labels,
    ...  est_labels) = mir_eval.util.merge_labeled_intervals(
    ...      ref_intervals, ref_labels, est_intervals, est_labels)
    >>> durations = mir_eval.util.intervals_to_durations(intervals)
    >>> # Here, we're using the "thirds" function to compare labels
    >>> # but any of the comparison functions would work.
    >>> comparisons = mir_eval.chord.thirds(ref_labels, est_labels)
    >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)

    Parameters
    ----------
    comparisons : np.ndarray
        List of chord comparison scores, in [0, 1] or -1
    weights : np.ndarray
        Weights (not necessarily normalized) for each comparison.
        This can be a list of interval durations

    Returns
    -------
    score : float
        Weighted accuracy
    """
    N = len(comparisons)
    # There should be as many weights as comparisons
    if weights.shape[0] != N:
        raise ValueError('weights and comparisons should be of the same'
                         ' length. len(weights) = {} but len(comparisons)'
                         ' = {}'.format(weights.shape[0], N))
    if (weights < 0).any():
        raise ValueError('Weights should all be positive.')
    if np.sum(weights) == 0:
        warnings.warn('No nonzero weights, returning 0')
        return 0
    # Find all comparison scores which are valid
    valid_idx = (comparisons >= 0)
    # If no comparable chords were provided, warn and return 0
    if valid_idx.sum() == 0:
        warnings.warn("No reference chords were comparable "
                      "to estimated chords, returning 0.")
        return 0
    # Remove any uncomparable labels
    comparisons = comparisons[valid_idx]
    weights = weights[valid_idx]
    # Normalize the weights
    total_weight = float(np.sum(weights))
    normalized_weights = np.asarray(weights, dtype=float)/total_weight
    # Score is the sum of all weighted comparisons
    return np.sum(comparisons*normalized_weights)
python
def weighted_accuracy(comparisons, weights):
    """Compute the weighted accuracy of a list of chord comparisons.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, ref_intervals.min(),
    ...     ref_intervals.max(), mir_eval.chord.NO_CHORD,
    ...     mir_eval.chord.NO_CHORD)
    >>> (intervals,
    ...  ref_labels,
    ...  est_labels) = mir_eval.util.merge_labeled_intervals(
    ...      ref_intervals, ref_labels, est_intervals, est_labels)
    >>> durations = mir_eval.util.intervals_to_durations(intervals)
    >>> # Here, we're using the "thirds" function to compare labels
    >>> # but any of the comparison functions would work.
    >>> comparisons = mir_eval.chord.thirds(ref_labels, est_labels)
    >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)

    Parameters
    ----------
    comparisons : np.ndarray
        List of chord comparison scores, in [0, 1] or -1
    weights : np.ndarray
        Weights (not necessarily normalized) for each comparison.
        This can be a list of interval durations

    Returns
    -------
    score : float
        Weighted accuracy
    """
    N = len(comparisons)
    # There should be as many weights as comparisons
    if weights.shape[0] != N:
        raise ValueError('weights and comparisons should be of the same'
                         ' length. len(weights) = {} but len(comparisons)'
                         ' = {}'.format(weights.shape[0], N))
    if (weights < 0).any():
        raise ValueError('Weights should all be positive.')
    if np.sum(weights) == 0:
        warnings.warn('No nonzero weights, returning 0')
        return 0
    # Find all comparison scores which are valid
    valid_idx = (comparisons >= 0)
    # If no comparable chords were provided, warn and return 0
    if valid_idx.sum() == 0:
        warnings.warn("No reference chords were comparable "
                      "to estimated chords, returning 0.")
        return 0
    # Remove any uncomparable labels
    comparisons = comparisons[valid_idx]
    weights = weights[valid_idx]
    # Normalize the weights
    total_weight = float(np.sum(weights))
    normalized_weights = np.asarray(weights, dtype=float)/total_weight
    # Score is the sum of all weighted comparisons
    return np.sum(comparisons*normalized_weights)
[ "def", "weighted_accuracy", "(", "comparisons", ",", "weights", ")", ":", "N", "=", "len", "(", "comparisons", ")", "# There should be as many weights as comparisons", "if", "weights", ".", "shape", "[", "0", "]", "!=", "N", ":", "raise", "ValueError", "(", "'weights and comparisons should be of the same'", "' length. len(weights) = {} but len(comparisons)'", "' = {}'", ".", "format", "(", "weights", ".", "shape", "[", "0", "]", ",", "N", ")", ")", "if", "(", "weights", "<", "0", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "'Weights should all be positive.'", ")", "if", "np", ".", "sum", "(", "weights", ")", "==", "0", ":", "warnings", ".", "warn", "(", "'No nonzero weights, returning 0'", ")", "return", "0", "# Find all comparison scores which are valid", "valid_idx", "=", "(", "comparisons", ">=", "0", ")", "# If no comparable chords were provided, warn and return 0", "if", "valid_idx", ".", "sum", "(", ")", "==", "0", ":", "warnings", ".", "warn", "(", "\"No reference chords were comparable \"", "\"to estimated chords, returning 0.\"", ")", "return", "0", "# Remove any uncomparable labels", "comparisons", "=", "comparisons", "[", "valid_idx", "]", "weights", "=", "weights", "[", "valid_idx", "]", "# Normalize the weights", "total_weight", "=", "float", "(", "np", ".", "sum", "(", "weights", ")", ")", "normalized_weights", "=", "np", ".", "asarray", "(", "weights", ",", "dtype", "=", "float", ")", "/", "total_weight", "# Score is the sum of all weighted comparisons", "return", "np", ".", "sum", "(", "comparisons", "*", "normalized_weights", ")" ]
Compute the weighted accuracy of a list of chord comparisons.

Examples
--------
>>> (ref_intervals,
...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
...     est_intervals, est_labels, ref_intervals.min(),
...     ref_intervals.max(), mir_eval.chord.NO_CHORD,
...     mir_eval.chord.NO_CHORD)
>>> (intervals,
...  ref_labels,
...  est_labels) = mir_eval.util.merge_labeled_intervals(
...      ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> # Here, we're using the "thirds" function to compare labels
>>> # but any of the comparison functions would work.
>>> comparisons = mir_eval.chord.thirds(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)

Parameters
----------
comparisons : np.ndarray
    List of chord comparison scores, in [0, 1] or -1
weights : np.ndarray
    Weights (not necessarily normalized) for each comparison.
    This can be a list of interval durations

Returns
-------
score : float
    Weighted accuracy
[ "Compute", "the", "weighted", "accuracy", "of", "a", "list", "of", "chord", "comparisons", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L647-L709
train
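A purely numeric sketch of the weighting, with hypothetical scores and durations; the -1 entry is dropped before the weights are normalized:

import numpy as np
import mir_eval.chord as chord

comparisons = np.array([1.0, 0.0, 1.0, -1.0])  # -1 marks an uncomparable segment
durations = np.array([2.0, 1.0, 1.0, 5.0])     # seconds per merged interval
print(chord.weighted_accuracy(comparisons, durations))  # (2 + 0 + 1) / (2 + 1 + 1) = 0.75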
craffel/mir_eval
mir_eval/chord.py
thirds
def thirds(reference_labels, estimated_labels):
    """Compare chords along root & third relationships.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, ref_intervals.min(),
    ...     ref_intervals.max(), mir_eval.chord.NO_CHORD,
    ...     mir_eval.chord.NO_CHORD)
    >>> (intervals,
    ...  ref_labels,
    ...  est_labels) = mir_eval.util.merge_labeled_intervals(
    ...      ref_intervals, ref_labels, est_intervals, est_labels)
    >>> durations = mir_eval.util.intervals_to_durations(intervals)
    >>> comparisons = mir_eval.chord.thirds(ref_labels, est_labels)
    >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)

    Parameters
    ----------
    reference_labels : list, len=n
        Reference chord labels to score against.
    estimated_labels : list, len=n
        Estimated chord labels to score against.

    Returns
    -------
    comparison_scores : np.ndarray, shape=(n,), dtype=float
        Comparison scores, in [0.0, 1.0]
    """
    validate(reference_labels, estimated_labels)
    ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
    est_roots, est_semitones = encode_many(estimated_labels, False)[:2]

    eq_roots = ref_roots == est_roots
    eq_thirds = ref_semitones[:, 3] == est_semitones[:, 3]
    comparison_scores = (eq_roots * eq_thirds).astype(np.float)

    # Ignore 'X' chords
    comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
    return comparison_scores
python
def thirds(reference_labels, estimated_labels):
    """Compare chords along root & third relationships.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, ref_intervals.min(),
    ...     ref_intervals.max(), mir_eval.chord.NO_CHORD,
    ...     mir_eval.chord.NO_CHORD)
    >>> (intervals,
    ...  ref_labels,
    ...  est_labels) = mir_eval.util.merge_labeled_intervals(
    ...      ref_intervals, ref_labels, est_intervals, est_labels)
    >>> durations = mir_eval.util.intervals_to_durations(intervals)
    >>> comparisons = mir_eval.chord.thirds(ref_labels, est_labels)
    >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)

    Parameters
    ----------
    reference_labels : list, len=n
        Reference chord labels to score against.
    estimated_labels : list, len=n
        Estimated chord labels to score against.

    Returns
    -------
    comparison_scores : np.ndarray, shape=(n,), dtype=float
        Comparison scores, in [0.0, 1.0]
    """
    validate(reference_labels, estimated_labels)
    ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
    est_roots, est_semitones = encode_many(estimated_labels, False)[:2]

    eq_roots = ref_roots == est_roots
    eq_thirds = ref_semitones[:, 3] == est_semitones[:, 3]
    comparison_scores = (eq_roots * eq_thirds).astype(np.float)

    # Ignore 'X' chords
    comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
    return comparison_scores
[ "def", "thirds", "(", "reference_labels", ",", "estimated_labels", ")", ":", "validate", "(", "reference_labels", ",", "estimated_labels", ")", "ref_roots", ",", "ref_semitones", "=", "encode_many", "(", "reference_labels", ",", "False", ")", "[", ":", "2", "]", "est_roots", ",", "est_semitones", "=", "encode_many", "(", "estimated_labels", ",", "False", ")", "[", ":", "2", "]", "eq_roots", "=", "ref_roots", "==", "est_roots", "eq_thirds", "=", "ref_semitones", "[", ":", ",", "3", "]", "==", "est_semitones", "[", ":", ",", "3", "]", "comparison_scores", "=", "(", "eq_roots", "*", "eq_thirds", ")", ".", "astype", "(", "np", ".", "float", ")", "# Ignore 'X' chords", "comparison_scores", "[", "np", ".", "any", "(", "ref_semitones", "<", "0", ",", "axis", "=", "1", ")", "]", "=", "-", "1.0", "return", "comparison_scores" ]
Compare chords along root & third relationships.

Examples
--------
>>> (ref_intervals,
...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
...     est_intervals, est_labels, ref_intervals.min(),
...     ref_intervals.max(), mir_eval.chord.NO_CHORD,
...     mir_eval.chord.NO_CHORD)
>>> (intervals,
...  ref_labels,
...  est_labels) = mir_eval.util.merge_labeled_intervals(
...      ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.thirds(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)

Parameters
----------
reference_labels : list, len=n
    Reference chord labels to score against.
estimated_labels : list, len=n
    Estimated chord labels to score against.

Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
    Comparison scores, in [0.0, 1.0]
[ "Compare", "chords", "along", "root", "&", "third", "relationships", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L712-L756
train
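A label-level sketch, assuming mir_eval is installed; only the root and the third have to agree:

import mir_eval.chord as chord

ref = ['C:maj', 'C:maj', 'C:maj']
est = ['C:min', 'C:maj7', 'D:maj']
print(chord.thirds(ref, est))  # [0. 1. 0.]: the minor third and the wrong root both fail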
craffel/mir_eval
mir_eval/chord.py
thirds_inv
def thirds_inv(reference_labels, estimated_labels):
    """Score chords along root, third, & bass relationships.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, ref_intervals.min(),
    ...     ref_intervals.max(), mir_eval.chord.NO_CHORD,
    ...     mir_eval.chord.NO_CHORD)
    >>> (intervals,
    ...  ref_labels,
    ...  est_labels) = mir_eval.util.merge_labeled_intervals(
    ...      ref_intervals, ref_labels, est_intervals, est_labels)
    >>> durations = mir_eval.util.intervals_to_durations(intervals)
    >>> comparisons = mir_eval.chord.thirds_inv(ref_labels, est_labels)
    >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)

    Parameters
    ----------
    reference_labels : list, len=n
        Reference chord labels to score against.
    estimated_labels : list, len=n
        Estimated chord labels to score against.

    Returns
    -------
    scores : np.ndarray, shape=(n,), dtype=float
        Comparison scores, in [0.0, 1.0]
    """
    validate(reference_labels, estimated_labels)
    ref_roots, ref_semitones, ref_bass = encode_many(reference_labels, False)
    est_roots, est_semitones, est_bass = encode_many(estimated_labels, False)

    eq_root = ref_roots == est_roots
    eq_bass = ref_bass == est_bass
    eq_third = ref_semitones[:, 3] == est_semitones[:, 3]
    comparison_scores = (eq_root * eq_third * eq_bass).astype(np.float)

    # Ignore 'X' chords
    comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
    return comparison_scores
python
def thirds_inv(reference_labels, estimated_labels):
    """Score chords along root, third, & bass relationships.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, ref_intervals.min(),
    ...     ref_intervals.max(), mir_eval.chord.NO_CHORD,
    ...     mir_eval.chord.NO_CHORD)
    >>> (intervals,
    ...  ref_labels,
    ...  est_labels) = mir_eval.util.merge_labeled_intervals(
    ...      ref_intervals, ref_labels, est_intervals, est_labels)
    >>> durations = mir_eval.util.intervals_to_durations(intervals)
    >>> comparisons = mir_eval.chord.thirds_inv(ref_labels, est_labels)
    >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)

    Parameters
    ----------
    reference_labels : list, len=n
        Reference chord labels to score against.
    estimated_labels : list, len=n
        Estimated chord labels to score against.

    Returns
    -------
    scores : np.ndarray, shape=(n,), dtype=float
        Comparison scores, in [0.0, 1.0]
    """
    validate(reference_labels, estimated_labels)
    ref_roots, ref_semitones, ref_bass = encode_many(reference_labels, False)
    est_roots, est_semitones, est_bass = encode_many(estimated_labels, False)

    eq_root = ref_roots == est_roots
    eq_bass = ref_bass == est_bass
    eq_third = ref_semitones[:, 3] == est_semitones[:, 3]
    comparison_scores = (eq_root * eq_third * eq_bass).astype(np.float)

    # Ignore 'X' chords
    comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
    return comparison_scores
[ "def", "thirds_inv", "(", "reference_labels", ",", "estimated_labels", ")", ":", "validate", "(", "reference_labels", ",", "estimated_labels", ")", "ref_roots", ",", "ref_semitones", ",", "ref_bass", "=", "encode_many", "(", "reference_labels", ",", "False", ")", "est_roots", ",", "est_semitones", ",", "est_bass", "=", "encode_many", "(", "estimated_labels", ",", "False", ")", "eq_root", "=", "ref_roots", "==", "est_roots", "eq_bass", "=", "ref_bass", "==", "est_bass", "eq_third", "=", "ref_semitones", "[", ":", ",", "3", "]", "==", "est_semitones", "[", ":", ",", "3", "]", "comparison_scores", "=", "(", "eq_root", "*", "eq_third", "*", "eq_bass", ")", ".", "astype", "(", "np", ".", "float", ")", "# Ignore 'X' chords", "comparison_scores", "[", "np", ".", "any", "(", "ref_semitones", "<", "0", ",", "axis", "=", "1", ")", "]", "=", "-", "1.0", "return", "comparison_scores" ]
Score chords along root, third, & bass relationships.

Examples
--------
>>> (ref_intervals,
...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
...     est_intervals, est_labels, ref_intervals.min(),
...     ref_intervals.max(), mir_eval.chord.NO_CHORD,
...     mir_eval.chord.NO_CHORD)
>>> (intervals,
...  ref_labels,
...  est_labels) = mir_eval.util.merge_labeled_intervals(
...      ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.thirds_inv(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)

Parameters
----------
reference_labels : list, len=n
    Reference chord labels to score against.
estimated_labels : list, len=n
    Estimated chord labels to score against.

Returns
-------
scores : np.ndarray, shape=(n,), dtype=float
    Comparison scores, in [0.0, 1.0]
[ "Score", "chords", "along", "root", "third", "&", "bass", "relationships", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L759-L804
train
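The inversion-sensitive variant in use, assuming mir_eval is installed:

import mir_eval.chord as chord

print(chord.thirds_inv(['C:maj/3'], ['C:maj']))     # [0.]: the bass disagrees
print(chord.thirds_inv(['C:maj/3'], ['C:maj7/3']))  # [1.]: root, third, and bass all agree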
craffel/mir_eval
mir_eval/chord.py
root
def root(reference_labels, estimated_labels):
    """Compare chords according to roots.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, ref_intervals.min(),
    ...     ref_intervals.max(), mir_eval.chord.NO_CHORD,
    ...     mir_eval.chord.NO_CHORD)
    >>> (intervals,
    ...  ref_labels,
    ...  est_labels) = mir_eval.util.merge_labeled_intervals(
    ...      ref_intervals, ref_labels, est_intervals, est_labels)
    >>> durations = mir_eval.util.intervals_to_durations(intervals)
    >>> comparisons = mir_eval.chord.root(ref_labels, est_labels)
    >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)

    Parameters
    ----------
    reference_labels : list, len=n
        Reference chord labels to score against.
    estimated_labels : list, len=n
        Estimated chord labels to score against.

    Returns
    -------
    comparison_scores : np.ndarray, shape=(n,), dtype=float
        Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of
        gamut.
    """
    validate(reference_labels, estimated_labels)
    ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
    est_roots = encode_many(estimated_labels, False)[0]
    comparison_scores = (ref_roots == est_roots).astype(np.float)

    # Ignore 'X' chords
    comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
    return comparison_scores
python
def root(reference_labels, estimated_labels):
    """Compare chords according to roots.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, ref_intervals.min(),
    ...     ref_intervals.max(), mir_eval.chord.NO_CHORD,
    ...     mir_eval.chord.NO_CHORD)
    >>> (intervals,
    ...  ref_labels,
    ...  est_labels) = mir_eval.util.merge_labeled_intervals(
    ...      ref_intervals, ref_labels, est_intervals, est_labels)
    >>> durations = mir_eval.util.intervals_to_durations(intervals)
    >>> comparisons = mir_eval.chord.root(ref_labels, est_labels)
    >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)

    Parameters
    ----------
    reference_labels : list, len=n
        Reference chord labels to score against.
    estimated_labels : list, len=n
        Estimated chord labels to score against.

    Returns
    -------
    comparison_scores : np.ndarray, shape=(n,), dtype=float
        Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of
        gamut.
    """
    validate(reference_labels, estimated_labels)
    ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
    est_roots = encode_many(estimated_labels, False)[0]
    comparison_scores = (ref_roots == est_roots).astype(np.float)

    # Ignore 'X' chords
    comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
    return comparison_scores
[ "def", "root", "(", "reference_labels", ",", "estimated_labels", ")", ":", "validate", "(", "reference_labels", ",", "estimated_labels", ")", "ref_roots", ",", "ref_semitones", "=", "encode_many", "(", "reference_labels", ",", "False", ")", "[", ":", "2", "]", "est_roots", "=", "encode_many", "(", "estimated_labels", ",", "False", ")", "[", "0", "]", "comparison_scores", "=", "(", "ref_roots", "==", "est_roots", ")", ".", "astype", "(", "np", ".", "float", ")", "# Ignore 'X' chords", "comparison_scores", "[", "np", ".", "any", "(", "ref_semitones", "<", "0", ",", "axis", "=", "1", ")", "]", "=", "-", "1.0", "return", "comparison_scores" ]
Compare chords according to roots.

Examples
--------
>>> (ref_intervals,
...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
...     est_intervals, est_labels, ref_intervals.min(),
...     ref_intervals.max(), mir_eval.chord.NO_CHORD,
...     mir_eval.chord.NO_CHORD)
>>> (intervals,
...  ref_labels,
...  est_labels) = mir_eval.util.merge_labeled_intervals(
...      ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.root(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)

Parameters
----------
reference_labels : list, len=n
    Reference chord labels to score against.
estimated_labels : list, len=n
    Estimated chord labels to score against.

Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
    Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of
    gamut.
[ "Compare", "chords", "according", "to", "roots", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L999-L1042
train
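A sketch of root-only comparison, assuming mir_eval is installed; enharmonic spellings share a pitch class, and 'X' is marked out of gamut:

import mir_eval.chord as chord

print(chord.root(['C:maj', 'Db:min', 'X'], ['C:min', 'C#:maj', 'C:maj']))
# [ 1.  1. -1.]: quality is ignored, Db == C#, and 'X' yields -1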
craffel/mir_eval
mir_eval/chord.py
mirex
def mirex(reference_labels, estimated_labels):
    """Compare chords along MIREX rules.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, ref_intervals.min(),
    ...     ref_intervals.max(), mir_eval.chord.NO_CHORD,
    ...     mir_eval.chord.NO_CHORD)
    >>> (intervals,
    ...  ref_labels,
    ...  est_labels) = mir_eval.util.merge_labeled_intervals(
    ...      ref_intervals, ref_labels, est_intervals, est_labels)
    >>> durations = mir_eval.util.intervals_to_durations(intervals)
    >>> comparisons = mir_eval.chord.mirex(ref_labels, est_labels)
    >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)

    Parameters
    ----------
    reference_labels : list, len=n
        Reference chord labels to score against.
    estimated_labels : list, len=n
        Estimated chord labels to score against.

    Returns
    -------
    comparison_scores : np.ndarray, shape=(n,), dtype=float
        Comparison scores, in [0.0, 1.0]
    """
    validate(reference_labels, estimated_labels)
    # TODO(?): Should this be an argument?
    min_intersection = 3
    ref_data = encode_many(reference_labels, False)
    ref_chroma = rotate_bitmaps_to_roots(ref_data[1], ref_data[0])
    est_data = encode_many(estimated_labels, False)
    est_chroma = rotate_bitmaps_to_roots(est_data[1], est_data[0])

    eq_chroma = (ref_chroma * est_chroma).sum(axis=-1)

    # Chroma matching for set bits
    comparison_scores = (eq_chroma >= min_intersection).astype(np.float)

    # No-chord matching; match -1 roots, SKIP_CHORDS dropped next
    no_root = np.logical_and(ref_data[0] == -1, est_data[0] == -1)
    comparison_scores[no_root] = 1.0

    # Skip chords where the number of active semitones `n` is
    # 0 < n < `min_intersection`.
    ref_semitone_count = (ref_data[1] > 0).sum(axis=1)
    skip_idx = np.logical_and(ref_semitone_count > 0,
                              ref_semitone_count < min_intersection)
    # Also ignore 'X' chords.
    np.logical_or(skip_idx, np.any(ref_data[1] < 0, axis=1), skip_idx)
    comparison_scores[skip_idx] = -1.0
    return comparison_scores
python
def mirex(reference_labels, estimated_labels):
    """Compare chords along MIREX rules.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, ref_intervals.min(),
    ...     ref_intervals.max(), mir_eval.chord.NO_CHORD,
    ...     mir_eval.chord.NO_CHORD)
    >>> (intervals,
    ...  ref_labels,
    ...  est_labels) = mir_eval.util.merge_labeled_intervals(
    ...      ref_intervals, ref_labels, est_intervals, est_labels)
    >>> durations = mir_eval.util.intervals_to_durations(intervals)
    >>> comparisons = mir_eval.chord.mirex(ref_labels, est_labels)
    >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)

    Parameters
    ----------
    reference_labels : list, len=n
        Reference chord labels to score against.
    estimated_labels : list, len=n
        Estimated chord labels to score against.

    Returns
    -------
    comparison_scores : np.ndarray, shape=(n,), dtype=float
        Comparison scores, in [0.0, 1.0]
    """
    validate(reference_labels, estimated_labels)
    # TODO(?): Should this be an argument?
    min_intersection = 3
    ref_data = encode_many(reference_labels, False)
    ref_chroma = rotate_bitmaps_to_roots(ref_data[1], ref_data[0])
    est_data = encode_many(estimated_labels, False)
    est_chroma = rotate_bitmaps_to_roots(est_data[1], est_data[0])

    eq_chroma = (ref_chroma * est_chroma).sum(axis=-1)

    # Chroma matching for set bits
    comparison_scores = (eq_chroma >= min_intersection).astype(np.float)

    # No-chord matching; match -1 roots, SKIP_CHORDS dropped next
    no_root = np.logical_and(ref_data[0] == -1, est_data[0] == -1)
    comparison_scores[no_root] = 1.0

    # Skip chords where the number of active semitones `n` is
    # 0 < n < `min_intersection`.
    ref_semitone_count = (ref_data[1] > 0).sum(axis=1)
    skip_idx = np.logical_and(ref_semitone_count > 0,
                              ref_semitone_count < min_intersection)
    # Also ignore 'X' chords.
    np.logical_or(skip_idx, np.any(ref_data[1] < 0, axis=1), skip_idx)
    comparison_scores[skip_idx] = -1.0
    return comparison_scores
[ "def", "mirex", "(", "reference_labels", ",", "estimated_labels", ")", ":", "validate", "(", "reference_labels", ",", "estimated_labels", ")", "# TODO(?): Should this be an argument?", "min_intersection", "=", "3", "ref_data", "=", "encode_many", "(", "reference_labels", ",", "False", ")", "ref_chroma", "=", "rotate_bitmaps_to_roots", "(", "ref_data", "[", "1", "]", ",", "ref_data", "[", "0", "]", ")", "est_data", "=", "encode_many", "(", "estimated_labels", ",", "False", ")", "est_chroma", "=", "rotate_bitmaps_to_roots", "(", "est_data", "[", "1", "]", ",", "est_data", "[", "0", "]", ")", "eq_chroma", "=", "(", "ref_chroma", "*", "est_chroma", ")", ".", "sum", "(", "axis", "=", "-", "1", ")", "# Chroma matching for set bits", "comparison_scores", "=", "(", "eq_chroma", ">=", "min_intersection", ")", ".", "astype", "(", "np", ".", "float", ")", "# No-chord matching; match -1 roots, SKIP_CHORDS dropped next", "no_root", "=", "np", ".", "logical_and", "(", "ref_data", "[", "0", "]", "==", "-", "1", ",", "est_data", "[", "0", "]", "==", "-", "1", ")", "comparison_scores", "[", "no_root", "]", "=", "1.0", "# Skip chords where the number of active semitones `n` is", "# 0 < n < `min_intersection`.", "ref_semitone_count", "=", "(", "ref_data", "[", "1", "]", ">", "0", ")", ".", "sum", "(", "axis", "=", "1", ")", "skip_idx", "=", "np", ".", "logical_and", "(", "ref_semitone_count", ">", "0", ",", "ref_semitone_count", "<", "min_intersection", ")", "# Also ignore 'X' chords.", "np", ".", "logical_or", "(", "skip_idx", ",", "np", ".", "any", "(", "ref_data", "[", "1", "]", "<", "0", ",", "axis", "=", "1", ")", ",", "skip_idx", ")", "comparison_scores", "[", "skip_idx", "]", "=", "-", "1.0", "return", "comparison_scores" ]
Compare chords along MIREX rules.

Examples
--------
>>> (ref_intervals,
...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
...     est_intervals, est_labels, ref_intervals.min(),
...     ref_intervals.max(), mir_eval.chord.NO_CHORD,
...     mir_eval.chord.NO_CHORD)
>>> (intervals,
...  ref_labels,
...  est_labels) = mir_eval.util.merge_labeled_intervals(
...      ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.mirex(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)

Parameters
----------
reference_labels : list, len=n
    Reference chord labels to score against.
estimated_labels : list, len=n
    Estimated chord labels to score against.

Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
    Comparison scores, in [0.0, 1.0]
[ "Compare", "chords", "along", "MIREX", "rules", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L1045-L1104
train
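The >= min_intersection rule above can be checked by hand by intersecting chroma bitmaps directly. The sketch below is illustrative only: the bitmaps are hand-encoded (C = pitch class 0) rather than produced by encode_many / rotate_bitmaps_to_roots, and the chords are made up.

import numpy as np

# Hand-encoded absolute chroma bitmaps (12 semitones, C = 0).
c_maj = np.zeros(12); c_maj[[0, 4, 7]] = 1        # C:maj  -> C, E, G
c_maj7 = np.zeros(12); c_maj7[[0, 4, 7, 11]] = 1  # C:maj7 -> C, E, G, B
a_min = np.zeros(12); a_min[[9, 0, 4]] = 1        # A:min  -> A, C, E

min_intersection = 3
print((c_maj * c_maj7).sum() >= min_intersection)  # True: 3 shared semitones
print((c_maj * a_min).sum() >= min_intersection)   # False: only C and E overlap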
craffel/mir_eval
mir_eval/chord.py
seg
def seg(reference_intervals, estimated_intervals): """Compute the MIREX 'MeanSeg' score. Examples -------- >>> (ref_intervals, ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab') >>> (est_intervals, ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab') >>> score = mir_eval.chord.seg(ref_intervals, est_intervals) Parameters ---------- reference_intervals : np.ndarray, shape=(n, 2), dtype=float Reference chord intervals to score against. estimated_intervals : np.ndarray, shape=(m, 2), dtype=float Estimated chord intervals to score against. Returns ------- segmentation score : float Comparison score, in [0.0, 1.0], where 1.0 means perfect segmentation. """ return min(underseg(reference_intervals, estimated_intervals), overseg(reference_intervals, estimated_intervals))
python
def seg(reference_intervals, estimated_intervals): """Compute the MIREX 'MeanSeg' score. Examples -------- >>> (ref_intervals, ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab') >>> (est_intervals, ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab') >>> score = mir_eval.chord.seg(ref_intervals, est_intervals) Parameters ---------- reference_intervals : np.ndarray, shape=(n, 2), dtype=float Reference chord intervals to score against. estimated_intervals : np.ndarray, shape=(m, 2), dtype=float Estimated chord intervals to score against. Returns ------- segmentation score : float Comparison score, in [0.0, 1.0], where 1.0 means perfect segmentation. """ return min(underseg(reference_intervals, estimated_intervals), overseg(reference_intervals, estimated_intervals))
[ "def", "seg", "(", "reference_intervals", ",", "estimated_intervals", ")", ":", "return", "min", "(", "underseg", "(", "reference_intervals", ",", "estimated_intervals", ")", ",", "overseg", "(", "reference_intervals", ",", "estimated_intervals", ")", ")" ]
Compute the MIREX 'MeanSeg' score. Examples -------- >>> (ref_intervals, ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab') >>> (est_intervals, ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab') >>> score = mir_eval.chord.seg(ref_intervals, est_intervals) Parameters ---------- reference_intervals : np.ndarray, shape=(n, 2), dtype=float Reference chord intervals to score against. estimated_intervals : np.ndarray, shape=(m, 2), dtype=float Estimated chord intervals to score against. Returns ------- segmentation score : float Comparison score, in [0.0, 1.0], where 1.0 means perfect segmentation.
[ "Compute", "the", "MIREX", "MeanSeg", "score", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L1455-L1480
train
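Because seg is just the minimum of the under- and over-segmentation scores, a toy pair of interval lists shows which side drives the penalty. The intervals below are invented; this assumes mir_eval is installed and importable.

import numpy as np
import mir_eval

ref = np.array([[0.0, 2.0], [2.0, 4.0]])  # reference changes chord at t=2
est = np.array([[0.0, 4.0]])              # estimate never segments
print(mir_eval.chord.seg(ref, est))       # well below 1.0: the estimate under-segments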
craffel/mir_eval
mir_eval/chord.py
merge_chord_intervals
def merge_chord_intervals(intervals, labels): """ Merge consecutive chord intervals if they represent the same chord. Parameters ---------- intervals : np.ndarray, shape=(n, 2), dtype=float Chord intervals to be merged, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. labels : list, shape=(n,) Chord labels to be merged, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. Returns ------- merged_ivs : np.ndarray, shape=(k, 2), dtype=float Merged chord intervals, k <= n """ roots, semitones, basses = encode_many(labels, True) merged_ivs = [] prev_rt = None prev_st = None prev_ba = None for s, e, rt, st, ba in zip(intervals[:, 0], intervals[:, 1], roots, semitones, basses): if rt != prev_rt or (st != prev_st).any() or ba != prev_ba: prev_rt, prev_st, prev_ba = rt, st, ba merged_ivs.append([s, e]) else: merged_ivs[-1][-1] = e return np.array(merged_ivs)
python
def merge_chord_intervals(intervals, labels): """ Merge consecutive chord intervals if they represent the same chord. Parameters ---------- intervals : np.ndarray, shape=(n, 2), dtype=float Chord intervals to be merged, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. labels : list, shape=(n,) Chord labels to be merged, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. Returns ------- merged_ivs : np.ndarray, shape=(k, 2), dtype=float Merged chord intervals, k <= n """ roots, semitones, basses = encode_many(labels, True) merged_ivs = [] prev_rt = None prev_st = None prev_ba = None for s, e, rt, st, ba in zip(intervals[:, 0], intervals[:, 1], roots, semitones, basses): if rt != prev_rt or (st != prev_st).any() or ba != prev_ba: prev_rt, prev_st, prev_ba = rt, st, ba merged_ivs.append([s, e]) else: merged_ivs[-1][-1] = e return np.array(merged_ivs)
[ "def", "merge_chord_intervals", "(", "intervals", ",", "labels", ")", ":", "roots", ",", "semitones", ",", "basses", "=", "encode_many", "(", "labels", ",", "True", ")", "merged_ivs", "=", "[", "]", "prev_rt", "=", "None", "prev_st", "=", "None", "prev_ba", "=", "None", "for", "s", ",", "e", ",", "rt", ",", "st", ",", "ba", "in", "zip", "(", "intervals", "[", ":", ",", "0", "]", ",", "intervals", "[", ":", ",", "1", "]", ",", "roots", ",", "semitones", ",", "basses", ")", ":", "if", "rt", "!=", "prev_rt", "or", "(", "st", "!=", "prev_st", ")", ".", "any", "(", ")", "or", "ba", "!=", "prev_ba", ":", "prev_rt", ",", "prev_st", ",", "prev_ba", "=", "rt", ",", "st", ",", "ba", "merged_ivs", ".", "append", "(", "[", "s", ",", "e", "]", ")", "else", ":", "merged_ivs", "[", "-", "1", "]", "[", "-", "1", "]", "=", "e", "return", "np", ".", "array", "(", "merged_ivs", ")" ]
Merge consecutive chord intervals if they represent the same chord. Parameters ---------- intervals : np.ndarray, shape=(n, 2), dtype=float Chord intervals to be merged, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. labels : list, shape=(n,) Chord labels to be merged, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. Returns ------- merged_ivs : np.ndarray, shape=(k, 2), dtype=float Merged chord intervals, k <= n
[ "Merge", "consecutive", "chord", "intervals", "if", "they", "represent", "the", "same", "chord", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L1483-L1514
train
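Merging only collapses consecutive intervals whose encoded root, semitone bitmap, and bass all agree. A minimal sketch with invented intervals and labels:

import numpy as np
import mir_eval

intervals = np.array([[0.0, 1.0], [1.0, 2.0], [2.0, 3.0]])
labels = ['C:maj', 'C:maj', 'G:maj']
print(mir_eval.chord.merge_chord_intervals(intervals, labels))
# -> [[0. 2.]
#     [2. 3.]]  (the two C:maj intervals merge, the G:maj stays separate)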
craffel/mir_eval
mir_eval/chord.py
evaluate
def evaluate(ref_intervals, ref_labels, est_intervals, est_labels, **kwargs): """Computes weighted accuracy for all comparison functions for the given reference and estimated annotations. Examples -------- >>> (ref_intervals, ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab') >>> (est_intervals, ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab') >>> scores = mir_eval.chord.evaluate(ref_intervals, ref_labels, ... est_intervals, est_labels) Parameters ---------- ref_intervals : np.ndarray, shape=(n, 2) Reference chord intervals, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. ref_labels : list, shape=(n,) reference chord labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. est_intervals : np.ndarray, shape=(m, 2) estimated chord intervals, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. est_labels : list, shape=(m,) estimated chord labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. kwargs Additional keyword arguments which will be passed to the appropriate metric or preprocessing functions. Returns ------- scores : dict Dictionary of scores, where the key is the metric name (str) and the value is the (float) score achieved. """ # Append or crop estimated intervals so their span is the same as reference est_intervals, est_labels = util.adjust_intervals( est_intervals, est_labels, ref_intervals.min(), ref_intervals.max(), NO_CHORD, NO_CHORD) # use merged intervals for segmentation evaluation merged_ref_intervals = merge_chord_intervals(ref_intervals, ref_labels) merged_est_intervals = merge_chord_intervals(est_intervals, est_labels) # Adjust the labels so that they span the same intervals intervals, ref_labels, est_labels = util.merge_labeled_intervals( ref_intervals, ref_labels, est_intervals, est_labels) # Convert intervals to durations (used as weights) durations = util.intervals_to_durations(intervals) # Store scores for each comparison function scores = collections.OrderedDict() scores['thirds'] = weighted_accuracy(thirds(ref_labels, est_labels), durations) scores['thirds_inv'] = weighted_accuracy(thirds_inv(ref_labels, est_labels), durations) scores['triads'] = weighted_accuracy(triads(ref_labels, est_labels), durations) scores['triads_inv'] = weighted_accuracy(triads_inv(ref_labels, est_labels), durations) scores['tetrads'] = weighted_accuracy(tetrads(ref_labels, est_labels), durations) scores['tetrads_inv'] = weighted_accuracy(tetrads_inv(ref_labels, est_labels), durations) scores['root'] = weighted_accuracy(root(ref_labels, est_labels), durations) scores['mirex'] = weighted_accuracy(mirex(ref_labels, est_labels), durations) scores['majmin'] = weighted_accuracy(majmin(ref_labels, est_labels), durations) scores['majmin_inv'] = weighted_accuracy(majmin_inv(ref_labels, est_labels), durations) scores['sevenths'] = weighted_accuracy(sevenths(ref_labels, est_labels), durations) scores['sevenths_inv'] = weighted_accuracy(sevenths_inv(ref_labels, est_labels), durations) scores['underseg'] = underseg(merged_ref_intervals, merged_est_intervals) scores['overseg'] = overseg(merged_ref_intervals, merged_est_intervals) scores['seg'] = min(scores['overseg'], scores['underseg']) return scores
python
def evaluate(ref_intervals, ref_labels, est_intervals, est_labels, **kwargs): """Computes weighted accuracy for all comparison functions for the given reference and estimated annotations. Examples -------- >>> (ref_intervals, ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab') >>> (est_intervals, ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab') >>> scores = mir_eval.chord.evaluate(ref_intervals, ref_labels, ... est_intervals, est_labels) Parameters ---------- ref_intervals : np.ndarray, shape=(n, 2) Reference chord intervals, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. ref_labels : list, shape=(n,) reference chord labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. est_intervals : np.ndarray, shape=(m, 2) estimated chord intervals, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. est_labels : list, shape=(m,) estimated chord labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. kwargs Additional keyword arguments which will be passed to the appropriate metric or preprocessing functions. Returns ------- scores : dict Dictionary of scores, where the key is the metric name (str) and the value is the (float) score achieved. """ # Append or crop estimated intervals so their span is the same as reference est_intervals, est_labels = util.adjust_intervals( est_intervals, est_labels, ref_intervals.min(), ref_intervals.max(), NO_CHORD, NO_CHORD) # use merged intervals for segmentation evaluation merged_ref_intervals = merge_chord_intervals(ref_intervals, ref_labels) merged_est_intervals = merge_chord_intervals(est_intervals, est_labels) # Adjust the labels so that they span the same intervals intervals, ref_labels, est_labels = util.merge_labeled_intervals( ref_intervals, ref_labels, est_intervals, est_labels) # Convert intervals to durations (used as weights) durations = util.intervals_to_durations(intervals) # Store scores for each comparison function scores = collections.OrderedDict() scores['thirds'] = weighted_accuracy(thirds(ref_labels, est_labels), durations) scores['thirds_inv'] = weighted_accuracy(thirds_inv(ref_labels, est_labels), durations) scores['triads'] = weighted_accuracy(triads(ref_labels, est_labels), durations) scores['triads_inv'] = weighted_accuracy(triads_inv(ref_labels, est_labels), durations) scores['tetrads'] = weighted_accuracy(tetrads(ref_labels, est_labels), durations) scores['tetrads_inv'] = weighted_accuracy(tetrads_inv(ref_labels, est_labels), durations) scores['root'] = weighted_accuracy(root(ref_labels, est_labels), durations) scores['mirex'] = weighted_accuracy(mirex(ref_labels, est_labels), durations) scores['majmin'] = weighted_accuracy(majmin(ref_labels, est_labels), durations) scores['majmin_inv'] = weighted_accuracy(majmin_inv(ref_labels, est_labels), durations) scores['sevenths'] = weighted_accuracy(sevenths(ref_labels, est_labels), durations) scores['sevenths_inv'] = weighted_accuracy(sevenths_inv(ref_labels, est_labels), durations) scores['underseg'] = underseg(merged_ref_intervals, merged_est_intervals) scores['overseg'] = overseg(merged_ref_intervals, merged_est_intervals) scores['seg'] = min(scores['overseg'], scores['underseg']) return scores
[ "def", "evaluate", "(", "ref_intervals", ",", "ref_labels", ",", "est_intervals", ",", "est_labels", ",", "*", "*", "kwargs", ")", ":", "# Append or crop estimated intervals so their span is the same as reference", "est_intervals", ",", "est_labels", "=", "util", ".", "adjust_intervals", "(", "est_intervals", ",", "est_labels", ",", "ref_intervals", ".", "min", "(", ")", ",", "ref_intervals", ".", "max", "(", ")", ",", "NO_CHORD", ",", "NO_CHORD", ")", "# use merged intervals for segmentation evaluation", "merged_ref_intervals", "=", "merge_chord_intervals", "(", "ref_intervals", ",", "ref_labels", ")", "merged_est_intervals", "=", "merge_chord_intervals", "(", "est_intervals", ",", "est_labels", ")", "# Adjust the labels so that they span the same intervals", "intervals", ",", "ref_labels", ",", "est_labels", "=", "util", ".", "merge_labeled_intervals", "(", "ref_intervals", ",", "ref_labels", ",", "est_intervals", ",", "est_labels", ")", "# Convert intervals to durations (used as weights)", "durations", "=", "util", ".", "intervals_to_durations", "(", "intervals", ")", "# Store scores for each comparison function", "scores", "=", "collections", ".", "OrderedDict", "(", ")", "scores", "[", "'thirds'", "]", "=", "weighted_accuracy", "(", "thirds", "(", "ref_labels", ",", "est_labels", ")", ",", "durations", ")", "scores", "[", "'thirds_inv'", "]", "=", "weighted_accuracy", "(", "thirds_inv", "(", "ref_labels", ",", "est_labels", ")", ",", "durations", ")", "scores", "[", "'triads'", "]", "=", "weighted_accuracy", "(", "triads", "(", "ref_labels", ",", "est_labels", ")", ",", "durations", ")", "scores", "[", "'triads_inv'", "]", "=", "weighted_accuracy", "(", "triads_inv", "(", "ref_labels", ",", "est_labels", ")", ",", "durations", ")", "scores", "[", "'tetrads'", "]", "=", "weighted_accuracy", "(", "tetrads", "(", "ref_labels", ",", "est_labels", ")", ",", "durations", ")", "scores", "[", "'tetrads_inv'", "]", "=", "weighted_accuracy", "(", "tetrads_inv", "(", "ref_labels", ",", "est_labels", ")", ",", "durations", ")", "scores", "[", "'root'", "]", "=", "weighted_accuracy", "(", "root", "(", "ref_labels", ",", "est_labels", ")", ",", "durations", ")", "scores", "[", "'mirex'", "]", "=", "weighted_accuracy", "(", "mirex", "(", "ref_labels", ",", "est_labels", ")", ",", "durations", ")", "scores", "[", "'majmin'", "]", "=", "weighted_accuracy", "(", "majmin", "(", "ref_labels", ",", "est_labels", ")", ",", "durations", ")", "scores", "[", "'majmin_inv'", "]", "=", "weighted_accuracy", "(", "majmin_inv", "(", "ref_labels", ",", "est_labels", ")", ",", "durations", ")", "scores", "[", "'sevenths'", "]", "=", "weighted_accuracy", "(", "sevenths", "(", "ref_labels", ",", "est_labels", ")", ",", "durations", ")", "scores", "[", "'sevenths_inv'", "]", "=", "weighted_accuracy", "(", "sevenths_inv", "(", "ref_labels", ",", "est_labels", ")", ",", "durations", ")", "scores", "[", "'underseg'", "]", "=", "underseg", "(", "merged_ref_intervals", ",", "merged_est_intervals", ")", "scores", "[", "'overseg'", "]", "=", "overseg", "(", "merged_ref_intervals", ",", "merged_est_intervals", ")", "scores", "[", "'seg'", "]", "=", "min", "(", "scores", "[", "'overseg'", "]", ",", "scores", "[", "'underseg'", "]", ")", "return", "scores" ]
Computes weighted accuracy for all comparison functions for the given reference and estimated annotations. Examples -------- >>> (ref_intervals, ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab') >>> (est_intervals, ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab') >>> scores = mir_eval.chord.evaluate(ref_intervals, ref_labels, ... est_intervals, est_labels) Parameters ---------- ref_intervals : np.ndarray, shape=(n, 2) Reference chord intervals, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. ref_labels : list, shape=(n,) reference chord labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. est_intervals : np.ndarray, shape=(m, 2) estimated chord intervals, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. est_labels : list, shape=(m,) estimated chord labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. kwargs Additional keyword arguments which will be passed to the appropriate metric or preprocessing functions. Returns ------- scores : dict Dictionary of scores, where the key is the metric name (str) and the value is the (float) score achieved.
[ "Computes", "weighted", "accuracy", "for", "all", "comparison", "functions", "for", "the", "given", "reference", "and", "estimated", "annotations", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L1517-L1604
train
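Since evaluate returns an OrderedDict keyed by metric name, the full results table can be printed in one loop. A sketch with synthetic annotations instead of .lab files (all values invented):

import numpy as np
import mir_eval

ref_iv = np.array([[0.0, 2.0], [2.0, 4.0]])
ref_lab = ['C:maj', 'G:maj']
est_iv = np.array([[0.0, 2.5], [2.5, 4.0]])
est_lab = ['C:maj', 'G:min']

# Print every metric in the order evaluate computes them.
for name, value in mir_eval.chord.evaluate(ref_iv, ref_lab, est_iv, est_lab).items():
    print('{:>12s}: {:.3f}'.format(name, value))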
craffel/mir_eval
mir_eval/pattern.py
_n_onset_midi
def _n_onset_midi(patterns): """Computes the number of onset_midi objects in a pattern Parameters ---------- patterns : A list of patterns using the format returned by :func:`mir_eval.io.load_patterns()` Returns ------- n_onsets : int Number of onsets within the pattern. """ return len([o_m for pat in patterns for occ in pat for o_m in occ])
python
def _n_onset_midi(patterns): """Computes the number of onset_midi objects in a pattern Parameters ---------- patterns : A list of patterns using the format returned by :func:`mir_eval.io.load_patterns()` Returns ------- n_onsets : int Number of onsets within the pattern. """ return len([o_m for pat in patterns for occ in pat for o_m in occ])
[ "def", "_n_onset_midi", "(", "patterns", ")", ":", "return", "len", "(", "[", "o_m", "for", "pat", "in", "patterns", "for", "occ", "in", "pat", "for", "o_m", "in", "occ", "]", ")" ]
Computes the number of onset_midi objects in a pattern Parameters ---------- patterns : A list of patterns using the format returned by :func:`mir_eval.io.load_patterns()` Returns ------- n_onsets : int Number of onsets within the pattern.
[ "Computes", "the", "number", "of", "onset_midi", "objects", "in", "a", "pattern" ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L64-L79
train
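The pattern data structure that _n_onset_midi walks is triply nested: a list of patterns, each pattern a list of occurrences, each occurrence a list of (onset, midi) pairs. A hand-built example of that shape (values invented):

occurrence_1 = [(0.0, 60.0), (1.0, 64.0)]   # two notes
occurrence_2 = [(8.0, 62.0), (9.0, 66.0)]   # the same pattern, transposed
pattern = [occurrence_1, occurrence_2]
patterns = [pattern]

# Equivalent to _n_onset_midi(patterns): count every (onset, midi) pair.
print(len([om for pat in patterns for occ in pat for om in occ]))  # 4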
craffel/mir_eval
mir_eval/pattern.py
validate
def validate(reference_patterns, estimated_patterns): """Checks that the input annotations to a metric look like valid pattern lists, and throws helpful errors if not. Parameters ---------- reference_patterns : list The reference patterns using the format returned by :func:`mir_eval.io.load_patterns()` estimated_patterns : list The estimated patterns in the same format Returns ------- """ # Warn if pattern lists are empty if _n_onset_midi(reference_patterns) == 0: warnings.warn('Reference patterns are empty.') if _n_onset_midi(estimated_patterns) == 0: warnings.warn('Estimated patterns are empty.') for patterns in [reference_patterns, estimated_patterns]: for pattern in patterns: if len(pattern) <= 0: raise ValueError("Each pattern must contain at least one " "occurrence.") for occurrence in pattern: for onset_midi in occurrence: if len(onset_midi) != 2: raise ValueError("The (onset, midi) tuple must " "contain exactly 2 elements.")
python
def validate(reference_patterns, estimated_patterns): """Checks that the input annotations to a metric look like valid pattern lists, and throws helpful errors if not. Parameters ---------- reference_patterns : list The reference patterns using the format returned by :func:`mir_eval.io.load_patterns()` estimated_patterns : list The estimated patterns in the same format Returns ------- """ # Warn if pattern lists are empty if _n_onset_midi(reference_patterns) == 0: warnings.warn('Reference patterns are empty.') if _n_onset_midi(estimated_patterns) == 0: warnings.warn('Estimated patterns are empty.') for patterns in [reference_patterns, estimated_patterns]: for pattern in patterns: if len(pattern) <= 0: raise ValueError("Each pattern must contain at least one " "occurrence.") for occurrence in pattern: for onset_midi in occurrence: if len(onset_midi) != 2: raise ValueError("The (onset, midi) tuple must " "contain exactly 2 elements.")
[ "def", "validate", "(", "reference_patterns", ",", "estimated_patterns", ")", ":", "# Warn if pattern lists are empty", "if", "_n_onset_midi", "(", "reference_patterns", ")", "==", "0", ":", "warnings", ".", "warn", "(", "'Reference patterns are empty.'", ")", "if", "_n_onset_midi", "(", "estimated_patterns", ")", "==", "0", ":", "warnings", ".", "warn", "(", "'Estimated patterns are empty.'", ")", "for", "patterns", "in", "[", "reference_patterns", ",", "estimated_patterns", "]", ":", "for", "pattern", "in", "patterns", ":", "if", "len", "(", "pattern", ")", "<=", "0", ":", "raise", "ValueError", "(", "\"Each pattern must contain at least one \"", "\"occurrence.\"", ")", "for", "occurrence", "in", "pattern", ":", "for", "onset_midi", "in", "occurrence", ":", "if", "len", "(", "onset_midi", ")", "!=", "2", ":", "raise", "ValueError", "(", "\"The (onset, midi) tuple must \"", "\"contain exactly 2 elements.\"", ")" ]
Checks that the input annotations to a metric look like valid pattern lists, and throws helpful errors if not. Parameters ---------- reference_patterns : list The reference patterns using the format returned by :func:`mir_eval.io.load_patterns()` estimated_patterns : list The estimated patterns in the same format Returns -------
[ "Checks", "that", "the", "input", "annotations", "to", "a", "metric", "look", "like", "valid", "pattern", "lists", "and", "throws", "helpful", "errors", "if", "not", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L82-L112
train
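validate only checks structure, so the easiest way to see it fire is an occurrence whose note is not an (onset, midi) pair. A small sketch, assuming mir_eval is installed:

import mir_eval

good = [[[(0.0, 60.0)]]]        # one pattern, one occurrence, one note
bad = [[[(0.0, 60.0, 99.0)]]]   # 3-tuple where an (onset, midi) pair is expected

mir_eval.pattern.validate(good, good)  # passes silently
try:
    mir_eval.pattern.validate(good, bad)
except ValueError as exc:
    print(exc)  # "The (onset, midi) tuple must contain exactly 2 elements."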
craffel/mir_eval
mir_eval/pattern.py
_occurrence_intersection
def _occurrence_intersection(occ_P, occ_Q): """Computes the intersection between two occurrences. Parameters ---------- occ_P : list of tuples (onset, midi) pairs representing the reference occurrence. occ_Q : list second list of (onset, midi) tuples Returns ------- S : set Set of the intersection between occ_P and occ_Q. """ set_P = set([tuple(onset_midi) for onset_midi in occ_P]) set_Q = set([tuple(onset_midi) for onset_midi in occ_Q]) return set_P & set_Q
python
def _occurrence_intersection(occ_P, occ_Q): """Computes the intersection between two occurrences. Parameters ---------- occ_P : list of tuples (onset, midi) pairs representing the reference occurrence. occ_Q : list second list of (onset, midi) tuples Returns ------- S : set Set of the intersection between occ_P and occ_Q. """ set_P = set([tuple(onset_midi) for onset_midi in occ_P]) set_Q = set([tuple(onset_midi) for onset_midi in occ_Q]) return set_P & set_Q
[ "def", "_occurrence_intersection", "(", "occ_P", ",", "occ_Q", ")", ":", "set_P", "=", "set", "(", "[", "tuple", "(", "onset_midi", ")", "for", "onset_midi", "in", "occ_P", "]", ")", "set_Q", "=", "set", "(", "[", "tuple", "(", "onset_midi", ")", "for", "onset_midi", "in", "occ_Q", "]", ")", "return", "set_P", "&", "set_Q" ]
Computes the intersection between two occurrences. Parameters ---------- occ_P : list of tuples (onset, midi) pairs representing the reference occurrence. occ_Q : list second list of (onset, midi) tuples Returns ------- S : set Set of the intersection between occ_P and occ_Q.
[ "Computes", "the", "intersection", "between", "two", "occurrences", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L115-L133
train
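Because occurrences are turned into sets of (onset, midi) tuples, a note only counts as shared if both its onset and its pitch match exactly. For example (values invented):

occ_P = [(0.0, 60.0), (1.0, 64.0), (2.0, 67.0)]
occ_Q = [(0.0, 60.0), (1.0, 63.0), (2.0, 67.0)]

# Same computation as _occurrence_intersection(occ_P, occ_Q).
shared = set(map(tuple, occ_P)) & set(map(tuple, occ_Q))
print(shared)  # {(0.0, 60.0), (2.0, 67.0)} -- the (1.0, *) notes differ in pitch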
craffel/mir_eval
mir_eval/pattern.py
_compute_score_matrix
def _compute_score_matrix(P, Q, similarity_metric="cardinality_score"):
    """Computes the score matrix between the patterns P and Q.

    Parameters
    ----------
    P : list
        Pattern containing a list of occurrences.
    Q : list
        Pattern containing a list of occurrences.
    similarity_metric : str
        A string representing the metric to be used
        when computing the similarity matrix. Accepted values:
            - "cardinality_score": Count of the intersection
              between occurrences.
        (Default value = "cardinality_score")

    Returns
    -------
    sm : np.array
        The score matrix between P and Q using the similarity_metric.
    """
    sm = np.zeros((len(P), len(Q)))     # The score matrix
    for iP, occ_P in enumerate(P):
        for iQ, occ_Q in enumerate(Q):
            if similarity_metric == "cardinality_score":
                denom = float(np.max([len(occ_P), len(occ_Q)]))
                # Compute the score
                sm[iP, iQ] = len(_occurrence_intersection(occ_P, occ_Q)) / \
                    denom
            # TODO: More scores: 'normalised matching score'
            else:
                raise ValueError("The similarity metric (%s) can only be: "
                                 "'cardinality_score'." % similarity_metric)
    return sm
python
def _compute_score_matrix(P, Q, similarity_metric="cardinality_score"):
    """Computes the score matrix between the patterns P and Q.

    Parameters
    ----------
    P : list
        Pattern containing a list of occurrences.
    Q : list
        Pattern containing a list of occurrences.
    similarity_metric : str
        A string representing the metric to be used
        when computing the similarity matrix. Accepted values:
            - "cardinality_score": Count of the intersection
              between occurrences.
        (Default value = "cardinality_score")

    Returns
    -------
    sm : np.array
        The score matrix between P and Q using the similarity_metric.
    """
    sm = np.zeros((len(P), len(Q)))     # The score matrix
    for iP, occ_P in enumerate(P):
        for iQ, occ_Q in enumerate(Q):
            if similarity_metric == "cardinality_score":
                denom = float(np.max([len(occ_P), len(occ_Q)]))
                # Compute the score
                sm[iP, iQ] = len(_occurrence_intersection(occ_P, occ_Q)) / \
                    denom
            # TODO: More scores: 'normalised matching score'
            else:
                raise ValueError("The similarity metric (%s) can only be: "
                                 "'cardinality_score'." % similarity_metric)
    return sm
[ "def", "_compute_score_matrix", "(", "P", ",", "Q", ",", "similarity_metric", "=", "\"cardinality_score\"", ")", ":", "sm", "=", "np", ".", "zeros", "(", "(", "len", "(", "P", ")", ",", "len", "(", "Q", ")", ")", ")", "# The score matrix", "for", "iP", ",", "occ_P", "in", "enumerate", "(", "P", ")", ":", "for", "iQ", ",", "occ_Q", "in", "enumerate", "(", "Q", ")", ":", "if", "similarity_metric", "==", "\"cardinality_score\"", ":", "denom", "=", "float", "(", "np", ".", "max", "(", "[", "len", "(", "occ_P", ")", ",", "len", "(", "occ_Q", ")", "]", ")", ")", "# Compute the score", "sm", "[", "iP", ",", "iQ", "]", "=", "len", "(", "_occurrence_intersection", "(", "occ_P", ",", "occ_Q", ")", ")", "/", "denom", "# TODO: More scores: 'normalised matching socre'", "else", ":", "raise", "ValueError", "(", "\"The similarity metric (%s) can only be: \"", "\"'cardinality_score'.\"", ")", "return", "sm" ]
Computes the score matrix between the patterns P and Q. Parameters ---------- P : list Pattern containing a list of occurrences. Q : list Pattern containing a list of occurrences. similarity_metric : str A string representing the metric to be used when computing the similarity matrix. Accepted values: - "cardinality_score": Count of the intersection between occurrences. (Default value = "cardinality_score") Returns ------- sm : np.array The score matrix between P and Q using the similarity_metric.
[ "Computes", "the", "score", "matrix", "between", "the", "patterns", "P", "and", "Q", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L136-L170
train
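Under the cardinality score, each cell of sm is |intersection| divided by the size of the larger occurrence, so a cell reaches 1.0 only for identical occurrences. Worked numbers (invented):

occ_P = [(0.0, 60.0), (1.0, 64.0), (2.0, 67.0)]   # 3 notes
occ_Q = [(0.0, 60.0), (2.0, 67.0)]                # 2 notes, both shared

intersection = set(occ_P) & set(occ_Q)            # 2 shared notes
score = len(intersection) / float(max(len(occ_P), len(occ_Q)))
print(score)  # 2 / 3 ~= 0.667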
craffel/mir_eval
mir_eval/pattern.py
standard_FPR
def standard_FPR(reference_patterns, estimated_patterns, tol=1e-5):
    """Standard F1 Score, Precision and Recall.

    This metric checks if the prototype patterns of the reference match
    possible translated patterns in the prototype patterns of the
    estimations. Since the sizes of these prototypes must be equal, this
    metric is quite restrictive and it tends to be 0 in most of the 2013
    MIREX results.

    Examples
    --------
    >>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
    >>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
    >>> F, P, R = mir_eval.pattern.standard_FPR(ref_patterns, est_patterns)

    Parameters
    ----------
    reference_patterns : list
        The reference patterns using the format returned by
        :func:`mir_eval.io.load_patterns()`
    estimated_patterns : list
        The estimated patterns in the same format
    tol : float
        Tolerance level when comparing reference against estimation.
        Default parameter is the one found in the original MATLAB code by
        Tom Collins used for MIREX 2013.
        (Default value = 1e-5)

    Returns
    -------
    f_measure : float
        The standard F1 Score
    precision : float
        The standard Precision
    recall : float
        The standard Recall
    """
    validate(reference_patterns, estimated_patterns)
    nP = len(reference_patterns)    # Number of patterns in the reference
    nQ = len(estimated_patterns)    # Number of patterns in the estimation
    k = 0                           # Number of patterns that match

    # If no patterns were provided, metric is zero
    if _n_onset_midi(reference_patterns) == 0 or \
            _n_onset_midi(estimated_patterns) == 0:
        return 0., 0., 0.

    # Find matches of the prototype patterns
    for ref_pattern in reference_patterns:
        P = np.asarray(ref_pattern[0])  # Get reference prototype
        for est_pattern in estimated_patterns:
            Q = np.asarray(est_pattern[0])  # Get estimation prototype

            if len(P) != len(Q):
                continue

            # Check transposition given a certain tolerance
            if (len(P) == len(Q) == 1 or
                    np.max(np.abs(np.diff(P - Q, axis=0))) < tol):
                k += 1
                break

    # Compute the standard measures
    precision = k / float(nQ)
    recall = k / float(nP)
    f_measure = util.f_measure(precision, recall)
    return f_measure, precision, recall
python
def standard_FPR(reference_patterns, estimated_patterns, tol=1e-5):
    """Standard F1 Score, Precision and Recall.

    This metric checks if the prototype patterns of the reference match
    possible translated patterns in the prototype patterns of the
    estimations. Since the sizes of these prototypes must be equal, this
    metric is quite restrictive and it tends to be 0 in most of the 2013
    MIREX results.

    Examples
    --------
    >>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
    >>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
    >>> F, P, R = mir_eval.pattern.standard_FPR(ref_patterns, est_patterns)

    Parameters
    ----------
    reference_patterns : list
        The reference patterns using the format returned by
        :func:`mir_eval.io.load_patterns()`
    estimated_patterns : list
        The estimated patterns in the same format
    tol : float
        Tolerance level when comparing reference against estimation.
        Default parameter is the one found in the original MATLAB code by
        Tom Collins used for MIREX 2013.
        (Default value = 1e-5)

    Returns
    -------
    f_measure : float
        The standard F1 Score
    precision : float
        The standard Precision
    recall : float
        The standard Recall
    """
    validate(reference_patterns, estimated_patterns)
    nP = len(reference_patterns)    # Number of patterns in the reference
    nQ = len(estimated_patterns)    # Number of patterns in the estimation
    k = 0                           # Number of patterns that match

    # If no patterns were provided, metric is zero
    if _n_onset_midi(reference_patterns) == 0 or \
            _n_onset_midi(estimated_patterns) == 0:
        return 0., 0., 0.

    # Find matches of the prototype patterns
    for ref_pattern in reference_patterns:
        P = np.asarray(ref_pattern[0])  # Get reference prototype
        for est_pattern in estimated_patterns:
            Q = np.asarray(est_pattern[0])  # Get estimation prototype

            if len(P) != len(Q):
                continue

            # Check transposition given a certain tolerance
            if (len(P) == len(Q) == 1 or
                    np.max(np.abs(np.diff(P - Q, axis=0))) < tol):
                k += 1
                break

    # Compute the standard measures
    precision = k / float(nQ)
    recall = k / float(nP)
    f_measure = util.f_measure(precision, recall)
    return f_measure, precision, recall
[ "def", "standard_FPR", "(", "reference_patterns", ",", "estimated_patterns", ",", "tol", "=", "1e-5", ")", ":", "validate", "(", "reference_patterns", ",", "estimated_patterns", ")", "nP", "=", "len", "(", "reference_patterns", ")", "# Number of patterns in the reference", "nQ", "=", "len", "(", "estimated_patterns", ")", "# Number of patterns in the estimation", "k", "=", "0", "# Number of patterns that match", "# If no patterns were provided, metric is zero", "if", "_n_onset_midi", "(", "reference_patterns", ")", "==", "0", "or", "_n_onset_midi", "(", "estimated_patterns", ")", "==", "0", ":", "return", "0.", ",", "0.", ",", "0.", "# Find matches of the prototype patterns", "for", "ref_pattern", "in", "reference_patterns", ":", "P", "=", "np", ".", "asarray", "(", "ref_pattern", "[", "0", "]", ")", "# Get reference prototype", "for", "est_pattern", "in", "estimated_patterns", ":", "Q", "=", "np", ".", "asarray", "(", "est_pattern", "[", "0", "]", ")", "# Get estimation prototype", "if", "len", "(", "P", ")", "!=", "len", "(", "Q", ")", ":", "continue", "# Check transposition given a certain tolerance", "if", "(", "len", "(", "P", ")", "==", "len", "(", "Q", ")", "==", "1", "or", "np", ".", "max", "(", "np", ".", "abs", "(", "np", ".", "diff", "(", "P", "-", "Q", ",", "axis", "=", "0", ")", ")", ")", "<", "tol", ")", ":", "k", "+=", "1", "break", "# Compute the standard measures", "precision", "=", "k", "/", "float", "(", "nQ", ")", "recall", "=", "k", "/", "float", "(", "nP", ")", "f_measure", "=", "util", ".", "f_measure", "(", "precision", ",", "recall", ")", "return", "f_measure", ",", "precision", ",", "recall" ]
Standard F1 Score, Precision and Recall.

This metric checks if the prototype patterns of the reference match
possible translated patterns in the prototype patterns of the
estimations. Since the sizes of these prototypes must be equal, this
metric is quite restrictive and it tends to be 0 in most of the 2013
MIREX results.

Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> F, P, R = mir_eval.pattern.standard_FPR(ref_patterns, est_patterns)

Parameters
----------
reference_patterns : list
    The reference patterns using the format returned by
    :func:`mir_eval.io.load_patterns()`
estimated_patterns : list
    The estimated patterns in the same format
tol : float
    Tolerance level when comparing reference against estimation.
    Default parameter is the one found in the original MATLAB code by
    Tom Collins used for MIREX 2013.
    (Default value = 1e-5)

Returns
-------
f_measure : float
    The standard F1 Score
precision : float
    The standard Precision
recall : float
    The standard Recall
[ "Standard", "F1", "Score", "Precision", "and", "Recall", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L173-L239
train
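The transposition test in standard_FPR relies on a simple identity: if Q is a translated copy of P, then P - Q is a constant row, so its first difference vanishes. A numpy check of that identity with invented points:

import numpy as np

P = np.array([[0.0, 60.0], [1.0, 64.0], [2.0, 67.0]])
Q = P + np.array([4.0, 5.0])   # same shape, shifted 4 beats and 5 semitones

print(np.max(np.abs(np.diff(P - Q, axis=0))) < 1e-5)        # True: pure translation
print(np.max(np.abs(np.diff(P - Q[::-1], axis=0))) < 1e-5)  # False: not a translation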
craffel/mir_eval
mir_eval/pattern.py
three_layer_FPR
def three_layer_FPR(reference_patterns, estimated_patterns):
    """Three Layer F1 Score, Precision and Recall. As described by Meredith.

    Examples
    --------
    >>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
    >>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
    >>> F, P, R = mir_eval.pattern.three_layer_FPR(ref_patterns,
    ...                                            est_patterns)

    Parameters
    ----------
    reference_patterns : list
        The reference patterns in the format returned by
        :func:`mir_eval.io.load_patterns()`
    estimated_patterns : list
        The estimated patterns in the same format

    Returns
    -------
    f_measure : float
        The three-layer F1 Score
    precision : float
        The three-layer Precision
    recall : float
        The three-layer Recall
    """
    validate(reference_patterns, estimated_patterns)

    def compute_first_layer_PR(ref_occs, est_occs):
        """Computes the first layer Precision and Recall values given the
        set of occurrences in the reference and the set of occurrences in the
        estimation.

        Parameters
        ----------
        ref_occs :

        est_occs :


        Returns
        -------

        """
        # Find the length of the intersection between reference and estimation
        s = len(_occurrence_intersection(ref_occs, est_occs))

        # Compute the first layer scores
        precision = s / float(len(ref_occs))
        recall = s / float(len(est_occs))
        return precision, recall

    def compute_second_layer_PR(ref_pattern, est_pattern):
        """Computes the second layer Precision and Recall values given the
        set of occurrences in the reference and the set of occurrences in the
        estimation.

        Parameters
        ----------
        ref_pattern :

        est_pattern :


        Returns
        -------

        """
        # Compute the first layer scores
        F_1 = compute_layer(ref_pattern, est_pattern)

        # Compute the second layer scores
        precision = np.mean(np.max(F_1, axis=0))
        recall = np.mean(np.max(F_1, axis=1))
        return precision, recall

    def compute_layer(ref_elements, est_elements, layer=1):
        """Computes the F-measure matrix for a given layer. The reference and
        estimated elements can be either patterns or occurrences, depending
        on the layer.

        For layer 1, the elements must be occurrences.
        For layer 2, the elements must be patterns.

        Parameters
        ----------
        ref_elements :

        est_elements :

        layer :
            (Default value = 1)

        Returns
        -------

        """
        if layer != 1 and layer != 2:
            raise ValueError("Layer (%d) must be an integer between 1 and 2"
                             % layer)

        nP = len(ref_elements)      # Number of elements in reference
        nQ = len(est_elements)      # Number of elements in estimation
        F = np.zeros((nP, nQ))      # F-measure matrix for the given layer
        for iP in range(nP):
            for iQ in range(nQ):
                if layer == 1:
                    func = compute_first_layer_PR
                elif layer == 2:
                    func = compute_second_layer_PR

                # Compute layer scores
                precision, recall = func(ref_elements[iP], est_elements[iQ])
                F[iP, iQ] = util.f_measure(precision, recall)
        return F

    # If no patterns were provided, metric is zero
    if _n_onset_midi(reference_patterns) == 0 or \
            _n_onset_midi(estimated_patterns) == 0:
        return 0., 0., 0.

    # Compute the second layer (it includes the first layer)
    F_2 = compute_layer(reference_patterns, estimated_patterns, layer=2)

    # Compute the final scores (third layer)
    precision_3 = np.mean(np.max(F_2, axis=0))
    recall_3 = np.mean(np.max(F_2, axis=1))
    f_measure_3 = util.f_measure(precision_3, recall_3)
    return f_measure_3, precision_3, recall_3
python
def three_layer_FPR(reference_patterns, estimated_patterns):
    """Three Layer F1 Score, Precision and Recall. As described by Meredith.

    Examples
    --------
    >>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
    >>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
    >>> F, P, R = mir_eval.pattern.three_layer_FPR(ref_patterns,
    ...                                            est_patterns)

    Parameters
    ----------
    reference_patterns : list
        The reference patterns in the format returned by
        :func:`mir_eval.io.load_patterns()`
    estimated_patterns : list
        The estimated patterns in the same format

    Returns
    -------
    f_measure : float
        The three-layer F1 Score
    precision : float
        The three-layer Precision
    recall : float
        The three-layer Recall
    """
    validate(reference_patterns, estimated_patterns)

    def compute_first_layer_PR(ref_occs, est_occs):
        """Computes the first layer Precision and Recall values given the
        set of occurrences in the reference and the set of occurrences in the
        estimation.

        Parameters
        ----------
        ref_occs :

        est_occs :


        Returns
        -------

        """
        # Find the length of the intersection between reference and estimation
        s = len(_occurrence_intersection(ref_occs, est_occs))

        # Compute the first layer scores
        precision = s / float(len(ref_occs))
        recall = s / float(len(est_occs))
        return precision, recall

    def compute_second_layer_PR(ref_pattern, est_pattern):
        """Computes the second layer Precision and Recall values given the
        set of occurrences in the reference and the set of occurrences in the
        estimation.

        Parameters
        ----------
        ref_pattern :

        est_pattern :


        Returns
        -------

        """
        # Compute the first layer scores
        F_1 = compute_layer(ref_pattern, est_pattern)

        # Compute the second layer scores
        precision = np.mean(np.max(F_1, axis=0))
        recall = np.mean(np.max(F_1, axis=1))
        return precision, recall

    def compute_layer(ref_elements, est_elements, layer=1):
        """Computes the F-measure matrix for a given layer. The reference and
        estimated elements can be either patterns or occurrences, depending
        on the layer.

        For layer 1, the elements must be occurrences.
        For layer 2, the elements must be patterns.

        Parameters
        ----------
        ref_elements :

        est_elements :

        layer :
            (Default value = 1)

        Returns
        -------

        """
        if layer != 1 and layer != 2:
            raise ValueError("Layer (%d) must be an integer between 1 and 2"
                             % layer)

        nP = len(ref_elements)      # Number of elements in reference
        nQ = len(est_elements)      # Number of elements in estimation
        F = np.zeros((nP, nQ))      # F-measure matrix for the given layer
        for iP in range(nP):
            for iQ in range(nQ):
                if layer == 1:
                    func = compute_first_layer_PR
                elif layer == 2:
                    func = compute_second_layer_PR

                # Compute layer scores
                precision, recall = func(ref_elements[iP], est_elements[iQ])
                F[iP, iQ] = util.f_measure(precision, recall)
        return F

    # If no patterns were provided, metric is zero
    if _n_onset_midi(reference_patterns) == 0 or \
            _n_onset_midi(estimated_patterns) == 0:
        return 0., 0., 0.

    # Compute the second layer (it includes the first layer)
    F_2 = compute_layer(reference_patterns, estimated_patterns, layer=2)

    # Compute the final scores (third layer)
    precision_3 = np.mean(np.max(F_2, axis=0))
    recall_3 = np.mean(np.max(F_2, axis=1))
    f_measure_3 = util.f_measure(precision_3, recall_3)
    return f_measure_3, precision_3, recall_3
[ "def", "three_layer_FPR", "(", "reference_patterns", ",", "estimated_patterns", ")", ":", "validate", "(", "reference_patterns", ",", "estimated_patterns", ")", "def", "compute_first_layer_PR", "(", "ref_occs", ",", "est_occs", ")", ":", "\"\"\"Computes the first layer Precision and Recall values given the\n set of occurrences in the reference and the set of occurrences in the\n estimation.\n\n Parameters\n ----------\n ref_occs :\n\n est_occs :\n\n\n Returns\n -------\n\n \"\"\"", "# Find the length of the intersection between reference and estimation", "s", "=", "len", "(", "_occurrence_intersection", "(", "ref_occs", ",", "est_occs", ")", ")", "# Compute the first layer scores", "precision", "=", "s", "/", "float", "(", "len", "(", "ref_occs", ")", ")", "recall", "=", "s", "/", "float", "(", "len", "(", "est_occs", ")", ")", "return", "precision", ",", "recall", "def", "compute_second_layer_PR", "(", "ref_pattern", ",", "est_pattern", ")", ":", "\"\"\"Computes the second layer Precision and Recall values given the\n set of occurrences in the reference and the set of occurrences in the\n estimation.\n\n Parameters\n ----------\n ref_pattern :\n\n est_pattern :\n\n\n Returns\n -------\n\n \"\"\"", "# Compute the first layer scores", "F_1", "=", "compute_layer", "(", "ref_pattern", ",", "est_pattern", ")", "# Compute the second layer scores", "precision", "=", "np", ".", "mean", "(", "np", ".", "max", "(", "F_1", ",", "axis", "=", "0", ")", ")", "recall", "=", "np", ".", "mean", "(", "np", ".", "max", "(", "F_1", ",", "axis", "=", "1", ")", ")", "return", "precision", ",", "recall", "def", "compute_layer", "(", "ref_elements", ",", "est_elements", ",", "layer", "=", "1", ")", ":", "\"\"\"Computes the F-measure matrix for a given layer. The reference and\n estimated elements can be either patters or occurrences, depending\n on the layer.\n\n For layer 1, the elements must be occurrences.\n For layer 2, the elements must be patterns.\n\n Parameters\n ----------\n ref_elements :\n\n est_elements :\n\n layer :\n (Default value = 1)\n\n Returns\n -------\n\n \"\"\"", "if", "layer", "!=", "1", "and", "layer", "!=", "2", ":", "raise", "ValueError", "(", "\"Layer (%d) must be an integer between 1 and 2\"", "%", "layer", ")", "nP", "=", "len", "(", "ref_elements", ")", "# Number of elements in reference", "nQ", "=", "len", "(", "est_elements", ")", "# Number of elements in estimation", "F", "=", "np", ".", "zeros", "(", "(", "nP", ",", "nQ", ")", ")", "# F-measure matrix for the given layer", "for", "iP", "in", "range", "(", "nP", ")", ":", "for", "iQ", "in", "range", "(", "nQ", ")", ":", "if", "layer", "==", "1", ":", "func", "=", "compute_first_layer_PR", "elif", "layer", "==", "2", ":", "func", "=", "compute_second_layer_PR", "# Compute layer scores", "precision", ",", "recall", "=", "func", "(", "ref_elements", "[", "iP", "]", ",", "est_elements", "[", "iQ", "]", ")", "F", "[", "iP", ",", "iQ", "]", "=", "util", ".", "f_measure", "(", "precision", ",", "recall", ")", "return", "F", "# If no patterns were provided, metric is zero", "if", "_n_onset_midi", "(", "reference_patterns", ")", "==", "0", "or", "_n_onset_midi", "(", "estimated_patterns", ")", "==", "0", ":", "return", "0.", ",", "0.", ",", "0.", "# Compute the second layer (it includes the first layer)", "F_2", "=", "compute_layer", "(", "reference_patterns", ",", "estimated_patterns", ",", "layer", "=", "2", ")", "# Compute the final scores (third layer)", "precision_3", "=", "np", ".", "mean", "(", "np", ".", "max", "(", "F_2", ",", 
"axis", "=", "0", ")", ")", "recall_3", "=", "np", ".", "mean", "(", "np", ".", "max", "(", "F_2", ",", "axis", "=", "1", ")", ")", "f_measure_3", "=", "util", ".", "f_measure", "(", "precision_3", ",", "recall_3", ")", "return", "f_measure_3", ",", "precision_3", ",", "recall_3" ]
Three Layer F1 Score, Precision and Recall. As described by Meredith.

Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> F, P, R = mir_eval.pattern.three_layer_FPR(ref_patterns,
...                                            est_patterns)

Parameters
----------
reference_patterns : list
    The reference patterns in the format returned by
    :func:`mir_eval.io.load_patterns()`
estimated_patterns : list
    The estimated patterns in the same format

Returns
-------
f_measure : float
    The three-layer F1 Score
precision : float
    The three-layer Precision
recall : float
    The three-layer Recall
[ "Three", "Layer", "F1", "Score", "Precision", "and", "Recall", ".", "As", "described", "by", "Meridith", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L390-L520
train
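Every aggregation step in the three-layer score is the same move: build an F-measure matrix between reference and estimated elements, take precision as the mean of the column-wise maxima and recall as the mean of the row-wise maxima. The move in isolation, on a made-up layer-1 matrix:

import numpy as np

# Hypothetical F-measures between 2 reference and 3 estimated occurrences.
F = np.array([[1.0, 0.2, 0.0],
              [0.1, 0.8, 0.3]])

precision = np.mean(np.max(F, axis=0))  # best reference match per estimate
recall = np.mean(np.max(F, axis=1))     # best estimate match per reference
print(precision, recall)                # ~0.7, 0.9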
craffel/mir_eval
mir_eval/pattern.py
first_n_three_layer_P
def first_n_three_layer_P(reference_patterns, estimated_patterns, n=5):
    """First n three-layer precision.

    This metric is basically the same as the three-layer FPR but it is only
    applied to the first n estimated patterns, and it only returns the
    precision. In MIREX, and typically in practice, n = 5.

    Examples
    --------
    >>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
    >>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
    >>> P = mir_eval.pattern.first_n_three_layer_P(ref_patterns,
    ...                                            est_patterns, n=5)

    Parameters
    ----------
    reference_patterns : list
        The reference patterns in the format returned by
        :func:`mir_eval.io.load_patterns()`
    estimated_patterns : list
        The estimated patterns in the same format
    n : int
        Number of patterns to consider from the estimated results, in
        the order they appear in the matrix
        (Default value = 5)

    Returns
    -------
    precision : float
        The first n three-layer Precision
    """
    validate(reference_patterns, estimated_patterns)

    # If no patterns were provided, metric is zero
    if _n_onset_midi(reference_patterns) == 0 or \
            _n_onset_midi(estimated_patterns) == 0:
        return 0.

    # Get only the first n patterns from the estimated results
    fn_est_patterns = estimated_patterns[:min(len(estimated_patterns), n)]

    # Compute the three-layer scores for the first n estimated patterns
    F, P, R = three_layer_FPR(reference_patterns, fn_est_patterns)

    return P
python
def first_n_three_layer_P(reference_patterns, estimated_patterns, n=5):
    """First n three-layer precision.

    This metric is basically the same as the three-layer FPR but it is only
    applied to the first n estimated patterns, and it only returns the
    precision. In MIREX, and typically in practice, n = 5.

    Examples
    --------
    >>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
    >>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
    >>> P = mir_eval.pattern.first_n_three_layer_P(ref_patterns,
    ...                                            est_patterns, n=5)

    Parameters
    ----------
    reference_patterns : list
        The reference patterns in the format returned by
        :func:`mir_eval.io.load_patterns()`
    estimated_patterns : list
        The estimated patterns in the same format
    n : int
        Number of patterns to consider from the estimated results, in
        the order they appear in the matrix
        (Default value = 5)

    Returns
    -------
    precision : float
        The first n three-layer Precision
    """
    validate(reference_patterns, estimated_patterns)

    # If no patterns were provided, metric is zero
    if _n_onset_midi(reference_patterns) == 0 or \
            _n_onset_midi(estimated_patterns) == 0:
        return 0.

    # Get only the first n patterns from the estimated results
    fn_est_patterns = estimated_patterns[:min(len(estimated_patterns), n)]

    # Compute the three-layer scores for the first n estimated patterns
    F, P, R = three_layer_FPR(reference_patterns, fn_est_patterns)

    return P
[ "def", "first_n_three_layer_P", "(", "reference_patterns", ",", "estimated_patterns", ",", "n", "=", "5", ")", ":", "validate", "(", "reference_patterns", ",", "estimated_patterns", ")", "# If no patterns were provided, metric is zero", "if", "_n_onset_midi", "(", "reference_patterns", ")", "==", "0", "or", "_n_onset_midi", "(", "estimated_patterns", ")", "==", "0", ":", "return", "0.", ",", "0.", ",", "0.", "# Get only the first n patterns from the estimated results", "fn_est_patterns", "=", "estimated_patterns", "[", ":", "min", "(", "len", "(", "estimated_patterns", ")", ",", "n", ")", "]", "# Compute the three-layer scores for the first n estimated patterns", "F", ",", "P", ",", "R", "=", "three_layer_FPR", "(", "reference_patterns", ",", "fn_est_patterns", ")", "return", "P" ]
First n three-layer precision.

This metric is basically the same as the three-layer FPR but it is only
applied to the first n estimated patterns, and it only returns the
precision. In MIREX, and typically in practice, n = 5.

Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> P = mir_eval.pattern.first_n_three_layer_P(ref_patterns,
...                                            est_patterns, n=5)

Parameters
----------
reference_patterns : list
    The reference patterns in the format returned by
    :func:`mir_eval.io.load_patterns()`
estimated_patterns : list
    The estimated patterns in the same format
n : int
    Number of patterns to consider from the estimated results, in
    the order they appear in the matrix
    (Default value = 5)

Returns
-------
precision : float
    The first n three-layer Precision
[ "First", "n", "three", "-", "layer", "precision", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L523-L568
train
craffel/mir_eval
mir_eval/pattern.py
first_n_target_proportion_R
def first_n_target_proportion_R(reference_patterns, estimated_patterns, n=5):
    """First n target proportion establishment recall metric.

    This metric is similar to the establishment FPR score, but it only
    takes into account the first n estimated patterns and it only outputs
    the Recall value of it.

    Examples
    --------
    >>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
    >>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
    >>> R = mir_eval.pattern.first_n_target_proportion_R(
    ...     ref_patterns, est_patterns, n=5)

    Parameters
    ----------
    reference_patterns : list
        The reference patterns in the format returned by
        :func:`mir_eval.io.load_patterns()`
    estimated_patterns : list
        The estimated patterns in the same format
    n : int
        Number of patterns to consider from the estimated results, in
        the order they appear in the matrix.
        (Default value = 5)

    Returns
    -------
    recall : float
        The first n target proportion Recall.
    """
    validate(reference_patterns, estimated_patterns)
    # If no patterns were provided, metric is zero
    if _n_onset_midi(reference_patterns) == 0 or \
            _n_onset_midi(estimated_patterns) == 0:
        return 0.

    # Get only the first n patterns from the estimated results
    fn_est_patterns = estimated_patterns[:min(len(estimated_patterns), n)]
    F, P, R = establishment_FPR(reference_patterns, fn_est_patterns)
    return R
python
def first_n_target_proportion_R(reference_patterns, estimated_patterns, n=5):
    """First n target proportion establishment recall metric.

    This metric is similar to the establishment FPR score, but it only
    takes into account the first n estimated patterns and it only outputs
    the Recall value of it.

    Examples
    --------
    >>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
    >>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
    >>> R = mir_eval.pattern.first_n_target_proportion_R(
    ...     ref_patterns, est_patterns, n=5)

    Parameters
    ----------
    reference_patterns : list
        The reference patterns in the format returned by
        :func:`mir_eval.io.load_patterns()`
    estimated_patterns : list
        The estimated patterns in the same format
    n : int
        Number of patterns to consider from the estimated results, in
        the order they appear in the matrix.
        (Default value = 5)

    Returns
    -------
    recall : float
        The first n target proportion Recall.
    """
    validate(reference_patterns, estimated_patterns)
    # If no patterns were provided, metric is zero
    if _n_onset_midi(reference_patterns) == 0 or \
            _n_onset_midi(estimated_patterns) == 0:
        return 0.

    # Get only the first n patterns from the estimated results
    fn_est_patterns = estimated_patterns[:min(len(estimated_patterns), n)]
    F, P, R = establishment_FPR(reference_patterns, fn_est_patterns)
    return R
[ "def", "first_n_target_proportion_R", "(", "reference_patterns", ",", "estimated_patterns", ",", "n", "=", "5", ")", ":", "validate", "(", "reference_patterns", ",", "estimated_patterns", ")", "# If no patterns were provided, metric is zero", "if", "_n_onset_midi", "(", "reference_patterns", ")", "==", "0", "or", "_n_onset_midi", "(", "estimated_patterns", ")", "==", "0", ":", "return", "0.", ",", "0.", ",", "0.", "# Get only the first n patterns from the estimated results", "fn_est_patterns", "=", "estimated_patterns", "[", ":", "min", "(", "len", "(", "estimated_patterns", ")", ",", "n", ")", "]", "F", ",", "P", ",", "R", "=", "establishment_FPR", "(", "reference_patterns", ",", "fn_est_patterns", ")", "return", "R" ]
First n target proportion establishment recall metric.

This metric is similar to the establishment FPR score, but it only
takes into account the first n estimated patterns and it only outputs
the Recall value of it.

Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> R = mir_eval.pattern.first_n_target_proportion_R(
...     ref_patterns, est_patterns, n=5)

Parameters
----------
reference_patterns : list
    The reference patterns in the format returned by
    :func:`mir_eval.io.load_patterns()`
estimated_patterns : list
    The estimated patterns in the same format
n : int
    Number of patterns to consider from the estimated results, in
    the order they appear in the matrix.
    (Default value = 5)

Returns
-------
recall : float
    The first n target proportion Recall.
[ "First", "n", "target", "proportion", "establishment", "recall", "metric", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L571-L614
train
craffel/mir_eval
mir_eval/pattern.py
evaluate
def evaluate(ref_patterns, est_patterns, **kwargs): """Load data and perform the evaluation. Examples -------- >>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt") >>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt") >>> scores = mir_eval.pattern.evaluate(ref_patterns, est_patterns) Parameters ---------- ref_patterns : list The reference patterns in the format returned by :func:`mir_eval.io.load_patterns()` est_patterns : list The estimated patterns in the same format kwargs Additional keyword arguments which will be passed to the appropriate metric or preprocessing functions. Returns ------- scores : dict Dictionary of scores, where the key is the metric name (str) and the value is the (float) score achieved. """ # Compute all the metrics scores = collections.OrderedDict() # Standard scores scores['F'], scores['P'], scores['R'] = \ util.filter_kwargs(standard_FPR, ref_patterns, est_patterns, **kwargs) # Establishment scores scores['F_est'], scores['P_est'], scores['R_est'] = \ util.filter_kwargs(establishment_FPR, ref_patterns, est_patterns, **kwargs) # Occurrence scores # Force these values for thresh kwargs['thresh'] = .5 scores['F_occ.5'], scores['P_occ.5'], scores['R_occ.5'] = \ util.filter_kwargs(occurrence_FPR, ref_patterns, est_patterns, **kwargs) kwargs['thresh'] = .75 scores['F_occ.75'], scores['P_occ.75'], scores['R_occ.75'] = \ util.filter_kwargs(occurrence_FPR, ref_patterns, est_patterns, **kwargs) # Three-layer scores scores['F_3'], scores['P_3'], scores['R_3'] = \ util.filter_kwargs(three_layer_FPR, ref_patterns, est_patterns, **kwargs) # First Five Patterns scores # Set default value of n if 'n' not in kwargs: kwargs['n'] = 5 scores['FFP'] = util.filter_kwargs(first_n_three_layer_P, ref_patterns, est_patterns, **kwargs) scores['FFTP_est'] = \ util.filter_kwargs(first_n_target_proportion_R, ref_patterns, est_patterns, **kwargs) return scores
python
def evaluate(ref_patterns, est_patterns, **kwargs): """Load data and perform the evaluation. Examples -------- >>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt") >>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt") >>> scores = mir_eval.pattern.evaluate(ref_patterns, est_patterns) Parameters ---------- ref_patterns : list The reference patterns in the format returned by :func:`mir_eval.io.load_patterns()` est_patterns : list The estimated patterns in the same format kwargs Additional keyword arguments which will be passed to the appropriate metric or preprocessing functions. Returns ------- scores : dict Dictionary of scores, where the key is the metric name (str) and the value is the (float) score achieved. """ # Compute all the metrics scores = collections.OrderedDict() # Standard scores scores['F'], scores['P'], scores['R'] = \ util.filter_kwargs(standard_FPR, ref_patterns, est_patterns, **kwargs) # Establishment scores scores['F_est'], scores['P_est'], scores['R_est'] = \ util.filter_kwargs(establishment_FPR, ref_patterns, est_patterns, **kwargs) # Occurrence scores # Force these values for thresh kwargs['thresh'] = .5 scores['F_occ.5'], scores['P_occ.5'], scores['R_occ.5'] = \ util.filter_kwargs(occurrence_FPR, ref_patterns, est_patterns, **kwargs) kwargs['thresh'] = .75 scores['F_occ.75'], scores['P_occ.75'], scores['R_occ.75'] = \ util.filter_kwargs(occurrence_FPR, ref_patterns, est_patterns, **kwargs) # Three-layer scores scores['F_3'], scores['P_3'], scores['R_3'] = \ util.filter_kwargs(three_layer_FPR, ref_patterns, est_patterns, **kwargs) # First Five Patterns scores # Set default value of n if 'n' not in kwargs: kwargs['n'] = 5 scores['FFP'] = util.filter_kwargs(first_n_three_layer_P, ref_patterns, est_patterns, **kwargs) scores['FFTP_est'] = \ util.filter_kwargs(first_n_target_proportion_R, ref_patterns, est_patterns, **kwargs) return scores
[ "def", "evaluate", "(", "ref_patterns", ",", "est_patterns", ",", "*", "*", "kwargs", ")", ":", "# Compute all the metrics", "scores", "=", "collections", ".", "OrderedDict", "(", ")", "# Standard scores", "scores", "[", "'F'", "]", ",", "scores", "[", "'P'", "]", ",", "scores", "[", "'R'", "]", "=", "util", ".", "filter_kwargs", "(", "standard_FPR", ",", "ref_patterns", ",", "est_patterns", ",", "*", "*", "kwargs", ")", "# Establishment scores", "scores", "[", "'F_est'", "]", ",", "scores", "[", "'P_est'", "]", ",", "scores", "[", "'R_est'", "]", "=", "util", ".", "filter_kwargs", "(", "establishment_FPR", ",", "ref_patterns", ",", "est_patterns", ",", "*", "*", "kwargs", ")", "# Occurrence scores", "# Force these values for thresh", "kwargs", "[", "'thresh'", "]", "=", ".5", "scores", "[", "'F_occ.5'", "]", ",", "scores", "[", "'P_occ.5'", "]", ",", "scores", "[", "'R_occ.5'", "]", "=", "util", ".", "filter_kwargs", "(", "occurrence_FPR", ",", "ref_patterns", ",", "est_patterns", ",", "*", "*", "kwargs", ")", "kwargs", "[", "'thresh'", "]", "=", ".75", "scores", "[", "'F_occ.75'", "]", ",", "scores", "[", "'P_occ.75'", "]", ",", "scores", "[", "'R_occ.75'", "]", "=", "util", ".", "filter_kwargs", "(", "occurrence_FPR", ",", "ref_patterns", ",", "est_patterns", ",", "*", "*", "kwargs", ")", "# Three-layer scores", "scores", "[", "'F_3'", "]", ",", "scores", "[", "'P_3'", "]", ",", "scores", "[", "'R_3'", "]", "=", "util", ".", "filter_kwargs", "(", "three_layer_FPR", ",", "ref_patterns", ",", "est_patterns", ",", "*", "*", "kwargs", ")", "# First Five Patterns scores", "# Set default value of n", "if", "'n'", "not", "in", "kwargs", ":", "kwargs", "[", "'n'", "]", "=", "5", "scores", "[", "'FFP'", "]", "=", "util", ".", "filter_kwargs", "(", "first_n_three_layer_P", ",", "ref_patterns", ",", "est_patterns", ",", "*", "*", "kwargs", ")", "scores", "[", "'FFTP_est'", "]", "=", "util", ".", "filter_kwargs", "(", "first_n_target_proportion_R", ",", "ref_patterns", ",", "est_patterns", ",", "*", "*", "kwargs", ")", "return", "scores" ]
Load data and perform the evaluation. Examples -------- >>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt") >>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt") >>> scores = mir_eval.pattern.evaluate(ref_patterns, est_patterns) Parameters ---------- ref_patterns : list The reference patterns in the format returned by :func:`mir_eval.io.load_patterns()` est_patterns : list The estimated patterns in the same format kwargs Additional keyword arguments which will be passed to the appropriate metric or preprocessing functions. Returns ------- scores : dict Dictionary of scores, where the key is the metric name (str) and the value is the (float) score achieved.
[ "Load", "data", "and", "perform", "the", "evaluation", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L617-L683
train
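The evaluate wrapper relies on util.filter_kwargs to forward only the keyword arguments each metric actually accepts, which is why it can force kwargs['thresh'] for the occurrence scores without breaking metrics that have no thresh parameter. Below is a simplified sketch of that idea, not mir_eval's actual implementation; the helper name and placeholder metrics are hypothetical.

import inspect

def filter_kwargs_sketch(func, *args, **kwargs):
    # Forward only the keyword arguments present in func's signature.
    accepted = inspect.signature(func).parameters
    filtered = {k: v for k, v in kwargs.items() if k in accepted}
    return func(*args, **filtered)

def occurrence_like(ref, est, thresh=0.5):
    return thresh  # placeholder: a metric that accepts thresh

def standard_like(ref, est):
    return 1.0     # placeholder: a metric with no thresh parameter

kwargs = {'thresh': .75, 'n': 5}
print(filter_kwargs_sketch(occurrence_like, [], [], **kwargs))  # 0.75
print(filter_kwargs_sketch(standard_like, [], [], **kwargs))    # 1.0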
craffel/mir_eval
mir_eval/transcription_velocity.py
validate
def validate(ref_intervals, ref_pitches, ref_velocities, est_intervals, est_pitches, est_velocities): """Checks that the input annotations have valid time intervals, pitches, and velocities, and throws helpful errors if not. Parameters ---------- ref_intervals : np.ndarray, shape=(n,2) Array of reference notes time intervals (onset and offset times) ref_pitches : np.ndarray, shape=(n,) Array of reference pitch values in Hertz ref_velocities : np.ndarray, shape=(n,) Array of MIDI velocities (i.e. between 0 and 127) of reference notes est_intervals : np.ndarray, shape=(m,2) Array of estimated notes time intervals (onset and offset times) est_pitches : np.ndarray, shape=(m,) Array of estimated pitch values in Hertz est_velocities : np.ndarray, shape=(m,) Array of MIDI velocities (i.e. between 0 and 127) of estimated notes """ transcription.validate(ref_intervals, ref_pitches, est_intervals, est_pitches) # Check that velocities have the same length as intervals/pitches if not ref_velocities.shape[0] == ref_pitches.shape[0]: raise ValueError('Reference velocities must have the same length as ' 'pitches and intervals.') if not est_velocities.shape[0] == est_pitches.shape[0]: raise ValueError('Estimated velocities must have the same length as ' 'pitches and intervals.') # Check that the velocities are positive if ref_velocities.size > 0 and np.min(ref_velocities) < 0: raise ValueError('Reference velocities must be positive.') if est_velocities.size > 0 and np.min(est_velocities) < 0: raise ValueError('Estimated velocities must be positive.')
python
def validate(ref_intervals, ref_pitches, ref_velocities, est_intervals, est_pitches, est_velocities): """Checks that the input annotations have valid time intervals, pitches, and velocities, and throws helpful errors if not. Parameters ---------- ref_intervals : np.ndarray, shape=(n,2) Array of reference notes time intervals (onset and offset times) ref_pitches : np.ndarray, shape=(n,) Array of reference pitch values in Hertz ref_velocities : np.ndarray, shape=(n,) Array of MIDI velocities (i.e. between 0 and 127) of reference notes est_intervals : np.ndarray, shape=(m,2) Array of estimated notes time intervals (onset and offset times) est_pitches : np.ndarray, shape=(m,) Array of estimated pitch values in Hertz est_velocities : np.ndarray, shape=(m,) Array of MIDI velocities (i.e. between 0 and 127) of estimated notes """ transcription.validate(ref_intervals, ref_pitches, est_intervals, est_pitches) # Check that velocities have the same length as intervals/pitches if not ref_velocities.shape[0] == ref_pitches.shape[0]: raise ValueError('Reference velocities must have the same length as ' 'pitches and intervals.') if not est_velocities.shape[0] == est_pitches.shape[0]: raise ValueError('Estimated velocities must have the same length as ' 'pitches and intervals.') # Check that the velocities are positive if ref_velocities.size > 0 and np.min(ref_velocities) < 0: raise ValueError('Reference velocities must be positive.') if est_velocities.size > 0 and np.min(est_velocities) < 0: raise ValueError('Estimated velocities must be positive.')
[ "def", "validate", "(", "ref_intervals", ",", "ref_pitches", ",", "ref_velocities", ",", "est_intervals", ",", "est_pitches", ",", "est_velocities", ")", ":", "transcription", ".", "validate", "(", "ref_intervals", ",", "ref_pitches", ",", "est_intervals", ",", "est_pitches", ")", "# Check that velocities have the same length as intervals/pitches", "if", "not", "ref_velocities", ".", "shape", "[", "0", "]", "==", "ref_pitches", ".", "shape", "[", "0", "]", ":", "raise", "ValueError", "(", "'Reference velocities must have the same length as '", "'pitches and intervals.'", ")", "if", "not", "est_velocities", ".", "shape", "[", "0", "]", "==", "est_pitches", ".", "shape", "[", "0", "]", ":", "raise", "ValueError", "(", "'Estimated velocities must have the same length as '", "'pitches and intervals.'", ")", "# Check that the velocities are positive", "if", "ref_velocities", ".", "size", ">", "0", "and", "np", ".", "min", "(", "ref_velocities", ")", "<", "0", ":", "raise", "ValueError", "(", "'Reference velocities must be positive.'", ")", "if", "est_velocities", ".", "size", ">", "0", "and", "np", ".", "min", "(", "est_velocities", ")", "<", "0", ":", "raise", "ValueError", "(", "'Estimated velocities must be positive.'", ")" ]
Checks that the input annotations have valid time intervals, pitches, and velocities, and throws helpful errors if not. Parameters ---------- ref_intervals : np.ndarray, shape=(n,2) Array of reference notes time intervals (onset and offset times) ref_pitches : np.ndarray, shape=(n,) Array of reference pitch values in Hertz ref_velocities : np.ndarray, shape=(n,) Array of MIDI velocities (i.e. between 0 and 127) of reference notes est_intervals : np.ndarray, shape=(m,2) Array of estimated notes time intervals (onset and offset times) est_pitches : np.ndarray, shape=(m,) Array of estimated pitch values in Hertz est_velocities : np.ndarray, shape=(m,) Array of MIDI velocities (i.e. between 0 and 127) of estimated notes
[ "Checks", "that", "the", "input", "annotations", "have", "valid", "time", "intervals", "pitches", "and", "velocities", "and", "throws", "helpful", "errors", "if", "not", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/transcription_velocity.py#L62-L95
train
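A quick sanity check of the length constraint described above, using invented toy arrays; the error text is produced by the function itself.

import numpy as np
import mir_eval

ref_intervals = np.array([[0.0, 0.5], [0.6, 1.0]])
ref_pitches = np.array([440.0, 220.0])
ref_velocities = np.array([80.0, 64.0])
est_intervals = np.array([[0.01, 0.5]])
est_pitches = np.array([440.0])

# One velocity per estimated note: validation passes silently.
mir_eval.transcription_velocity.validate(
    ref_intervals, ref_pitches, ref_velocities,
    est_intervals, est_pitches, np.array([70.0]))

# A length mismatch raises the documented ValueError.
try:
    mir_eval.transcription_velocity.validate(
        ref_intervals, ref_pitches, ref_velocities,
        est_intervals, est_pitches, np.array([70.0, 90.0]))
except ValueError as exc:
    print(exc)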
craffel/mir_eval
mir_eval/transcription_velocity.py
match_notes
def match_notes( ref_intervals, ref_pitches, ref_velocities, est_intervals, est_pitches, est_velocities, onset_tolerance=0.05, pitch_tolerance=50.0, offset_ratio=0.2, offset_min_tolerance=0.05, strict=False, velocity_tolerance=0.1): """Match notes, taking note velocity into consideration. This function first calls :func:`mir_eval.transcription.match_notes` to match notes according to the supplied intervals, pitches, onset, offset, and pitch tolerances. The velocities of the matched notes are then used to estimate a slope and intercept which can rescale the estimated velocities so that they are as close as possible (in L2 sense) to their matched reference velocities. Velocities are then normalized to the range [0, 1]. A estimated note is then further only considered correct if its velocity is within ``velocity_tolerance`` of its matched (according to pitch and timing) reference note. Parameters ---------- ref_intervals : np.ndarray, shape=(n,2) Array of reference notes time intervals (onset and offset times) ref_pitches : np.ndarray, shape=(n,) Array of reference pitch values in Hertz ref_velocities : np.ndarray, shape=(n,) Array of MIDI velocities (i.e. between 0 and 127) of reference notes est_intervals : np.ndarray, shape=(m,2) Array of estimated notes time intervals (onset and offset times) est_pitches : np.ndarray, shape=(m,) Array of estimated pitch values in Hertz est_velocities : np.ndarray, shape=(m,) Array of MIDI velocities (i.e. between 0 and 127) of estimated notes onset_tolerance : float > 0 The tolerance for an estimated note's onset deviating from the reference note's onset, in seconds. Default is 0.05 (50 ms). pitch_tolerance : float > 0 The tolerance for an estimated note's pitch deviating from the reference note's pitch, in cents. Default is 50.0 (50 cents). offset_ratio : float > 0 or None The ratio of the reference note's duration used to define the offset_tolerance. Default is 0.2 (20%), meaning the ``offset_tolerance`` will equal the ``ref_duration * 0.2``, or 0.05 (50 ms), whichever is greater. If ``offset_ratio`` is set to ``None``, offsets are ignored in the matching. offset_min_tolerance : float > 0 The minimum tolerance for offset matching. See offset_ratio description for an explanation of how the offset tolerance is determined. Note: this parameter only influences the results if ``offset_ratio`` is not ``None``. strict : bool If ``strict=False`` (the default), threshold checks for onset, offset, and pitch matching are performed using ``<=`` (less than or equal). If ``strict=True``, the threshold checks are performed using ``<`` (less than). velocity_tolerance : float > 0 Estimated notes are considered correct if, after rescaling and normalization to [0, 1], they are within ``velocity_tolerance`` of a matched reference note. Returns ------- matching : list of tuples A list of matched reference and estimated notes. ``matching[i] == (i, j)`` where reference note ``i`` matches estimated note ``j``. 
""" # Compute note matching as usual using standard transcription function matching = transcription.match_notes( ref_intervals, ref_pitches, est_intervals, est_pitches, onset_tolerance, pitch_tolerance, offset_ratio, offset_min_tolerance, strict) # Rescale reference velocities to the range [0, 1] min_velocity, max_velocity = np.min(ref_velocities), np.max(ref_velocities) # Make the smallest possible range 1 to avoid divide by zero velocity_range = max(1, max_velocity - min_velocity) ref_velocities = (ref_velocities - min_velocity)/float(velocity_range) # Convert matching list-of-tuples to array for fancy indexing matching = np.array(matching) # When there is no matching, return an empty list if matching.size == 0: return [] # Grab velocities for matched notes ref_matched_velocities = ref_velocities[matching[:, 0]] est_matched_velocities = est_velocities[matching[:, 1]] # Find slope and intercept of line which produces best least-squares fit # between matched est and ref velocities slope, intercept = np.linalg.lstsq( np.vstack([est_matched_velocities, np.ones(len(est_matched_velocities))]).T, ref_matched_velocities)[0] # Re-scale est velocities to match ref est_matched_velocities = slope*est_matched_velocities + intercept # Compute the absolute error of (rescaled) estimated velocities vs. # normalized reference velocities. Error will be in [0, 1] velocity_diff = np.abs(est_matched_velocities - ref_matched_velocities) # Check whether each error is within the provided tolerance velocity_within_tolerance = (velocity_diff < velocity_tolerance) # Only keep matches whose velocity was within the provided tolerance matching = matching[velocity_within_tolerance] # Convert back to list-of-tuple format matching = [tuple(_) for _ in matching] return matching
python
def match_notes( ref_intervals, ref_pitches, ref_velocities, est_intervals, est_pitches, est_velocities, onset_tolerance=0.05, pitch_tolerance=50.0, offset_ratio=0.2, offset_min_tolerance=0.05, strict=False, velocity_tolerance=0.1): """Match notes, taking note velocity into consideration. This function first calls :func:`mir_eval.transcription.match_notes` to match notes according to the supplied intervals, pitches, onset, offset, and pitch tolerances. The velocities of the matched notes are then used to estimate a slope and intercept which can rescale the estimated velocities so that they are as close as possible (in L2 sense) to their matched reference velocities. Velocities are then normalized to the range [0, 1]. A estimated note is then further only considered correct if its velocity is within ``velocity_tolerance`` of its matched (according to pitch and timing) reference note. Parameters ---------- ref_intervals : np.ndarray, shape=(n,2) Array of reference notes time intervals (onset and offset times) ref_pitches : np.ndarray, shape=(n,) Array of reference pitch values in Hertz ref_velocities : np.ndarray, shape=(n,) Array of MIDI velocities (i.e. between 0 and 127) of reference notes est_intervals : np.ndarray, shape=(m,2) Array of estimated notes time intervals (onset and offset times) est_pitches : np.ndarray, shape=(m,) Array of estimated pitch values in Hertz est_velocities : np.ndarray, shape=(m,) Array of MIDI velocities (i.e. between 0 and 127) of estimated notes onset_tolerance : float > 0 The tolerance for an estimated note's onset deviating from the reference note's onset, in seconds. Default is 0.05 (50 ms). pitch_tolerance : float > 0 The tolerance for an estimated note's pitch deviating from the reference note's pitch, in cents. Default is 50.0 (50 cents). offset_ratio : float > 0 or None The ratio of the reference note's duration used to define the offset_tolerance. Default is 0.2 (20%), meaning the ``offset_tolerance`` will equal the ``ref_duration * 0.2``, or 0.05 (50 ms), whichever is greater. If ``offset_ratio`` is set to ``None``, offsets are ignored in the matching. offset_min_tolerance : float > 0 The minimum tolerance for offset matching. See offset_ratio description for an explanation of how the offset tolerance is determined. Note: this parameter only influences the results if ``offset_ratio`` is not ``None``. strict : bool If ``strict=False`` (the default), threshold checks for onset, offset, and pitch matching are performed using ``<=`` (less than or equal). If ``strict=True``, the threshold checks are performed using ``<`` (less than). velocity_tolerance : float > 0 Estimated notes are considered correct if, after rescaling and normalization to [0, 1], they are within ``velocity_tolerance`` of a matched reference note. Returns ------- matching : list of tuples A list of matched reference and estimated notes. ``matching[i] == (i, j)`` where reference note ``i`` matches estimated note ``j``. 
""" # Compute note matching as usual using standard transcription function matching = transcription.match_notes( ref_intervals, ref_pitches, est_intervals, est_pitches, onset_tolerance, pitch_tolerance, offset_ratio, offset_min_tolerance, strict) # Rescale reference velocities to the range [0, 1] min_velocity, max_velocity = np.min(ref_velocities), np.max(ref_velocities) # Make the smallest possible range 1 to avoid divide by zero velocity_range = max(1, max_velocity - min_velocity) ref_velocities = (ref_velocities - min_velocity)/float(velocity_range) # Convert matching list-of-tuples to array for fancy indexing matching = np.array(matching) # When there is no matching, return an empty list if matching.size == 0: return [] # Grab velocities for matched notes ref_matched_velocities = ref_velocities[matching[:, 0]] est_matched_velocities = est_velocities[matching[:, 1]] # Find slope and intercept of line which produces best least-squares fit # between matched est and ref velocities slope, intercept = np.linalg.lstsq( np.vstack([est_matched_velocities, np.ones(len(est_matched_velocities))]).T, ref_matched_velocities)[0] # Re-scale est velocities to match ref est_matched_velocities = slope*est_matched_velocities + intercept # Compute the absolute error of (rescaled) estimated velocities vs. # normalized reference velocities. Error will be in [0, 1] velocity_diff = np.abs(est_matched_velocities - ref_matched_velocities) # Check whether each error is within the provided tolerance velocity_within_tolerance = (velocity_diff < velocity_tolerance) # Only keep matches whose velocity was within the provided tolerance matching = matching[velocity_within_tolerance] # Convert back to list-of-tuple format matching = [tuple(_) for _ in matching] return matching
[ "def", "match_notes", "(", "ref_intervals", ",", "ref_pitches", ",", "ref_velocities", ",", "est_intervals", ",", "est_pitches", ",", "est_velocities", ",", "onset_tolerance", "=", "0.05", ",", "pitch_tolerance", "=", "50.0", ",", "offset_ratio", "=", "0.2", ",", "offset_min_tolerance", "=", "0.05", ",", "strict", "=", "False", ",", "velocity_tolerance", "=", "0.1", ")", ":", "# Compute note matching as usual using standard transcription function", "matching", "=", "transcription", ".", "match_notes", "(", "ref_intervals", ",", "ref_pitches", ",", "est_intervals", ",", "est_pitches", ",", "onset_tolerance", ",", "pitch_tolerance", ",", "offset_ratio", ",", "offset_min_tolerance", ",", "strict", ")", "# Rescale reference velocities to the range [0, 1]", "min_velocity", ",", "max_velocity", "=", "np", ".", "min", "(", "ref_velocities", ")", ",", "np", ".", "max", "(", "ref_velocities", ")", "# Make the smallest possible range 1 to avoid divide by zero", "velocity_range", "=", "max", "(", "1", ",", "max_velocity", "-", "min_velocity", ")", "ref_velocities", "=", "(", "ref_velocities", "-", "min_velocity", ")", "/", "float", "(", "velocity_range", ")", "# Convert matching list-of-tuples to array for fancy indexing", "matching", "=", "np", ".", "array", "(", "matching", ")", "# When there is no matching, return an empty list", "if", "matching", ".", "size", "==", "0", ":", "return", "[", "]", "# Grab velocities for matched notes", "ref_matched_velocities", "=", "ref_velocities", "[", "matching", "[", ":", ",", "0", "]", "]", "est_matched_velocities", "=", "est_velocities", "[", "matching", "[", ":", ",", "1", "]", "]", "# Find slope and intercept of line which produces best least-squares fit", "# between matched est and ref velocities", "slope", ",", "intercept", "=", "np", ".", "linalg", ".", "lstsq", "(", "np", ".", "vstack", "(", "[", "est_matched_velocities", ",", "np", ".", "ones", "(", "len", "(", "est_matched_velocities", ")", ")", "]", ")", ".", "T", ",", "ref_matched_velocities", ")", "[", "0", "]", "# Re-scale est velocities to match ref", "est_matched_velocities", "=", "slope", "*", "est_matched_velocities", "+", "intercept", "# Compute the absolute error of (rescaled) estimated velocities vs.", "# normalized reference velocities. Error will be in [0, 1]", "velocity_diff", "=", "np", ".", "abs", "(", "est_matched_velocities", "-", "ref_matched_velocities", ")", "# Check whether each error is within the provided tolerance", "velocity_within_tolerance", "=", "(", "velocity_diff", "<", "velocity_tolerance", ")", "# Only keep matches whose velocity was within the provided tolerance", "matching", "=", "matching", "[", "velocity_within_tolerance", "]", "# Convert back to list-of-tuple format", "matching", "=", "[", "tuple", "(", "_", ")", "for", "_", "in", "matching", "]", "return", "matching" ]
Match notes, taking note velocity into consideration. This function first calls :func:`mir_eval.transcription.match_notes` to match notes according to the supplied intervals, pitches, onset, offset, and pitch tolerances. The velocities of the matched notes are then used to estimate a slope and intercept which can rescale the estimated velocities so that they are as close as possible (in L2 sense) to their matched reference velocities. Velocities are then normalized to the range [0, 1]. An estimated note is then further only considered correct if its velocity is within ``velocity_tolerance`` of its matched (according to pitch and timing) reference note. Parameters ---------- ref_intervals : np.ndarray, shape=(n,2) Array of reference notes time intervals (onset and offset times) ref_pitches : np.ndarray, shape=(n,) Array of reference pitch values in Hertz ref_velocities : np.ndarray, shape=(n,) Array of MIDI velocities (i.e. between 0 and 127) of reference notes est_intervals : np.ndarray, shape=(m,2) Array of estimated notes time intervals (onset and offset times) est_pitches : np.ndarray, shape=(m,) Array of estimated pitch values in Hertz est_velocities : np.ndarray, shape=(m,) Array of MIDI velocities (i.e. between 0 and 127) of estimated notes onset_tolerance : float > 0 The tolerance for an estimated note's onset deviating from the reference note's onset, in seconds. Default is 0.05 (50 ms). pitch_tolerance : float > 0 The tolerance for an estimated note's pitch deviating from the reference note's pitch, in cents. Default is 50.0 (50 cents). offset_ratio : float > 0 or None The ratio of the reference note's duration used to define the offset_tolerance. Default is 0.2 (20%), meaning the ``offset_tolerance`` will equal the ``ref_duration * 0.2``, or 0.05 (50 ms), whichever is greater. If ``offset_ratio`` is set to ``None``, offsets are ignored in the matching. offset_min_tolerance : float > 0 The minimum tolerance for offset matching. See offset_ratio description for an explanation of how the offset tolerance is determined. Note: this parameter only influences the results if ``offset_ratio`` is not ``None``. strict : bool If ``strict=False`` (the default), threshold checks for onset, offset, and pitch matching are performed using ``<=`` (less than or equal). If ``strict=True``, the threshold checks are performed using ``<`` (less than). velocity_tolerance : float > 0 Estimated notes are considered correct if, after rescaling and normalization to [0, 1], they are within ``velocity_tolerance`` of a matched reference note. Returns ------- matching : list of tuples A list of matched reference and estimated notes. ``matching[i] == (i, j)`` where reference note ``i`` matches estimated note ``j``.
[ "Match", "notes", "taking", "note", "velocity", "into", "consideration", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/transcription_velocity.py#L98-L201
train
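The velocity-rescaling step above is a plain one-dimensional least-squares fit. Here is that step in isolation, on made-up matched velocities; unlike the library code, this sketch passes rcond=None to silence NumPy's FutureWarning.

import numpy as np

# Invented matched velocities; the reference side is assumed to be already
# normalized to [0, 1], as in match_notes.
est_matched = np.array([30.0, 60.0, 90.0, 120.0])
ref_matched = np.array([0.20, 0.45, 0.70, 0.95])

# Fit ref ~= slope * est + intercept in the least-squares sense.
A = np.vstack([est_matched, np.ones(len(est_matched))]).T
slope, intercept = np.linalg.lstsq(A, ref_matched, rcond=None)[0]

rescaled = slope * est_matched + intercept
within = np.abs(rescaled - ref_matched) < 0.1  # default velocity_tolerance
print(slope, intercept, within)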
craffel/mir_eval
mir_eval/beat.py
validate
def validate(reference_beats, estimated_beats): """Checks that the input annotations to a metric look like valid beat time arrays, and throws helpful errors if not. Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray estimated beat times, in seconds """ # If reference or estimated beats are empty, # warn because metric will be 0 if reference_beats.size == 0: warnings.warn("Reference beats are empty.") if estimated_beats.size == 0: warnings.warn("Estimated beats are empty.") for beats in [reference_beats, estimated_beats]: util.validate_events(beats, MAX_TIME)
python
def validate(reference_beats, estimated_beats): """Checks that the input annotations to a metric look like valid beat time arrays, and throws helpful errors if not. Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray estimated beat times, in seconds """ # If reference or estimated beats are empty, # warn because metric will be 0 if reference_beats.size == 0: warnings.warn("Reference beats are empty.") if estimated_beats.size == 0: warnings.warn("Estimated beats are empty.") for beats in [reference_beats, estimated_beats]: util.validate_events(beats, MAX_TIME)
[ "def", "validate", "(", "reference_beats", ",", "estimated_beats", ")", ":", "# If reference or estimated beats are empty,", "# warn because metric will be 0", "if", "reference_beats", ".", "size", "==", "0", ":", "warnings", ".", "warn", "(", "\"Reference beats are empty.\"", ")", "if", "estimated_beats", ".", "size", "==", "0", ":", "warnings", ".", "warn", "(", "\"Estimated beats are empty.\"", ")", "for", "beats", "in", "[", "reference_beats", ",", "estimated_beats", "]", ":", "util", ".", "validate_events", "(", "beats", ",", "MAX_TIME", ")" ]
Checks that the input annotations to a metric look like valid beat time arrays, and throws helpful errors if not. Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray estimated beat times, in seconds
[ "Checks", "that", "the", "input", "annotations", "to", "a", "metric", "look", "like", "valid", "beat", "time", "arrays", "and", "throws", "helpful", "errors", "if", "not", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/beat.py#L77-L95
train
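Because empty inputs only warn rather than raise, downstream beat metrics degrade to zero instead of failing. A small demonstration:

import warnings
import numpy as np
import mir_eval

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # Empty reference beats trigger a warning, not an exception.
    mir_eval.beat.validate(np.array([]), np.array([0.5, 1.0, 1.5]))
    print(caught[0].message)  # Reference beats are empty.

# The metrics themselves then return 0 for empty inputs.
print(mir_eval.beat.f_measure(np.array([]), np.array([0.5, 1.0, 1.5])))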
craffel/mir_eval
mir_eval/beat.py
_get_reference_beat_variations
def _get_reference_beat_variations(reference_beats): """Return metric variations of the reference beats Parameters ---------- reference_beats : np.ndarray beat locations in seconds Returns ------- reference_beats : np.ndarray Original beat locations off_beat : np.ndarray 180 degrees out of phase from the original beat locations double : np.ndarray Beats at 2x the original tempo half_odd : np.ndarray Half tempo, odd beats half_even : np.ndarray Half tempo, even beats """ # Create annotations at twice the metric level interpolated_indices = np.arange(0, reference_beats.shape[0]-.5, .5) original_indices = np.arange(0, reference_beats.shape[0]) double_reference_beats = np.interp(interpolated_indices, original_indices, reference_beats) # Return metric variations: # True, off-beat, double tempo, half tempo odd, and half tempo even return (reference_beats, double_reference_beats[1::2], double_reference_beats, reference_beats[::2], reference_beats[1::2])
python
def _get_reference_beat_variations(reference_beats): """Return metric variations of the reference beats Parameters ---------- reference_beats : np.ndarray beat locations in seconds Returns ------- reference_beats : np.ndarray Original beat locations off_beat : np.ndarray 180 degrees out of phase from the original beat locations double : np.ndarray Beats at 2x the original tempo half_odd : np.ndarray Half tempo, odd beats half_even : np.ndarray Half tempo, even beats """ # Create annotations at twice the metric level interpolated_indices = np.arange(0, reference_beats.shape[0]-.5, .5) original_indices = np.arange(0, reference_beats.shape[0]) double_reference_beats = np.interp(interpolated_indices, original_indices, reference_beats) # Return metric variations: # True, off-beat, double tempo, half tempo odd, and half tempo even return (reference_beats, double_reference_beats[1::2], double_reference_beats, reference_beats[::2], reference_beats[1::2])
[ "def", "_get_reference_beat_variations", "(", "reference_beats", ")", ":", "# Create annotations at twice the metric level", "interpolated_indices", "=", "np", ".", "arange", "(", "0", ",", "reference_beats", ".", "shape", "[", "0", "]", "-", ".5", ",", ".5", ")", "original_indices", "=", "np", ".", "arange", "(", "0", ",", "reference_beats", ".", "shape", "[", "0", "]", ")", "double_reference_beats", "=", "np", ".", "interp", "(", "interpolated_indices", ",", "original_indices", ",", "reference_beats", ")", "# Return metric variations:", "# True, off-beat, double tempo, half tempo odd, and half tempo even", "return", "(", "reference_beats", ",", "double_reference_beats", "[", "1", ":", ":", "2", "]", ",", "double_reference_beats", ",", "reference_beats", "[", ":", ":", "2", "]", ",", "reference_beats", "[", "1", ":", ":", "2", "]", ")" ]
Return metric variations of the reference beats Parameters ---------- reference_beats : np.ndarray beat locations in seconds Returns ------- reference_beats : np.ndarray Original beat locations off_beat : np.ndarray 180 degrees out of phase from the original beat locations double : np.ndarray Beats at 2x the original tempo half_odd : np.ndarray Half tempo, odd beats half_even : np.ndarray Half tempo, even beats
[ "Return", "metric", "variations", "of", "the", "reference", "beats" ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/beat.py#L98-L133
train
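The five variations are easiest to see on concrete numbers; this sketch reproduces the np.interp doubling trick on toy 60 BPM annotations.

import numpy as np

beats = np.array([1.0, 2.0, 3.0, 4.0])  # toy annotations, one beat per second

# Interpolate at half-integer indices to double the metrical level.
interp_idx = np.arange(0, beats.shape[0] - .5, .5)
double = np.interp(interp_idx, np.arange(beats.shape[0]), beats)

print(double)        # [1.  1.5 2.  2.5 3.  3.5 4. ]
print(double[1::2])  # off-beat:          [1.5 2.5 3.5]
print(beats[::2])    # half tempo, odd:   [1. 3.]
print(beats[1::2])   # half tempo, even:  [2. 4.]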
craffel/mir_eval
mir_eval/beat.py
f_measure
def f_measure(reference_beats, estimated_beats, f_measure_threshold=0.07): """Compute the F-measure of correct vs incorrectly predicted beats. "Correctness" is determined over a small window. Examples -------- >>> reference_beats = mir_eval.io.load_events('reference.txt') >>> reference_beats = mir_eval.beat.trim_beats(reference_beats) >>> estimated_beats = mir_eval.io.load_events('estimated.txt') >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats) >>> f_measure = mir_eval.beat.f_measure(reference_beats, estimated_beats) Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray estimated beat times, in seconds f_measure_threshold : float Window size, in seconds (Default value = 0.07) Returns ------- f_score : float The computed F-measure score """ validate(reference_beats, estimated_beats) # When estimated beats are empty, no beats are correct; metric is 0 if estimated_beats.size == 0 or reference_beats.size == 0: return 0. # Compute the best-case matching between reference and estimated locations matching = util.match_events(reference_beats, estimated_beats, f_measure_threshold) precision = float(len(matching))/len(estimated_beats) recall = float(len(matching))/len(reference_beats) return util.f_measure(precision, recall)
python
def f_measure(reference_beats, estimated_beats, f_measure_threshold=0.07): """Compute the F-measure of correct vs incorrectly predicted beats. "Correctness" is determined over a small window. Examples -------- >>> reference_beats = mir_eval.io.load_events('reference.txt') >>> reference_beats = mir_eval.beat.trim_beats(reference_beats) >>> estimated_beats = mir_eval.io.load_events('estimated.txt') >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats) >>> f_measure = mir_eval.beat.f_measure(reference_beats, estimated_beats) Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray estimated beat times, in seconds f_measure_threshold : float Window size, in seconds (Default value = 0.07) Returns ------- f_score : float The computed F-measure score """ validate(reference_beats, estimated_beats) # When estimated beats are empty, no beats are correct; metric is 0 if estimated_beats.size == 0 or reference_beats.size == 0: return 0. # Compute the best-case matching between reference and estimated locations matching = util.match_events(reference_beats, estimated_beats, f_measure_threshold) precision = float(len(matching))/len(estimated_beats) recall = float(len(matching))/len(reference_beats) return util.f_measure(precision, recall)
[ "def", "f_measure", "(", "reference_beats", ",", "estimated_beats", ",", "f_measure_threshold", "=", "0.07", ")", ":", "validate", "(", "reference_beats", ",", "estimated_beats", ")", "# When estimated beats are empty, no beats are correct; metric is 0", "if", "estimated_beats", ".", "size", "==", "0", "or", "reference_beats", ".", "size", "==", "0", ":", "return", "0.", "# Compute the best-case matching between reference and estimated locations", "matching", "=", "util", ".", "match_events", "(", "reference_beats", ",", "estimated_beats", ",", "f_measure_threshold", ")", "precision", "=", "float", "(", "len", "(", "matching", ")", ")", "/", "len", "(", "estimated_beats", ")", "recall", "=", "float", "(", "len", "(", "matching", ")", ")", "/", "len", "(", "reference_beats", ")", "return", "util", ".", "f_measure", "(", "precision", ",", "recall", ")" ]
Compute the F-measure of correctly vs incorrectly predicted beats. "Correctness" is determined over a small window. Examples -------- >>> reference_beats = mir_eval.io.load_events('reference.txt') >>> reference_beats = mir_eval.beat.trim_beats(reference_beats) >>> estimated_beats = mir_eval.io.load_events('estimated.txt') >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats) >>> f_measure = mir_eval.beat.f_measure(reference_beats, estimated_beats) Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray estimated beat times, in seconds f_measure_threshold : float Window size, in seconds (Default value = 0.07) Returns ------- f_score : float The computed F-measure score
[ "Compute", "the", "F", "-", "measure", "of", "correct", "vs", "incorrectly", "predicted", "beats", ".", "Correctness", "is", "determined", "over", "a", "small", "window", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/beat.py#L136-L178
train
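On toy beats the precision/recall arithmetic is easy to verify by hand; the values below are invented for illustration.

import numpy as np
import mir_eval

ref = np.array([1.0, 2.0, 3.0, 4.0])
est = np.array([1.02, 2.5, 3.0])

# Within the 70 ms window, 1.02 matches 1.0 and 3.0 matches 3.0, so
# precision = 2/3, recall = 2/4, and F = 2PR/(P+R) = 4/7 ~= 0.571.
print(mir_eval.beat.f_measure(ref, est, f_measure_threshold=0.07))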
craffel/mir_eval
mir_eval/beat.py
cemgil
def cemgil(reference_beats, estimated_beats, cemgil_sigma=0.04): """Cemgil's score, computes a gaussian error of each estimated beat. Compares against the original beat times and all metrical variations. Examples -------- >>> reference_beats = mir_eval.io.load_events('reference.txt') >>> reference_beats = mir_eval.beat.trim_beats(reference_beats) >>> estimated_beats = mir_eval.io.load_events('estimated.txt') >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats) >>> cemgil_score, cemgil_max = mir_eval.beat.cemgil(reference_beats, estimated_beats) Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray query beat times, in seconds cemgil_sigma : float Sigma parameter of gaussian error windows (Default value = 0.04) Returns ------- cemgil_score : float Cemgil's score for the original reference beats cemgil_max : float The best Cemgil score for all metrical variations """ validate(reference_beats, estimated_beats) # When estimated beats are empty, no beats are correct; metric is 0 if estimated_beats.size == 0 or reference_beats.size == 0: return 0., 0. # We'll compute Cemgil's accuracy for each variation accuracies = [] for reference_beats in _get_reference_beat_variations(reference_beats): accuracy = 0 # Cycle through beats for beat in reference_beats: # Find the error for the closest beat to the reference beat beat_diff = np.min(np.abs(beat - estimated_beats)) # Add gaussian error into the accuracy accuracy += np.exp(-(beat_diff**2)/(2.0*cemgil_sigma**2)) # Normalize the accuracy accuracy /= .5*(estimated_beats.shape[0] + reference_beats.shape[0]) # Add it to our list of accuracy scores accuracies.append(accuracy) # Return raw accuracy with non-varied annotations # and maximal accuracy across all variations return accuracies[0], np.max(accuracies)
python
def cemgil(reference_beats, estimated_beats, cemgil_sigma=0.04): """Cemgil's score, computes a gaussian error of each estimated beat. Compares against the original beat times and all metrical variations. Examples -------- >>> reference_beats = mir_eval.io.load_events('reference.txt') >>> reference_beats = mir_eval.beat.trim_beats(reference_beats) >>> estimated_beats = mir_eval.io.load_events('estimated.txt') >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats) >>> cemgil_score, cemgil_max = mir_eval.beat.cemgil(reference_beats, estimated_beats) Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray query beat times, in seconds cemgil_sigma : float Sigma parameter of gaussian error windows (Default value = 0.04) Returns ------- cemgil_score : float Cemgil's score for the original reference beats cemgil_max : float The best Cemgil score for all metrical variations """ validate(reference_beats, estimated_beats) # When estimated beats are empty, no beats are correct; metric is 0 if estimated_beats.size == 0 or reference_beats.size == 0: return 0., 0. # We'll compute Cemgil's accuracy for each variation accuracies = [] for reference_beats in _get_reference_beat_variations(reference_beats): accuracy = 0 # Cycle through beats for beat in reference_beats: # Find the error for the closest beat to the reference beat beat_diff = np.min(np.abs(beat - estimated_beats)) # Add gaussian error into the accuracy accuracy += np.exp(-(beat_diff**2)/(2.0*cemgil_sigma**2)) # Normalize the accuracy accuracy /= .5*(estimated_beats.shape[0] + reference_beats.shape[0]) # Add it to our list of accuracy scores accuracies.append(accuracy) # Return raw accuracy with non-varied annotations # and maximal accuracy across all variations return accuracies[0], np.max(accuracies)
[ "def", "cemgil", "(", "reference_beats", ",", "estimated_beats", ",", "cemgil_sigma", "=", "0.04", ")", ":", "validate", "(", "reference_beats", ",", "estimated_beats", ")", "# When estimated beats are empty, no beats are correct; metric is 0", "if", "estimated_beats", ".", "size", "==", "0", "or", "reference_beats", ".", "size", "==", "0", ":", "return", "0.", ",", "0.", "# We'll compute Cemgil's accuracy for each variation", "accuracies", "=", "[", "]", "for", "reference_beats", "in", "_get_reference_beat_variations", "(", "reference_beats", ")", ":", "accuracy", "=", "0", "# Cycle through beats", "for", "beat", "in", "reference_beats", ":", "# Find the error for the closest beat to the reference beat", "beat_diff", "=", "np", ".", "min", "(", "np", ".", "abs", "(", "beat", "-", "estimated_beats", ")", ")", "# Add gaussian error into the accuracy", "accuracy", "+=", "np", ".", "exp", "(", "-", "(", "beat_diff", "**", "2", ")", "/", "(", "2.0", "*", "cemgil_sigma", "**", "2", ")", ")", "# Normalize the accuracy", "accuracy", "/=", ".5", "*", "(", "estimated_beats", ".", "shape", "[", "0", "]", "+", "reference_beats", ".", "shape", "[", "0", "]", ")", "# Add it to our list of accuracy scores", "accuracies", ".", "append", "(", "accuracy", ")", "# Return raw accuracy with non-varied annotations", "# and maximal accuracy across all variations", "return", "accuracies", "[", "0", "]", ",", "np", ".", "max", "(", "accuracies", ")" ]
Cemgil's score computes a Gaussian error for each estimated beat. Compares against the original beat times and all metrical variations. Examples -------- >>> reference_beats = mir_eval.io.load_events('reference.txt') >>> reference_beats = mir_eval.beat.trim_beats(reference_beats) >>> estimated_beats = mir_eval.io.load_events('estimated.txt') >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats) >>> cemgil_score, cemgil_max = mir_eval.beat.cemgil(reference_beats, estimated_beats) Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray query beat times, in seconds cemgil_sigma : float Sigma parameter of gaussian error windows (Default value = 0.04) Returns ------- cemgil_score : float Cemgil's score for the original reference beats cemgil_max : float The best Cemgil score for all metrical variations
[ "Cemgil", "s", "score", "computes", "a", "gaussian", "error", "of", "each", "estimated", "beat", ".", "Compares", "against", "the", "original", "beat", "times", "and", "all", "metrical", "variations", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/beat.py#L181-L233
train
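The per-variation Gaussian accumulation can be written in a few lines; this sketch mirrors the inner loop above on toy data.

import numpy as np

sigma = 0.04
ref = np.array([1.0, 2.0, 3.0])
est = np.array([1.01, 2.05, 3.0])

# Score each reference beat by the nearest estimate under a Gaussian window,
# then normalize by the average sequence length, as in the inner loop.
accuracy = sum(
    np.exp(-np.min(np.abs(b - est)) ** 2 / (2.0 * sigma ** 2)) for b in ref)
accuracy /= 0.5 * (est.shape[0] + ref.shape[0])
print(accuracy)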
craffel/mir_eval
mir_eval/beat.py
goto
def goto(reference_beats, estimated_beats, goto_threshold=0.35, goto_mu=0.2, goto_sigma=0.2): """Calculate Goto's score, a binary 1 or 0 depending on some specific heuristic criteria Examples -------- >>> reference_beats = mir_eval.io.load_events('reference.txt') >>> reference_beats = mir_eval.beat.trim_beats(reference_beats) >>> estimated_beats = mir_eval.io.load_events('estimated.txt') >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats) >>> goto_score = mir_eval.beat.goto(reference_beats, estimated_beats) Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray query beat times, in seconds goto_threshold : float Threshold of beat error for a beat to be "correct" (Default value = 0.35) goto_mu : float The mean of the beat errors in the continuously correct track must be less than this (Default value = 0.2) goto_sigma : float The std of the beat errors in the continuously correct track must be less than this (Default value = 0.2) Returns ------- goto_score : float Either 1.0 or 0.0 if some specific criteria are met """ validate(reference_beats, estimated_beats) # When estimated beats are empty, no beats are correct; metric is 0 if estimated_beats.size == 0 or reference_beats.size == 0: return 0. # Error for each beat beat_error = np.ones(reference_beats.shape[0]) # Flag for whether the reference and estimated beats are paired paired = np.zeros(reference_beats.shape[0]) # Keep track of Goto's three criteria goto_criteria = 0 for n in range(1, reference_beats.shape[0]-1): # Get previous inner-reference-beat-interval previous_interval = 0.5*(reference_beats[n] - reference_beats[n-1]) # Window start - in the middle of the current beat and the previous window_min = reference_beats[n] - previous_interval # Next inter-reference-beat-interval next_interval = 0.5*(reference_beats[n+1] - reference_beats[n]) # Window end - in the middle of the current beat and the next window_max = reference_beats[n] + next_interval # Get estimated beats in the window beats_in_window = np.logical_and((estimated_beats >= window_min), (estimated_beats < window_max)) # False negative/positive if beats_in_window.sum() == 0 or beats_in_window.sum() > 1: paired[n] = 0 beat_error[n] = 1 else: # Single beat is paired! paired[n] = 1 # Get offset of the estimated beat and the reference beat offset = estimated_beats[beats_in_window] - reference_beats[n] # Scale by previous or next interval if offset < 0: beat_error[n] = offset/previous_interval else: beat_error[n] = offset/next_interval # Get indices of incorrect beats incorrect_beats = np.flatnonzero(np.abs(beat_error) > goto_threshold) # All beats are correct (first and last will be 0 so always correct) if incorrect_beats.shape[0] < 3: # Get the track of correct beats track = beat_error[incorrect_beats[0] + 1:incorrect_beats[-1] - 1] goto_criteria = 1 else: # Get the track of maximal length track_len = np.max(np.diff(incorrect_beats)) track_start = np.flatnonzero(np.diff(incorrect_beats) == track_len)[0] # Is the track length at least 25% of the song? if track_len - 1 > .25*(reference_beats.shape[0] - 2): goto_criteria = 1 start_beat = incorrect_beats[track_start] end_beat = incorrect_beats[track_start + 1] track = beat_error[start_beat:end_beat + 1] # If we have a track if goto_criteria: # Are mean and std of the track less than the required thresholds? if np.mean(np.abs(track)) < goto_mu \ and np.std(track, ddof=1) < goto_sigma: goto_criteria = 3 # If all criteria are met, score is 100%! 
return 1.0*(goto_criteria == 3)
python
def goto(reference_beats, estimated_beats, goto_threshold=0.35, goto_mu=0.2, goto_sigma=0.2): """Calculate Goto's score, a binary 1 or 0 depending on some specific heuristic criteria Examples -------- >>> reference_beats = mir_eval.io.load_events('reference.txt') >>> reference_beats = mir_eval.beat.trim_beats(reference_beats) >>> estimated_beats = mir_eval.io.load_events('estimated.txt') >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats) >>> goto_score = mir_eval.beat.goto(reference_beats, estimated_beats) Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray query beat times, in seconds goto_threshold : float Threshold of beat error for a beat to be "correct" (Default value = 0.35) goto_mu : float The mean of the beat errors in the continuously correct track must be less than this (Default value = 0.2) goto_sigma : float The std of the beat errors in the continuously correct track must be less than this (Default value = 0.2) Returns ------- goto_score : float Either 1.0 or 0.0 if some specific criteria are met """ validate(reference_beats, estimated_beats) # When estimated beats are empty, no beats are correct; metric is 0 if estimated_beats.size == 0 or reference_beats.size == 0: return 0. # Error for each beat beat_error = np.ones(reference_beats.shape[0]) # Flag for whether the reference and estimated beats are paired paired = np.zeros(reference_beats.shape[0]) # Keep track of Goto's three criteria goto_criteria = 0 for n in range(1, reference_beats.shape[0]-1): # Get previous inner-reference-beat-interval previous_interval = 0.5*(reference_beats[n] - reference_beats[n-1]) # Window start - in the middle of the current beat and the previous window_min = reference_beats[n] - previous_interval # Next inter-reference-beat-interval next_interval = 0.5*(reference_beats[n+1] - reference_beats[n]) # Window end - in the middle of the current beat and the next window_max = reference_beats[n] + next_interval # Get estimated beats in the window beats_in_window = np.logical_and((estimated_beats >= window_min), (estimated_beats < window_max)) # False negative/positive if beats_in_window.sum() == 0 or beats_in_window.sum() > 1: paired[n] = 0 beat_error[n] = 1 else: # Single beat is paired! paired[n] = 1 # Get offset of the estimated beat and the reference beat offset = estimated_beats[beats_in_window] - reference_beats[n] # Scale by previous or next interval if offset < 0: beat_error[n] = offset/previous_interval else: beat_error[n] = offset/next_interval # Get indices of incorrect beats incorrect_beats = np.flatnonzero(np.abs(beat_error) > goto_threshold) # All beats are correct (first and last will be 0 so always correct) if incorrect_beats.shape[0] < 3: # Get the track of correct beats track = beat_error[incorrect_beats[0] + 1:incorrect_beats[-1] - 1] goto_criteria = 1 else: # Get the track of maximal length track_len = np.max(np.diff(incorrect_beats)) track_start = np.flatnonzero(np.diff(incorrect_beats) == track_len)[0] # Is the track length at least 25% of the song? if track_len - 1 > .25*(reference_beats.shape[0] - 2): goto_criteria = 1 start_beat = incorrect_beats[track_start] end_beat = incorrect_beats[track_start + 1] track = beat_error[start_beat:end_beat + 1] # If we have a track if goto_criteria: # Are mean and std of the track less than the required thresholds? if np.mean(np.abs(track)) < goto_mu \ and np.std(track, ddof=1) < goto_sigma: goto_criteria = 3 # If all criteria are met, score is 100%! 
return 1.0*(goto_criteria == 3)
[ "def", "goto", "(", "reference_beats", ",", "estimated_beats", ",", "goto_threshold", "=", "0.35", ",", "goto_mu", "=", "0.2", ",", "goto_sigma", "=", "0.2", ")", ":", "validate", "(", "reference_beats", ",", "estimated_beats", ")", "# When estimated beats are empty, no beats are correct; metric is 0", "if", "estimated_beats", ".", "size", "==", "0", "or", "reference_beats", ".", "size", "==", "0", ":", "return", "0.", "# Error for each beat", "beat_error", "=", "np", ".", "ones", "(", "reference_beats", ".", "shape", "[", "0", "]", ")", "# Flag for whether the reference and estimated beats are paired", "paired", "=", "np", ".", "zeros", "(", "reference_beats", ".", "shape", "[", "0", "]", ")", "# Keep track of Goto's three criteria", "goto_criteria", "=", "0", "for", "n", "in", "range", "(", "1", ",", "reference_beats", ".", "shape", "[", "0", "]", "-", "1", ")", ":", "# Get previous inner-reference-beat-interval", "previous_interval", "=", "0.5", "*", "(", "reference_beats", "[", "n", "]", "-", "reference_beats", "[", "n", "-", "1", "]", ")", "# Window start - in the middle of the current beat and the previous", "window_min", "=", "reference_beats", "[", "n", "]", "-", "previous_interval", "# Next inter-reference-beat-interval", "next_interval", "=", "0.5", "*", "(", "reference_beats", "[", "n", "+", "1", "]", "-", "reference_beats", "[", "n", "]", ")", "# Window end - in the middle of the current beat and the next", "window_max", "=", "reference_beats", "[", "n", "]", "+", "next_interval", "# Get estimated beats in the window", "beats_in_window", "=", "np", ".", "logical_and", "(", "(", "estimated_beats", ">=", "window_min", ")", ",", "(", "estimated_beats", "<", "window_max", ")", ")", "# False negative/positive", "if", "beats_in_window", ".", "sum", "(", ")", "==", "0", "or", "beats_in_window", ".", "sum", "(", ")", ">", "1", ":", "paired", "[", "n", "]", "=", "0", "beat_error", "[", "n", "]", "=", "1", "else", ":", "# Single beat is paired!", "paired", "[", "n", "]", "=", "1", "# Get offset of the estimated beat and the reference beat", "offset", "=", "estimated_beats", "[", "beats_in_window", "]", "-", "reference_beats", "[", "n", "]", "# Scale by previous or next interval", "if", "offset", "<", "0", ":", "beat_error", "[", "n", "]", "=", "offset", "/", "previous_interval", "else", ":", "beat_error", "[", "n", "]", "=", "offset", "/", "next_interval", "# Get indices of incorrect beats", "incorrect_beats", "=", "np", ".", "flatnonzero", "(", "np", ".", "abs", "(", "beat_error", ")", ">", "goto_threshold", ")", "# All beats are correct (first and last will be 0 so always correct)", "if", "incorrect_beats", ".", "shape", "[", "0", "]", "<", "3", ":", "# Get the track of correct beats", "track", "=", "beat_error", "[", "incorrect_beats", "[", "0", "]", "+", "1", ":", "incorrect_beats", "[", "-", "1", "]", "-", "1", "]", "goto_criteria", "=", "1", "else", ":", "# Get the track of maximal length", "track_len", "=", "np", ".", "max", "(", "np", ".", "diff", "(", "incorrect_beats", ")", ")", "track_start", "=", "np", ".", "flatnonzero", "(", "np", ".", "diff", "(", "incorrect_beats", ")", "==", "track_len", ")", "[", "0", "]", "# Is the track length at least 25% of the song?", "if", "track_len", "-", "1", ">", ".25", "*", "(", "reference_beats", ".", "shape", "[", "0", "]", "-", "2", ")", ":", "goto_criteria", "=", "1", "start_beat", "=", "incorrect_beats", "[", "track_start", "]", "end_beat", "=", "incorrect_beats", "[", "track_start", "+", "1", "]", "track", "=", "beat_error", "[", 
"start_beat", ":", "end_beat", "+", "1", "]", "# If we have a track", "if", "goto_criteria", ":", "# Are mean and std of the track less than the required thresholds?", "if", "np", ".", "mean", "(", "np", ".", "abs", "(", "track", ")", ")", "<", "goto_mu", "and", "np", ".", "std", "(", "track", ",", "ddof", "=", "1", ")", "<", "goto_sigma", ":", "goto_criteria", "=", "3", "# If all criteria are met, score is 100%!", "return", "1.0", "*", "(", "goto_criteria", "==", "3", ")" ]
Calculate Goto's score, a binary 1 or 0 depending on some specific heuristic criteria Examples -------- >>> reference_beats = mir_eval.io.load_events('reference.txt') >>> reference_beats = mir_eval.beat.trim_beats(reference_beats) >>> estimated_beats = mir_eval.io.load_events('estimated.txt') >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats) >>> goto_score = mir_eval.beat.goto(reference_beats, estimated_beats) Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray query beat times, in seconds goto_threshold : float Threshold of beat error for a beat to be "correct" (Default value = 0.35) goto_mu : float The mean of the beat errors in the continuously correct track must be less than this (Default value = 0.2) goto_sigma : float The std of the beat errors in the continuously correct track must be less than this (Default value = 0.2) Returns ------- goto_score : float 1.0 if the specific heuristic criteria are met, 0.0 otherwise
[ "Calculate", "Goto", "s", "score", "a", "binary", "1", "or", "0", "depending", "on", "some", "specific", "heuristic", "criteria" ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/beat.py#L236-L335
train
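Being binary, goto is best exercised end to end. With a long, steady reference and a consistently near-perfect estimate, the interior beat errors are tiny and all three criteria should be met; the toy sequence below is invented for illustration.

import numpy as np
import mir_eval

ref = np.arange(1.0, 31.0)  # 30 beats at a steady 60 BPM
est = ref + 0.01            # every estimate 10 ms late

# Interior errors are 0.01 / 0.5 = 0.02, well under goto_threshold=0.35,
# and their mean and std clear goto_mu and goto_sigma, so the score is 1.0.
print(mir_eval.beat.goto(ref, est))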
craffel/mir_eval
mir_eval/beat.py
p_score
def p_score(reference_beats, estimated_beats, p_score_threshold=0.2): """Get McKinney's P-score. Based on the autocorrelation of the reference and estimated beats Examples -------- >>> reference_beats = mir_eval.io.load_events('reference.txt') >>> reference_beats = mir_eval.beat.trim_beats(reference_beats) >>> estimated_beats = mir_eval.io.load_events('estimated.txt') >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats) >>> p_score = mir_eval.beat.p_score(reference_beats, estimated_beats) Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray query beat times, in seconds p_score_threshold : float Window size will be ``p_score_threshold*np.median(inter_annotation_intervals)``, (Default value = 0.2) Returns ------- correlation : float McKinney's P-score """ validate(reference_beats, estimated_beats) # Warn when only one beat is provided for either estimated or reference, # report a warning if reference_beats.size == 1: warnings.warn("Only one reference beat was provided, so beat intervals" " cannot be computed.") if estimated_beats.size == 1: warnings.warn("Only one estimated beat was provided, so beat intervals" " cannot be computed.") # When estimated or reference beats have <= 1 beats, can't compute the # metric, so return 0 if estimated_beats.size <= 1 or reference_beats.size <= 1: return 0. # Quantize beats to 10ms sampling_rate = int(1.0/0.010) # Shift beats so that the minimum in either sequence is zero offset = min(estimated_beats.min(), reference_beats.min()) estimated_beats = np.array(estimated_beats - offset) reference_beats = np.array(reference_beats - offset) # Get the largest time index end_point = np.int(np.ceil(np.max([np.max(estimated_beats), np.max(reference_beats)]))) # Make impulse trains with impulses at beat locations reference_train = np.zeros(end_point*sampling_rate + 1) beat_indices = np.ceil(reference_beats*sampling_rate).astype(np.int) reference_train[beat_indices] = 1.0 estimated_train = np.zeros(end_point*sampling_rate + 1) beat_indices = np.ceil(estimated_beats*sampling_rate).astype(np.int) estimated_train[beat_indices] = 1.0 # Window size to take the correlation over # defined as .2*median(inter-annotation-intervals) annotation_intervals = np.diff(np.flatnonzero(reference_train)) win_size = int(np.round(p_score_threshold*np.median(annotation_intervals))) # Get full correlation train_correlation = np.correlate(reference_train, estimated_train, 'full') # Get the middle element - note we are rounding down on purpose here middle_lag = train_correlation.shape[0]//2 # Truncate to only valid lags (those corresponding to the window) start = middle_lag - win_size end = middle_lag + win_size + 1 train_correlation = train_correlation[start:end] # Compute and return the P-score n_beats = np.max([estimated_beats.shape[0], reference_beats.shape[0]]) return np.sum(train_correlation)/n_beats
python
def p_score(reference_beats, estimated_beats, p_score_threshold=0.2):
    """Get McKinney's P-score.
    Based on the autocorrelation of the reference and estimated beats

    Examples
    --------
    >>> reference_beats = mir_eval.io.load_events('reference.txt')
    >>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
    >>> estimated_beats = mir_eval.io.load_events('estimated.txt')
    >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
    >>> p_score = mir_eval.beat.p_score(reference_beats, estimated_beats)

    Parameters
    ----------
    reference_beats : np.ndarray
        reference beat times, in seconds
    estimated_beats : np.ndarray
        query beat times, in seconds
    p_score_threshold : float
        Window size will be
        ``p_score_threshold*np.median(inter_annotation_intervals)``,
        (Default value = 0.2)

    Returns
    -------
    correlation : float
        McKinney's P-score

    """
    validate(reference_beats, estimated_beats)
    # Warn when only one beat is provided for either estimated or reference,
    # report a warning
    if reference_beats.size == 1:
        warnings.warn("Only one reference beat was provided, so beat intervals"
                      " cannot be computed.")
    if estimated_beats.size == 1:
        warnings.warn("Only one estimated beat was provided, so beat intervals"
                      " cannot be computed.")
    # When estimated or reference beats have <= 1 beats, can't compute the
    # metric, so return 0
    if estimated_beats.size <= 1 or reference_beats.size <= 1:
        return 0.
    # Quantize beats to 10ms
    sampling_rate = int(1.0/0.010)
    # Shift beats so that the minimum in either sequence is zero
    offset = min(estimated_beats.min(), reference_beats.min())
    estimated_beats = np.array(estimated_beats - offset)
    reference_beats = np.array(reference_beats - offset)
    # Get the largest time index
    end_point = int(np.ceil(np.max([np.max(estimated_beats),
                                    np.max(reference_beats)])))
    # Make impulse trains with impulses at beat locations
    reference_train = np.zeros(end_point*sampling_rate + 1)
    beat_indices = np.ceil(reference_beats*sampling_rate).astype(int)
    reference_train[beat_indices] = 1.0
    estimated_train = np.zeros(end_point*sampling_rate + 1)
    beat_indices = np.ceil(estimated_beats*sampling_rate).astype(int)
    estimated_train[beat_indices] = 1.0
    # Window size to take the correlation over
    # defined as .2*median(inter-annotation-intervals)
    annotation_intervals = np.diff(np.flatnonzero(reference_train))
    win_size = int(np.round(p_score_threshold*np.median(annotation_intervals)))
    # Get full correlation
    train_correlation = np.correlate(reference_train, estimated_train, 'full')
    # Get the middle element - note we are rounding down on purpose here
    middle_lag = train_correlation.shape[0]//2
    # Truncate to only valid lags (those corresponding to the window)
    start = middle_lag - win_size
    end = middle_lag + win_size + 1
    train_correlation = train_correlation[start:end]
    # Compute and return the P-score
    n_beats = np.max([estimated_beats.shape[0], reference_beats.shape[0]])
    return np.sum(train_correlation)/n_beats
[ "def", "p_score", "(", "reference_beats", ",", "estimated_beats", ",", "p_score_threshold", "=", "0.2", ")", ":", "validate", "(", "reference_beats", ",", "estimated_beats", ")", "# Warn when only one beat is provided for either estimated or reference,", "# report a warning", "if", "reference_beats", ".", "size", "==", "1", ":", "warnings", ".", "warn", "(", "\"Only one reference beat was provided, so beat intervals\"", "\" cannot be computed.\"", ")", "if", "estimated_beats", ".", "size", "==", "1", ":", "warnings", ".", "warn", "(", "\"Only one estimated beat was provided, so beat intervals\"", "\" cannot be computed.\"", ")", "# When estimated or reference beats have <= 1 beats, can't compute the", "# metric, so return 0", "if", "estimated_beats", ".", "size", "<=", "1", "or", "reference_beats", ".", "size", "<=", "1", ":", "return", "0.", "# Quantize beats to 10ms", "sampling_rate", "=", "int", "(", "1.0", "/", "0.010", ")", "# Shift beats so that the minimum in either sequence is zero", "offset", "=", "min", "(", "estimated_beats", ".", "min", "(", ")", ",", "reference_beats", ".", "min", "(", ")", ")", "estimated_beats", "=", "np", ".", "array", "(", "estimated_beats", "-", "offset", ")", "reference_beats", "=", "np", ".", "array", "(", "reference_beats", "-", "offset", ")", "# Get the largest time index", "end_point", "=", "np", ".", "int", "(", "np", ".", "ceil", "(", "np", ".", "max", "(", "[", "np", ".", "max", "(", "estimated_beats", ")", ",", "np", ".", "max", "(", "reference_beats", ")", "]", ")", ")", ")", "# Make impulse trains with impulses at beat locations", "reference_train", "=", "np", ".", "zeros", "(", "end_point", "*", "sampling_rate", "+", "1", ")", "beat_indices", "=", "np", ".", "ceil", "(", "reference_beats", "*", "sampling_rate", ")", ".", "astype", "(", "np", ".", "int", ")", "reference_train", "[", "beat_indices", "]", "=", "1.0", "estimated_train", "=", "np", ".", "zeros", "(", "end_point", "*", "sampling_rate", "+", "1", ")", "beat_indices", "=", "np", ".", "ceil", "(", "estimated_beats", "*", "sampling_rate", ")", ".", "astype", "(", "np", ".", "int", ")", "estimated_train", "[", "beat_indices", "]", "=", "1.0", "# Window size to take the correlation over", "# defined as .2*median(inter-annotation-intervals)", "annotation_intervals", "=", "np", ".", "diff", "(", "np", ".", "flatnonzero", "(", "reference_train", ")", ")", "win_size", "=", "int", "(", "np", ".", "round", "(", "p_score_threshold", "*", "np", ".", "median", "(", "annotation_intervals", ")", ")", ")", "# Get full correlation", "train_correlation", "=", "np", ".", "correlate", "(", "reference_train", ",", "estimated_train", ",", "'full'", ")", "# Get the middle element - note we are rounding down on purpose here", "middle_lag", "=", "train_correlation", ".", "shape", "[", "0", "]", "//", "2", "# Truncate to only valid lags (those corresponding to the window)", "start", "=", "middle_lag", "-", "win_size", "end", "=", "middle_lag", "+", "win_size", "+", "1", "train_correlation", "=", "train_correlation", "[", "start", ":", "end", "]", "# Compute and return the P-score", "n_beats", "=", "np", ".", "max", "(", "[", "estimated_beats", ".", "shape", "[", "0", "]", ",", "reference_beats", ".", "shape", "[", "0", "]", "]", ")", "return", "np", ".", "sum", "(", "train_correlation", ")", "/", "n_beats" ]
Get McKinney's P-score. Based on the autocorrelation of the reference and estimated beats Examples -------- >>> reference_beats = mir_eval.io.load_events('reference.txt') >>> reference_beats = mir_eval.beat.trim_beats(reference_beats) >>> estimated_beats = mir_eval.io.load_events('estimated.txt') >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats) >>> p_score = mir_eval.beat.p_score(reference_beats, estimated_beats) Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray query beat times, in seconds p_score_threshold : float Window size will be ``p_score_threshold*np.median(inter_annotation_intervals)``, (Default value = 0.2) Returns ------- correlation : float McKinney's P-score
[ "Get", "McKinney", "s", "P", "-", "score", ".", "Based", "on", "the", "autocorrelation", "of", "the", "reference", "and", "estimated", "beats" ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/beat.py#L338-L412
train
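A short sanity check for `p_score` (illustrative only; the beat times are synthetic): identical impulse trains put all correlation mass at zero lag, while a shift larger than the correlation window should zero the score.

import numpy as np
import mir_eval

reference_beats = mir_eval.beat.trim_beats(np.arange(0.0, 30.0, 0.5))
perfect = mir_eval.beat.p_score(reference_beats, reference_beats.copy())
# should be 1.0: all correlation mass sits at zero lag

# A 150 ms offset exceeds the window of 0.2 * 0.5 s = 100 ms, so no lag
# inside the window lines up and the score should drop to 0.0
shifted = mir_eval.beat.p_score(reference_beats, reference_beats + 0.15)
print(perfect, shifted)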
craffel/mir_eval
mir_eval/beat.py
information_gain
def information_gain(reference_beats, estimated_beats, bins=41): """Get the information gain - K-L divergence of the beat error histogram to a uniform histogram Examples -------- >>> reference_beats = mir_eval.io.load_events('reference.txt') >>> reference_beats = mir_eval.beat.trim_beats(reference_beats) >>> estimated_beats = mir_eval.io.load_events('estimated.txt') >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats) >>> information_gain = mir_eval.beat.information_gain(reference_beats, estimated_beats) Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray query beat times, in seconds bins : int Number of bins in the beat error histogram (Default value = 41) Returns ------- information_gain_score : float Entropy of beat error histogram """ validate(reference_beats, estimated_beats) # If an even number of bins is provided, # there will be no bin centered at zero, so warn the user. if not bins % 2: warnings.warn("bins parameter is even, " "so there will not be a bin centered at zero.") # Warn when only one beat is provided for either estimated or reference, # report a warning if reference_beats.size == 1: warnings.warn("Only one reference beat was provided, so beat intervals" " cannot be computed.") if estimated_beats.size == 1: warnings.warn("Only one estimated beat was provided, so beat intervals" " cannot be computed.") # When estimated or reference beats have <= 1 beats, can't compute the # metric, so return 0 if estimated_beats.size <= 1 or reference_beats.size <= 1: return 0. # Get entropy for reference beats->estimated beats # and estimated beats->reference beats forward_entropy = _get_entropy(reference_beats, estimated_beats, bins) backward_entropy = _get_entropy(estimated_beats, reference_beats, bins) # Pick the larger of the entropies norm = np.log2(bins) if forward_entropy > backward_entropy: # Note that the beat evaluation toolbox does not normalize information_gain_score = (norm - forward_entropy)/norm else: information_gain_score = (norm - backward_entropy)/norm return information_gain_score
python
def information_gain(reference_beats, estimated_beats, bins=41): """Get the information gain - K-L divergence of the beat error histogram to a uniform histogram Examples -------- >>> reference_beats = mir_eval.io.load_events('reference.txt') >>> reference_beats = mir_eval.beat.trim_beats(reference_beats) >>> estimated_beats = mir_eval.io.load_events('estimated.txt') >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats) >>> information_gain = mir_eval.beat.information_gain(reference_beats, estimated_beats) Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray query beat times, in seconds bins : int Number of bins in the beat error histogram (Default value = 41) Returns ------- information_gain_score : float Entropy of beat error histogram """ validate(reference_beats, estimated_beats) # If an even number of bins is provided, # there will be no bin centered at zero, so warn the user. if not bins % 2: warnings.warn("bins parameter is even, " "so there will not be a bin centered at zero.") # Warn when only one beat is provided for either estimated or reference, # report a warning if reference_beats.size == 1: warnings.warn("Only one reference beat was provided, so beat intervals" " cannot be computed.") if estimated_beats.size == 1: warnings.warn("Only one estimated beat was provided, so beat intervals" " cannot be computed.") # When estimated or reference beats have <= 1 beats, can't compute the # metric, so return 0 if estimated_beats.size <= 1 or reference_beats.size <= 1: return 0. # Get entropy for reference beats->estimated beats # and estimated beats->reference beats forward_entropy = _get_entropy(reference_beats, estimated_beats, bins) backward_entropy = _get_entropy(estimated_beats, reference_beats, bins) # Pick the larger of the entropies norm = np.log2(bins) if forward_entropy > backward_entropy: # Note that the beat evaluation toolbox does not normalize information_gain_score = (norm - forward_entropy)/norm else: information_gain_score = (norm - backward_entropy)/norm return information_gain_score
[ "def", "information_gain", "(", "reference_beats", ",", "estimated_beats", ",", "bins", "=", "41", ")", ":", "validate", "(", "reference_beats", ",", "estimated_beats", ")", "# If an even number of bins is provided,", "# there will be no bin centered at zero, so warn the user.", "if", "not", "bins", "%", "2", ":", "warnings", ".", "warn", "(", "\"bins parameter is even, \"", "\"so there will not be a bin centered at zero.\"", ")", "# Warn when only one beat is provided for either estimated or reference,", "# report a warning", "if", "reference_beats", ".", "size", "==", "1", ":", "warnings", ".", "warn", "(", "\"Only one reference beat was provided, so beat intervals\"", "\" cannot be computed.\"", ")", "if", "estimated_beats", ".", "size", "==", "1", ":", "warnings", ".", "warn", "(", "\"Only one estimated beat was provided, so beat intervals\"", "\" cannot be computed.\"", ")", "# When estimated or reference beats have <= 1 beats, can't compute the", "# metric, so return 0", "if", "estimated_beats", ".", "size", "<=", "1", "or", "reference_beats", ".", "size", "<=", "1", ":", "return", "0.", "# Get entropy for reference beats->estimated beats", "# and estimated beats->reference beats", "forward_entropy", "=", "_get_entropy", "(", "reference_beats", ",", "estimated_beats", ",", "bins", ")", "backward_entropy", "=", "_get_entropy", "(", "estimated_beats", ",", "reference_beats", ",", "bins", ")", "# Pick the larger of the entropies", "norm", "=", "np", ".", "log2", "(", "bins", ")", "if", "forward_entropy", ">", "backward_entropy", ":", "# Note that the beat evaluation toolbox does not normalize", "information_gain_score", "=", "(", "norm", "-", "forward_entropy", ")", "/", "norm", "else", ":", "information_gain_score", "=", "(", "norm", "-", "backward_entropy", ")", "/", "norm", "return", "information_gain_score" ]
Get the information gain - K-L divergence of the beat error histogram to a uniform histogram Examples -------- >>> reference_beats = mir_eval.io.load_events('reference.txt') >>> reference_beats = mir_eval.beat.trim_beats(reference_beats) >>> estimated_beats = mir_eval.io.load_events('estimated.txt') >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats) >>> information_gain = mir_eval.beat.information_gain(reference_beats, estimated_beats) Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray query beat times, in seconds bins : int Number of bins in the beat error histogram (Default value = 41) Returns ------- information_gain_score : float Entropy of beat error histogram
[ "Get", "the", "information", "gain", "-", "K", "-", "L", "divergence", "of", "the", "beat", "error", "histogram", "to", "a", "uniform", "histogram" ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/beat.py#L580-L639
train
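Similarly for `information_gain` (synthetic data, illustrative): identical sequences put every beat error in a single histogram bin, so the entropy is near zero and the normalized score should approach 1.0.

import numpy as np
import mir_eval

reference_beats = mir_eval.beat.trim_beats(np.arange(0.0, 30.0, 0.5))
ig = mir_eval.beat.information_gain(reference_beats, reference_beats.copy())
print(ig)  # should be close to 1.0 with the default 41 error-histogram bins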
craffel/mir_eval
mir_eval/util.py
index_labels
def index_labels(labels, case_sensitive=False): """Convert a list of string identifiers into numerical indices. Parameters ---------- labels : list of strings, shape=(n,) A list of annotations, e.g., segment or chord labels from an annotation file. case_sensitive : bool Set to True to enable case-sensitive label indexing (Default value = False) Returns ------- indices : list, shape=(n,) Numerical representation of ``labels`` index_to_label : dict Mapping to convert numerical indices back to labels. ``labels[i] == index_to_label[indices[i]]`` """ label_to_index = {} index_to_label = {} # If we're not case-sensitive, if not case_sensitive: labels = [str(s).lower() for s in labels] # First, build the unique label mapping for index, s in enumerate(sorted(set(labels))): label_to_index[s] = index index_to_label[index] = s # Remap the labels to indices indices = [label_to_index[s] for s in labels] # Return the converted labels, and the inverse mapping return indices, index_to_label
python
def index_labels(labels, case_sensitive=False): """Convert a list of string identifiers into numerical indices. Parameters ---------- labels : list of strings, shape=(n,) A list of annotations, e.g., segment or chord labels from an annotation file. case_sensitive : bool Set to True to enable case-sensitive label indexing (Default value = False) Returns ------- indices : list, shape=(n,) Numerical representation of ``labels`` index_to_label : dict Mapping to convert numerical indices back to labels. ``labels[i] == index_to_label[indices[i]]`` """ label_to_index = {} index_to_label = {} # If we're not case-sensitive, if not case_sensitive: labels = [str(s).lower() for s in labels] # First, build the unique label mapping for index, s in enumerate(sorted(set(labels))): label_to_index[s] = index index_to_label[index] = s # Remap the labels to indices indices = [label_to_index[s] for s in labels] # Return the converted labels, and the inverse mapping return indices, index_to_label
[ "def", "index_labels", "(", "labels", ",", "case_sensitive", "=", "False", ")", ":", "label_to_index", "=", "{", "}", "index_to_label", "=", "{", "}", "# If we're not case-sensitive,", "if", "not", "case_sensitive", ":", "labels", "=", "[", "str", "(", "s", ")", ".", "lower", "(", ")", "for", "s", "in", "labels", "]", "# First, build the unique label mapping", "for", "index", ",", "s", "in", "enumerate", "(", "sorted", "(", "set", "(", "labels", ")", ")", ")", ":", "label_to_index", "[", "s", "]", "=", "index", "index_to_label", "[", "index", "]", "=", "s", "# Remap the labels to indices", "indices", "=", "[", "label_to_index", "[", "s", "]", "for", "s", "in", "labels", "]", "# Return the converted labels, and the inverse mapping", "return", "indices", ",", "index_to_label" ]
Convert a list of string identifiers into numerical indices. Parameters ---------- labels : list of strings, shape=(n,) A list of annotations, e.g., segment or chord labels from an annotation file. case_sensitive : bool Set to True to enable case-sensitive label indexing (Default value = False) Returns ------- indices : list, shape=(n,) Numerical representation of ``labels`` index_to_label : dict Mapping to convert numerical indices back to labels. ``labels[i] == index_to_label[indices[i]]``
[ "Convert", "a", "list", "of", "string", "identifiers", "into", "numerical", "indices", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L13-L52
train
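The `index_labels` docstring carries no Examples section, so here is a small illustrative call (values made up; the commented results follow from the sort-then-enumerate logic above):

import mir_eval

labels = ['Verse', 'chorus', 'verse', 'Bridge']
indices, index_to_label = mir_eval.util.index_labels(labels)
# Case-insensitive by default, so 'Verse' and 'verse' share an index:
# indices        -> [2, 1, 2, 0]
# index_to_label -> {0: 'bridge', 1: 'chorus', 2: 'verse'}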
craffel/mir_eval
mir_eval/util.py
intervals_to_samples
def intervals_to_samples(intervals, labels, offset=0, sample_size=0.1, fill_value=None): """Convert an array of labeled time intervals to annotated samples. Parameters ---------- intervals : np.ndarray, shape=(n, d) An array of time intervals, as returned by :func:`mir_eval.io.load_intervals()` or :func:`mir_eval.io.load_labeled_intervals()`. The ``i`` th interval spans time ``intervals[i, 0]`` to ``intervals[i, 1]``. labels : list, shape=(n,) The annotation for each interval offset : float > 0 Phase offset of the sampled time grid (in seconds) (Default value = 0) sample_size : float > 0 duration of each sample to be generated (in seconds) (Default value = 0.1) fill_value : type(labels[0]) Object to use for the label with out-of-range time points. (Default value = None) Returns ------- sample_times : list list of sample times sample_labels : list array of labels for each generated sample Notes ----- Intervals will be rounded down to the nearest multiple of ``sample_size``. """ # Round intervals to the sample size num_samples = int(np.floor(intervals.max() / sample_size)) sample_indices = np.arange(num_samples, dtype=np.float32) sample_times = (sample_indices*sample_size + offset).tolist() sampled_labels = interpolate_intervals( intervals, labels, sample_times, fill_value) return sample_times, sampled_labels
python
def intervals_to_samples(intervals, labels, offset=0, sample_size=0.1, fill_value=None): """Convert an array of labeled time intervals to annotated samples. Parameters ---------- intervals : np.ndarray, shape=(n, d) An array of time intervals, as returned by :func:`mir_eval.io.load_intervals()` or :func:`mir_eval.io.load_labeled_intervals()`. The ``i`` th interval spans time ``intervals[i, 0]`` to ``intervals[i, 1]``. labels : list, shape=(n,) The annotation for each interval offset : float > 0 Phase offset of the sampled time grid (in seconds) (Default value = 0) sample_size : float > 0 duration of each sample to be generated (in seconds) (Default value = 0.1) fill_value : type(labels[0]) Object to use for the label with out-of-range time points. (Default value = None) Returns ------- sample_times : list list of sample times sample_labels : list array of labels for each generated sample Notes ----- Intervals will be rounded down to the nearest multiple of ``sample_size``. """ # Round intervals to the sample size num_samples = int(np.floor(intervals.max() / sample_size)) sample_indices = np.arange(num_samples, dtype=np.float32) sample_times = (sample_indices*sample_size + offset).tolist() sampled_labels = interpolate_intervals( intervals, labels, sample_times, fill_value) return sample_times, sampled_labels
[ "def", "intervals_to_samples", "(", "intervals", ",", "labels", ",", "offset", "=", "0", ",", "sample_size", "=", "0.1", ",", "fill_value", "=", "None", ")", ":", "# Round intervals to the sample size", "num_samples", "=", "int", "(", "np", ".", "floor", "(", "intervals", ".", "max", "(", ")", "/", "sample_size", ")", ")", "sample_indices", "=", "np", ".", "arange", "(", "num_samples", ",", "dtype", "=", "np", ".", "float32", ")", "sample_times", "=", "(", "sample_indices", "*", "sample_size", "+", "offset", ")", ".", "tolist", "(", ")", "sampled_labels", "=", "interpolate_intervals", "(", "intervals", ",", "labels", ",", "sample_times", ",", "fill_value", ")", "return", "sample_times", ",", "sampled_labels" ]
Convert an array of labeled time intervals to annotated samples. Parameters ---------- intervals : np.ndarray, shape=(n, d) An array of time intervals, as returned by :func:`mir_eval.io.load_intervals()` or :func:`mir_eval.io.load_labeled_intervals()`. The ``i`` th interval spans time ``intervals[i, 0]`` to ``intervals[i, 1]``. labels : list, shape=(n,) The annotation for each interval offset : float > 0 Phase offset of the sampled time grid (in seconds) (Default value = 0) sample_size : float > 0 duration of each sample to be generated (in seconds) (Default value = 0.1) fill_value : type(labels[0]) Object to use for the label with out-of-range time points. (Default value = None) Returns ------- sample_times : list list of sample times sample_labels : list array of labels for each generated sample Notes ----- Intervals will be rounded down to the nearest multiple of ``sample_size``.
[ "Convert", "an", "array", "of", "labeled", "time", "intervals", "to", "annotated", "samples", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L76-L126
train
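A usage sketch for `intervals_to_samples` (toy values, not from the dataset): two labeled intervals sampled on a 0.5 s grid.

import numpy as np
import mir_eval

intervals = np.array([[0.0, 1.0], [1.0, 2.5]])
labels = ['A', 'B']
times, sampled = mir_eval.util.intervals_to_samples(
    intervals, labels, sample_size=0.5)
# times   -> [0.0, 0.5, 1.0, 1.5, 2.0]
# sampled -> ['A', 'A', 'B', 'B', 'B']; the shared boundary at 1.0 s
# takes the later interval's label because it is written last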
craffel/mir_eval
mir_eval/util.py
interpolate_intervals
def interpolate_intervals(intervals, labels, time_points, fill_value=None): """Assign labels to a set of points in time given a set of intervals. Time points that do not lie within an interval are mapped to `fill_value`. Parameters ---------- intervals : np.ndarray, shape=(n, 2) An array of time intervals, as returned by :func:`mir_eval.io.load_intervals()`. The ``i`` th interval spans time ``intervals[i, 0]`` to ``intervals[i, 1]``. Intervals are assumed to be disjoint. labels : list, shape=(n,) The annotation for each interval time_points : array_like, shape=(m,) Points in time to assign labels. These must be in non-decreasing order. fill_value : type(labels[0]) Object to use for the label with out-of-range time points. (Default value = None) Returns ------- aligned_labels : list Labels corresponding to the given time points. Raises ------ ValueError If `time_points` is not in non-decreasing order. """ # Verify that time_points is sorted time_points = np.asarray(time_points) if np.any(time_points[1:] < time_points[:-1]): raise ValueError('time_points must be in non-decreasing order') aligned_labels = [fill_value] * len(time_points) starts = np.searchsorted(time_points, intervals[:, 0], side='left') ends = np.searchsorted(time_points, intervals[:, 1], side='right') for (start, end, lab) in zip(starts, ends, labels): aligned_labels[start:end] = [lab] * (end - start) return aligned_labels
python
def interpolate_intervals(intervals, labels, time_points, fill_value=None): """Assign labels to a set of points in time given a set of intervals. Time points that do not lie within an interval are mapped to `fill_value`. Parameters ---------- intervals : np.ndarray, shape=(n, 2) An array of time intervals, as returned by :func:`mir_eval.io.load_intervals()`. The ``i`` th interval spans time ``intervals[i, 0]`` to ``intervals[i, 1]``. Intervals are assumed to be disjoint. labels : list, shape=(n,) The annotation for each interval time_points : array_like, shape=(m,) Points in time to assign labels. These must be in non-decreasing order. fill_value : type(labels[0]) Object to use for the label with out-of-range time points. (Default value = None) Returns ------- aligned_labels : list Labels corresponding to the given time points. Raises ------ ValueError If `time_points` is not in non-decreasing order. """ # Verify that time_points is sorted time_points = np.asarray(time_points) if np.any(time_points[1:] < time_points[:-1]): raise ValueError('time_points must be in non-decreasing order') aligned_labels = [fill_value] * len(time_points) starts = np.searchsorted(time_points, intervals[:, 0], side='left') ends = np.searchsorted(time_points, intervals[:, 1], side='right') for (start, end, lab) in zip(starts, ends, labels): aligned_labels[start:end] = [lab] * (end - start) return aligned_labels
[ "def", "interpolate_intervals", "(", "intervals", ",", "labels", ",", "time_points", ",", "fill_value", "=", "None", ")", ":", "# Verify that time_points is sorted", "time_points", "=", "np", ".", "asarray", "(", "time_points", ")", "if", "np", ".", "any", "(", "time_points", "[", "1", ":", "]", "<", "time_points", "[", ":", "-", "1", "]", ")", ":", "raise", "ValueError", "(", "'time_points must be in non-decreasing order'", ")", "aligned_labels", "=", "[", "fill_value", "]", "*", "len", "(", "time_points", ")", "starts", "=", "np", ".", "searchsorted", "(", "time_points", ",", "intervals", "[", ":", ",", "0", "]", ",", "side", "=", "'left'", ")", "ends", "=", "np", ".", "searchsorted", "(", "time_points", ",", "intervals", "[", ":", ",", "1", "]", ",", "side", "=", "'right'", ")", "for", "(", "start", ",", "end", ",", "lab", ")", "in", "zip", "(", "starts", ",", "ends", ",", "labels", ")", ":", "aligned_labels", "[", "start", ":", "end", "]", "=", "[", "lab", "]", "*", "(", "end", "-", "start", ")", "return", "aligned_labels" ]
Assign labels to a set of points in time given a set of intervals. Time points that do not lie within an interval are mapped to `fill_value`. Parameters ---------- intervals : np.ndarray, shape=(n, 2) An array of time intervals, as returned by :func:`mir_eval.io.load_intervals()`. The ``i`` th interval spans time ``intervals[i, 0]`` to ``intervals[i, 1]``. Intervals are assumed to be disjoint. labels : list, shape=(n,) The annotation for each interval time_points : array_like, shape=(m,) Points in time to assign labels. These must be in non-decreasing order. fill_value : type(labels[0]) Object to use for the label with out-of-range time points. (Default value = None) Returns ------- aligned_labels : list Labels corresponding to the given time points. Raises ------ ValueError If `time_points` is not in non-decreasing order.
[ "Assign", "labels", "to", "a", "set", "of", "points", "in", "time", "given", "a", "set", "of", "intervals", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L129-L180
train
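An illustrative call to `interpolate_intervals` (made-up values) showing how time points outside every interval pick up the fill value:

import numpy as np
import mir_eval

intervals = np.array([[0.0, 1.0], [2.0, 3.0]])
labels = ['speech', 'music']
time_points = [0.5, 1.5, 2.5]
out = mir_eval.util.interpolate_intervals(
    intervals, labels, time_points, fill_value='silence')
# out -> ['speech', 'silence', 'music']: 1.5 s falls in the gap
# between the two intervals, so it receives the fill value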
craffel/mir_eval
mir_eval/util.py
sort_labeled_intervals
def sort_labeled_intervals(intervals, labels=None): '''Sort intervals, and optionally, their corresponding labels according to start time. Parameters ---------- intervals : np.ndarray, shape=(n, 2) The input intervals labels : list, optional Labels for each interval Returns ------- intervals_sorted or (intervals_sorted, labels_sorted) Labels are only returned if provided as input ''' idx = np.argsort(intervals[:, 0]) intervals_sorted = intervals[idx] if labels is None: return intervals_sorted else: return intervals_sorted, [labels[_] for _ in idx]
python
def sort_labeled_intervals(intervals, labels=None): '''Sort intervals, and optionally, their corresponding labels according to start time. Parameters ---------- intervals : np.ndarray, shape=(n, 2) The input intervals labels : list, optional Labels for each interval Returns ------- intervals_sorted or (intervals_sorted, labels_sorted) Labels are only returned if provided as input ''' idx = np.argsort(intervals[:, 0]) intervals_sorted = intervals[idx] if labels is None: return intervals_sorted else: return intervals_sorted, [labels[_] for _ in idx]
[ "def", "sort_labeled_intervals", "(", "intervals", ",", "labels", "=", "None", ")", ":", "idx", "=", "np", ".", "argsort", "(", "intervals", "[", ":", ",", "0", "]", ")", "intervals_sorted", "=", "intervals", "[", "idx", "]", "if", "labels", "is", "None", ":", "return", "intervals_sorted", "else", ":", "return", "intervals_sorted", ",", "[", "labels", "[", "_", "]", "for", "_", "in", "idx", "]" ]
Sort intervals, and optionally, their corresponding labels according to start time. Parameters ---------- intervals : np.ndarray, shape=(n, 2) The input intervals labels : list, optional Labels for each interval Returns ------- intervals_sorted or (intervals_sorted, labels_sorted) Labels are only returned if provided as input
[ "Sort", "intervals", "and", "optionally", "their", "corresponding", "labels", "according", "to", "start", "time", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L183-L208
train
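A minimal sketch for `sort_labeled_intervals` (toy values): labels are permuted along with their intervals.

import numpy as np
import mir_eval

intervals = np.array([[2.0, 3.0], [0.0, 1.0]])
labels = ['B', 'A']
ivs, labs = mir_eval.util.sort_labeled_intervals(intervals, labels)
# ivs  -> array([[0., 1.], [2., 3.]])
# labs -> ['A', 'B']
# With labels=None only the sorted interval array is returned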
craffel/mir_eval
mir_eval/util.py
f_measure
def f_measure(precision, recall, beta=1.0): """Compute the f-measure from precision and recall scores. Parameters ---------- precision : float in (0, 1] Precision recall : float in (0, 1] Recall beta : float > 0 Weighting factor for f-measure (Default value = 1.0) Returns ------- f_measure : float The weighted f-measure """ if precision == 0 and recall == 0: return 0.0 return (1 + beta**2)*precision*recall/((beta**2)*precision + recall)
python
def f_measure(precision, recall, beta=1.0): """Compute the f-measure from precision and recall scores. Parameters ---------- precision : float in (0, 1] Precision recall : float in (0, 1] Recall beta : float > 0 Weighting factor for f-measure (Default value = 1.0) Returns ------- f_measure : float The weighted f-measure """ if precision == 0 and recall == 0: return 0.0 return (1 + beta**2)*precision*recall/((beta**2)*precision + recall)
[ "def", "f_measure", "(", "precision", ",", "recall", ",", "beta", "=", "1.0", ")", ":", "if", "precision", "==", "0", "and", "recall", "==", "0", ":", "return", "0.0", "return", "(", "1", "+", "beta", "**", "2", ")", "*", "precision", "*", "recall", "/", "(", "(", "beta", "**", "2", ")", "*", "precision", "+", "recall", ")" ]
Compute the f-measure from precision and recall scores. Parameters ---------- precision : float in (0, 1] Precision recall : float in (0, 1] Recall beta : float > 0 Weighting factor for f-measure (Default value = 1.0) Returns ------- f_measure : float The weighted f-measure
[ "Compute", "the", "f", "-", "measure", "from", "precision", "and", "recall", "scores", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L211-L234
train
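Worked numbers for `f_measure` (illustrative), with precision 0.8 and recall 0.6:

import mir_eval

f1 = mir_eval.util.f_measure(0.8, 0.6)
# (1 + 1) * 0.8 * 0.6 / (1 * 0.8 + 0.6) = 0.96 / 1.4 ~= 0.686
f_half = mir_eval.util.f_measure(0.8, 0.6, beta=0.5)
# beta < 1 weights precision more heavily:
# 1.25 * 0.48 / (0.25 * 0.8 + 0.6) = 0.6 / 0.8 = 0.75
print(f1, f_half)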
craffel/mir_eval
mir_eval/util.py
intervals_to_boundaries
def intervals_to_boundaries(intervals, q=5): """Convert interval times into boundaries. Parameters ---------- intervals : np.ndarray, shape=(n_events, 2) Array of interval start and end-times q : int Number of decimals to round to. (Default value = 5) Returns ------- boundaries : np.ndarray Interval boundary times, including the end of the final interval """ return np.unique(np.ravel(np.round(intervals, decimals=q)))
python
def intervals_to_boundaries(intervals, q=5): """Convert interval times into boundaries. Parameters ---------- intervals : np.ndarray, shape=(n_events, 2) Array of interval start and end-times q : int Number of decimals to round to. (Default value = 5) Returns ------- boundaries : np.ndarray Interval boundary times, including the end of the final interval """ return np.unique(np.ravel(np.round(intervals, decimals=q)))
[ "def", "intervals_to_boundaries", "(", "intervals", ",", "q", "=", "5", ")", ":", "return", "np", ".", "unique", "(", "np", ".", "ravel", "(", "np", ".", "round", "(", "intervals", ",", "decimals", "=", "q", ")", ")", ")" ]
Convert interval times into boundaries. Parameters ---------- intervals : np.ndarray, shape=(n_events, 2) Array of interval start and end-times q : int Number of decimals to round to. (Default value = 5) Returns ------- boundaries : np.ndarray Interval boundary times, including the end of the final interval
[ "Convert", "interval", "times", "into", "boundaries", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L237-L254
train
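`intervals_to_boundaries` in one line (toy input):

import numpy as np
import mir_eval

intervals = np.array([[0.0, 1.0], [1.0, 2.5]])
boundaries = mir_eval.util.intervals_to_boundaries(intervals)
# boundaries -> array([0., 1., 2.5]); the shared boundary at 1.0
# appears once because np.unique deduplicates the raveled times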
craffel/mir_eval
mir_eval/util.py
boundaries_to_intervals
def boundaries_to_intervals(boundaries): """Convert an array of event times into intervals Parameters ---------- boundaries : list-like List-like of event times. These are assumed to be unique timestamps in ascending order. Returns ------- intervals : np.ndarray, shape=(n_intervals, 2) Start and end time for each interval """ if not np.allclose(boundaries, np.unique(boundaries)): raise ValueError('Boundary times are not unique or not ascending.') intervals = np.asarray(list(zip(boundaries[:-1], boundaries[1:]))) return intervals
python
def boundaries_to_intervals(boundaries): """Convert an array of event times into intervals Parameters ---------- boundaries : list-like List-like of event times. These are assumed to be unique timestamps in ascending order. Returns ------- intervals : np.ndarray, shape=(n_intervals, 2) Start and end time for each interval """ if not np.allclose(boundaries, np.unique(boundaries)): raise ValueError('Boundary times are not unique or not ascending.') intervals = np.asarray(list(zip(boundaries[:-1], boundaries[1:]))) return intervals
[ "def", "boundaries_to_intervals", "(", "boundaries", ")", ":", "if", "not", "np", ".", "allclose", "(", "boundaries", ",", "np", ".", "unique", "(", "boundaries", ")", ")", ":", "raise", "ValueError", "(", "'Boundary times are not unique or not ascending.'", ")", "intervals", "=", "np", ".", "asarray", "(", "list", "(", "zip", "(", "boundaries", "[", ":", "-", "1", "]", ",", "boundaries", "[", "1", ":", "]", ")", ")", ")", "return", "intervals" ]
Convert an array of event times into intervals Parameters ---------- boundaries : list-like List-like of event times. These are assumed to be unique timestamps in ascending order. Returns ------- intervals : np.ndarray, shape=(n_intervals, 2) Start and end time for each interval
[ "Convert", "an", "array", "of", "event", "times", "into", "intervals" ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L257-L277
train
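And the inverse direction with `boundaries_to_intervals` (same toy values), which round-trips contiguous, ascending boundaries:

import numpy as np
import mir_eval

boundaries = np.array([0.0, 1.0, 2.5])
intervals = mir_eval.util.boundaries_to_intervals(boundaries)
# intervals -> array([[0., 1.], [1., 2.5]])
# Non-unique or descending input raises ValueError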
craffel/mir_eval
mir_eval/util.py
merge_labeled_intervals
def merge_labeled_intervals(x_intervals, x_labels, y_intervals, y_labels): r"""Merge the time intervals of two sequences. Parameters ---------- x_intervals : np.ndarray Array of interval times (seconds) x_labels : list or None List of labels y_intervals : np.ndarray Array of interval times (seconds) y_labels : list or None List of labels Returns ------- new_intervals : np.ndarray New interval times of the merged sequences. new_x_labels : list New labels for the sequence ``x`` new_y_labels : list New labels for the sequence ``y`` """ align_check = [x_intervals[0, 0] == y_intervals[0, 0], x_intervals[-1, 1] == y_intervals[-1, 1]] if False in align_check: raise ValueError( "Time intervals do not align; did you mean to call " "'adjust_intervals()' first?") time_boundaries = np.unique( np.concatenate([x_intervals, y_intervals], axis=0)) output_intervals = np.array( [time_boundaries[:-1], time_boundaries[1:]]).T x_labels_out, y_labels_out = [], [] x_label_range = np.arange(len(x_labels)) y_label_range = np.arange(len(y_labels)) for t0, _ in output_intervals: x_idx = x_label_range[(t0 >= x_intervals[:, 0])] x_labels_out.append(x_labels[x_idx[-1]]) y_idx = y_label_range[(t0 >= y_intervals[:, 0])] y_labels_out.append(y_labels[y_idx[-1]]) return output_intervals, x_labels_out, y_labels_out
python
def merge_labeled_intervals(x_intervals, x_labels, y_intervals, y_labels): r"""Merge the time intervals of two sequences. Parameters ---------- x_intervals : np.ndarray Array of interval times (seconds) x_labels : list or None List of labels y_intervals : np.ndarray Array of interval times (seconds) y_labels : list or None List of labels Returns ------- new_intervals : np.ndarray New interval times of the merged sequences. new_x_labels : list New labels for the sequence ``x`` new_y_labels : list New labels for the sequence ``y`` """ align_check = [x_intervals[0, 0] == y_intervals[0, 0], x_intervals[-1, 1] == y_intervals[-1, 1]] if False in align_check: raise ValueError( "Time intervals do not align; did you mean to call " "'adjust_intervals()' first?") time_boundaries = np.unique( np.concatenate([x_intervals, y_intervals], axis=0)) output_intervals = np.array( [time_boundaries[:-1], time_boundaries[1:]]).T x_labels_out, y_labels_out = [], [] x_label_range = np.arange(len(x_labels)) y_label_range = np.arange(len(y_labels)) for t0, _ in output_intervals: x_idx = x_label_range[(t0 >= x_intervals[:, 0])] x_labels_out.append(x_labels[x_idx[-1]]) y_idx = y_label_range[(t0 >= y_intervals[:, 0])] y_labels_out.append(y_labels[y_idx[-1]]) return output_intervals, x_labels_out, y_labels_out
[ "def", "merge_labeled_intervals", "(", "x_intervals", ",", "x_labels", ",", "y_intervals", ",", "y_labels", ")", ":", "align_check", "=", "[", "x_intervals", "[", "0", ",", "0", "]", "==", "y_intervals", "[", "0", ",", "0", "]", ",", "x_intervals", "[", "-", "1", ",", "1", "]", "==", "y_intervals", "[", "-", "1", ",", "1", "]", "]", "if", "False", "in", "align_check", ":", "raise", "ValueError", "(", "\"Time intervals do not align; did you mean to call \"", "\"'adjust_intervals()' first?\"", ")", "time_boundaries", "=", "np", ".", "unique", "(", "np", ".", "concatenate", "(", "[", "x_intervals", ",", "y_intervals", "]", ",", "axis", "=", "0", ")", ")", "output_intervals", "=", "np", ".", "array", "(", "[", "time_boundaries", "[", ":", "-", "1", "]", ",", "time_boundaries", "[", "1", ":", "]", "]", ")", ".", "T", "x_labels_out", ",", "y_labels_out", "=", "[", "]", ",", "[", "]", "x_label_range", "=", "np", ".", "arange", "(", "len", "(", "x_labels", ")", ")", "y_label_range", "=", "np", ".", "arange", "(", "len", "(", "y_labels", ")", ")", "for", "t0", ",", "_", "in", "output_intervals", ":", "x_idx", "=", "x_label_range", "[", "(", "t0", ">=", "x_intervals", "[", ":", ",", "0", "]", ")", "]", "x_labels_out", ".", "append", "(", "x_labels", "[", "x_idx", "[", "-", "1", "]", "]", ")", "y_idx", "=", "y_label_range", "[", "(", "t0", ">=", "y_intervals", "[", ":", ",", "0", "]", ")", "]", "y_labels_out", ".", "append", "(", "y_labels", "[", "y_idx", "[", "-", "1", "]", "]", ")", "return", "output_intervals", ",", "x_labels_out", ",", "y_labels_out" ]
r"""Merge the time intervals of two sequences. Parameters ---------- x_intervals : np.ndarray Array of interval times (seconds) x_labels : list or None List of labels y_intervals : np.ndarray Array of interval times (seconds) y_labels : list or None List of labels Returns ------- new_intervals : np.ndarray New interval times of the merged sequences. new_x_labels : list New labels for the sequence ``x`` new_y_labels : list New labels for the sequence ``y``
[ "r", "Merge", "the", "time", "intervals", "of", "two", "sequences", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L501-L544
train
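A small worked example for `merge_labeled_intervals` (invented labels and times; both sequences must share start and end times, as the align check above enforces):

import numpy as np
import mir_eval

x_intervals = np.array([[0.0, 1.0], [1.0, 3.0]])
x_labels = ['A', 'B']
y_intervals = np.array([[0.0, 2.0], [2.0, 3.0]])
y_labels = ['c', 'd']
merged, new_x, new_y = mir_eval.util.merge_labeled_intervals(
    x_intervals, x_labels, y_intervals, y_labels)
# merged -> [[0., 1.], [1., 2.], [2., 3.]]
# new_x  -> ['A', 'B', 'B']
# new_y  -> ['c', 'c', 'd']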
craffel/mir_eval
mir_eval/util.py
match_events
def match_events(ref, est, window, distance=None): """Compute a maximum matching between reference and estimated event times, subject to a window constraint. Given two lists of event times ``ref`` and ``est``, we seek the largest set of correspondences ``(ref[i], est[j])`` such that ``distance(ref[i], est[j]) <= window``, and each ``ref[i]`` and ``est[j]`` is matched at most once. This is useful for computing precision/recall metrics in beat tracking, onset detection, and segmentation. Parameters ---------- ref : np.ndarray, shape=(n,) Array of reference values est : np.ndarray, shape=(m,) Array of estimated values window : float > 0 Size of the window. distance : function function that computes the outer distance of ref and est. By default uses ``|ref[i] - est[j]|`` Returns ------- matching : list of tuples A list of matched reference and event numbers. ``matching[i] == (i, j)`` where ``ref[i]`` matches ``est[j]``. """ if distance is not None: # Compute the indices of feasible pairings hits = np.where(distance(ref, est) <= window) else: hits = _fast_hit_windows(ref, est, window) # Construct the graph input G = {} for ref_i, est_i in zip(*hits): if est_i not in G: G[est_i] = [] G[est_i].append(ref_i) # Compute the maximum matching matching = sorted(_bipartite_match(G).items()) return matching
python
def match_events(ref, est, window, distance=None): """Compute a maximum matching between reference and estimated event times, subject to a window constraint. Given two lists of event times ``ref`` and ``est``, we seek the largest set of correspondences ``(ref[i], est[j])`` such that ``distance(ref[i], est[j]) <= window``, and each ``ref[i]`` and ``est[j]`` is matched at most once. This is useful for computing precision/recall metrics in beat tracking, onset detection, and segmentation. Parameters ---------- ref : np.ndarray, shape=(n,) Array of reference values est : np.ndarray, shape=(m,) Array of estimated values window : float > 0 Size of the window. distance : function function that computes the outer distance of ref and est. By default uses ``|ref[i] - est[j]|`` Returns ------- matching : list of tuples A list of matched reference and event numbers. ``matching[i] == (i, j)`` where ``ref[i]`` matches ``est[j]``. """ if distance is not None: # Compute the indices of feasible pairings hits = np.where(distance(ref, est) <= window) else: hits = _fast_hit_windows(ref, est, window) # Construct the graph input G = {} for ref_i, est_i in zip(*hits): if est_i not in G: G[est_i] = [] G[est_i].append(ref_i) # Compute the maximum matching matching = sorted(_bipartite_match(G).items()) return matching
[ "def", "match_events", "(", "ref", ",", "est", ",", "window", ",", "distance", "=", "None", ")", ":", "if", "distance", "is", "not", "None", ":", "# Compute the indices of feasible pairings", "hits", "=", "np", ".", "where", "(", "distance", "(", "ref", ",", "est", ")", "<=", "window", ")", "else", ":", "hits", "=", "_fast_hit_windows", "(", "ref", ",", "est", ",", "window", ")", "# Construct the graph input", "G", "=", "{", "}", "for", "ref_i", ",", "est_i", "in", "zip", "(", "*", "hits", ")", ":", "if", "est_i", "not", "in", "G", ":", "G", "[", "est_i", "]", "=", "[", "]", "G", "[", "est_i", "]", ".", "append", "(", "ref_i", ")", "# Compute the maximum matching", "matching", "=", "sorted", "(", "_bipartite_match", "(", "G", ")", ".", "items", "(", ")", ")", "return", "matching" ]
Compute a maximum matching between reference and estimated event times, subject to a window constraint. Given two lists of event times ``ref`` and ``est``, we seek the largest set of correspondences ``(ref[i], est[j])`` such that ``distance(ref[i], est[j]) <= window``, and each ``ref[i]`` and ``est[j]`` is matched at most once. This is useful for computing precision/recall metrics in beat tracking, onset detection, and segmentation. Parameters ---------- ref : np.ndarray, shape=(n,) Array of reference values est : np.ndarray, shape=(m,) Array of estimated values window : float > 0 Size of the window. distance : function function that computes the outer distance of ref and est. By default uses ``|ref[i] - est[j]|`` Returns ------- matching : list of tuples A list of matched reference and event numbers. ``matching[i] == (i, j)`` where ``ref[i]`` matches ``est[j]``.
[ "Compute", "a", "maximum", "matching", "between", "reference", "and", "estimated", "event", "times", "subject", "to", "a", "window", "constraint", "." ]
f41c8dafaea04b411252a516d1965af43c7d531b
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L663-L710
train
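Finally, an illustrative `match_events` call (synthetic event times): within a 0.2 s window, each event pairs with at most one counterpart.

import numpy as np
import mir_eval

ref = np.array([0.0, 1.0, 2.0])
est = np.array([0.1, 1.05, 3.0])
matching = mir_eval.util.match_events(ref, est, window=0.2)
# matching -> [(0, 0), (1, 1)]: ref[2] has no estimate within 0.2 s,
# so it stays unmatched in the maximum bipartite matching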