repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition
---|---|---|---|---|---|---|---|---|---|---|---|
tamasgal/km3pipe | km3pipe/io/aanet.py | AanetPump.blob_generator | def blob_generator(self):
"""Create a blob generator."""
# pylint: disable:F0401,W0612
import aa # pylint: disablF0401 # noqa
from ROOT import EventFile # pylint: disable F0401
filename = self.filename
log.info("Reading from file: {0}".format(filename))
if not os.path.exists(filename):
log.warning(filename + " not available: continue without it")
try:
event_file = EventFile(filename)
except Exception:
raise SystemExit("Could not open file")
log.info("Generating blobs through new aanet API...")
self.print("Reading metadata using 'JPrintMeta'")
meta_parser = MetaParser(filename=filename)
meta = meta_parser.get_table()
if meta is None:
self.log.warning(
"No metadata found, this means no data provenance!"
)
if self.bare:
log.info("Skipping data conversion, only passing bare aanet data")
for event in event_file:
yield Blob({'evt': event, 'event_file': event_file})
else:
log.info("Unpacking aanet header into dictionary...")
hdr = self._parse_header(event_file.header)
if not hdr:
log.info("Empty header dict found, skipping...")
self.raw_header = None
else:
log.info("Converting Header dict to Table...")
self.raw_header = self._convert_header_dict_to_table(hdr)
log.info("Creating HDF5Header")
self.header = HDF5Header.from_table(self.raw_header)
for event in event_file:
log.debug('Reading event...')
blob = self._read_event(event, filename)
log.debug('Reading header...')
blob["RawHeader"] = self.raw_header
blob["Header"] = self.header
if meta is not None:
blob['Meta'] = meta
self.group_id += 1
yield blob
del event_file | python | def blob_generator(self):
"""Create a blob generator."""
# pylint: disable:F0401,W0612
import aa # pylint: disablF0401 # noqa
from ROOT import EventFile # pylint: disable F0401
filename = self.filename
log.info("Reading from file: {0}".format(filename))
if not os.path.exists(filename):
log.warning(filename + " not available: continue without it")
try:
event_file = EventFile(filename)
except Exception:
raise SystemExit("Could not open file")
log.info("Generating blobs through new aanet API...")
self.print("Reading metadata using 'JPrintMeta'")
meta_parser = MetaParser(filename=filename)
meta = meta_parser.get_table()
if meta is None:
self.log.warning(
"No metadata found, this means no data provenance!"
)
if self.bare:
log.info("Skipping data conversion, only passing bare aanet data")
for event in event_file:
yield Blob({'evt': event, 'event_file': event_file})
else:
log.info("Unpacking aanet header into dictionary...")
hdr = self._parse_header(event_file.header)
if not hdr:
log.info("Empty header dict found, skipping...")
self.raw_header = None
else:
log.info("Converting Header dict to Table...")
self.raw_header = self._convert_header_dict_to_table(hdr)
log.info("Creating HDF5Header")
self.header = HDF5Header.from_table(self.raw_header)
for event in event_file:
log.debug('Reading event...')
blob = self._read_event(event, filename)
log.debug('Reading header...')
blob["RawHeader"] = self.raw_header
blob["Header"] = self.header
if meta is not None:
blob['Meta'] = meta
self.group_id += 1
yield blob
del event_file | [
"def",
"blob_generator",
"(",
"self",
")",
":",
"# pylint: disable:F0401,W0612",
"import",
"aa",
"# pylint: disablF0401 # noqa",
"from",
"ROOT",
"import",
"EventFile",
"# pylint: disable F0401",
"filename",
"=",
"self",
".",
"filename",
"log",
".",
"info",
"(",
"\"Reading from file: {0}\"",
".",
"format",
"(",
"filename",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"log",
".",
"warning",
"(",
"filename",
"+",
"\" not available: continue without it\"",
")",
"try",
":",
"event_file",
"=",
"EventFile",
"(",
"filename",
")",
"except",
"Exception",
":",
"raise",
"SystemExit",
"(",
"\"Could not open file\"",
")",
"log",
".",
"info",
"(",
"\"Generating blobs through new aanet API...\"",
")",
"self",
".",
"print",
"(",
"\"Reading metadata using 'JPrintMeta'\"",
")",
"meta_parser",
"=",
"MetaParser",
"(",
"filename",
"=",
"filename",
")",
"meta",
"=",
"meta_parser",
".",
"get_table",
"(",
")",
"if",
"meta",
"is",
"None",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"No metadata found, this means no data provenance!\"",
")",
"if",
"self",
".",
"bare",
":",
"log",
".",
"info",
"(",
"\"Skipping data conversion, only passing bare aanet data\"",
")",
"for",
"event",
"in",
"event_file",
":",
"yield",
"Blob",
"(",
"{",
"'evt'",
":",
"event",
",",
"'event_file'",
":",
"event_file",
"}",
")",
"else",
":",
"log",
".",
"info",
"(",
"\"Unpacking aanet header into dictionary...\"",
")",
"hdr",
"=",
"self",
".",
"_parse_header",
"(",
"event_file",
".",
"header",
")",
"if",
"not",
"hdr",
":",
"log",
".",
"info",
"(",
"\"Empty header dict found, skipping...\"",
")",
"self",
".",
"raw_header",
"=",
"None",
"else",
":",
"log",
".",
"info",
"(",
"\"Converting Header dict to Table...\"",
")",
"self",
".",
"raw_header",
"=",
"self",
".",
"_convert_header_dict_to_table",
"(",
"hdr",
")",
"log",
".",
"info",
"(",
"\"Creating HDF5Header\"",
")",
"self",
".",
"header",
"=",
"HDF5Header",
".",
"from_table",
"(",
"self",
".",
"raw_header",
")",
"for",
"event",
"in",
"event_file",
":",
"log",
".",
"debug",
"(",
"'Reading event...'",
")",
"blob",
"=",
"self",
".",
"_read_event",
"(",
"event",
",",
"filename",
")",
"log",
".",
"debug",
"(",
"'Reading header...'",
")",
"blob",
"[",
"\"RawHeader\"",
"]",
"=",
"self",
".",
"raw_header",
"blob",
"[",
"\"Header\"",
"]",
"=",
"self",
".",
"header",
"if",
"meta",
"is",
"not",
"None",
":",
"blob",
"[",
"'Meta'",
"]",
"=",
"meta",
"self",
".",
"group_id",
"+=",
"1",
"yield",
"blob",
"del",
"event_file"
] | Create a blob generator. | [
"Create",
"a",
"blob",
"generator",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/aanet.py#L264-L320 | train |
tamasgal/km3pipe | km3pipe/io/aanet.py | MetaParser.parse_string | def parse_string(self, string):
"""Parse ASCII output of JPrintMeta"""
self.log.info("Parsing ASCII data")
if not string:
self.log.warning("Empty metadata")
return
lines = string.splitlines()
application_data = []
application = lines[0].split()[0]
self.log.debug("Reading meta information for '%s'" % application)
for line in lines:
if application is None:
self.log.debug(
"Reading meta information for '%s'" % application
)
application = line.split()[0]
application_data.append(line)
if line.startswith(application + b' Linux'):
self._record_app_data(application_data)
application_data = []
application = None | python | def parse_string(self, string):
"""Parse ASCII output of JPrintMeta"""
self.log.info("Parsing ASCII data")
if not string:
self.log.warning("Empty metadata")
return
lines = string.splitlines()
application_data = []
application = lines[0].split()[0]
self.log.debug("Reading meta information for '%s'" % application)
for line in lines:
if application is None:
self.log.debug(
"Reading meta information for '%s'" % application
)
application = line.split()[0]
application_data.append(line)
if line.startswith(application + b' Linux'):
self._record_app_data(application_data)
application_data = []
application = None | [
"def",
"parse_string",
"(",
"self",
",",
"string",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Parsing ASCII data\"",
")",
"if",
"not",
"string",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"Empty metadata\"",
")",
"return",
"lines",
"=",
"string",
".",
"splitlines",
"(",
")",
"application_data",
"=",
"[",
"]",
"application",
"=",
"lines",
"[",
"0",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
"self",
".",
"log",
".",
"debug",
"(",
"\"Reading meta information for '%s'\"",
"%",
"application",
")",
"for",
"line",
"in",
"lines",
":",
"if",
"application",
"is",
"None",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Reading meta information for '%s'\"",
"%",
"application",
")",
"application",
"=",
"line",
".",
"split",
"(",
")",
"[",
"0",
"]",
"application_data",
".",
"append",
"(",
"line",
")",
"if",
"line",
".",
"startswith",
"(",
"application",
"+",
"b' Linux'",
")",
":",
"self",
".",
"_record_app_data",
"(",
"application_data",
")",
"application_data",
"=",
"[",
"]",
"application",
"=",
"None"
] | Parse ASCII output of JPrintMeta | [
"Parse",
"ASCII",
"output",
"of",
"JPrintMeta"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/aanet.py#L692-L716 | train |
tamasgal/km3pipe | km3pipe/io/aanet.py | MetaParser._record_app_data | def _record_app_data(self, data):
"""Parse raw metadata output for a single application
The usual output is:
ApplicationName RevisionNumber
ApplicationName ROOT_Version
ApplicationName KM3NET
ApplicationName ./command/line --arguments --which --can
contain
also
multiple lines
and --addtional flags
etc.
ApplicationName Linux ... (just the `uname -a` output)
"""
name, revision = data[0].split()
root_version = data[1].split()[1]
command = b'\n'.join(data[3:]).split(b'\n' + name + b' Linux')[0]
self.meta.append({
'application_name': np.string_(name),
'revision': np.string_(revision),
'root_version': np.string_(root_version),
'command': np.string_(command)
}) | python | def _record_app_data(self, data):
"""Parse raw metadata output for a single application
The usual output is:
ApplicationName RevisionNumber
ApplicationName ROOT_Version
ApplicationName KM3NET
ApplicationName ./command/line --arguments --which --can
contain
also
multiple lines
and --addtional flags
etc.
ApplicationName Linux ... (just the `uname -a` output)
"""
name, revision = data[0].split()
root_version = data[1].split()[1]
command = b'\n'.join(data[3:]).split(b'\n' + name + b' Linux')[0]
self.meta.append({
'application_name': np.string_(name),
'revision': np.string_(revision),
'root_version': np.string_(root_version),
'command': np.string_(command)
}) | [
"def",
"_record_app_data",
"(",
"self",
",",
"data",
")",
":",
"name",
",",
"revision",
"=",
"data",
"[",
"0",
"]",
".",
"split",
"(",
")",
"root_version",
"=",
"data",
"[",
"1",
"]",
".",
"split",
"(",
")",
"[",
"1",
"]",
"command",
"=",
"b'\\n'",
".",
"join",
"(",
"data",
"[",
"3",
":",
"]",
")",
".",
"split",
"(",
"b'\\n'",
"+",
"name",
"+",
"b' Linux'",
")",
"[",
"0",
"]",
"self",
".",
"meta",
".",
"append",
"(",
"{",
"'application_name'",
":",
"np",
".",
"string_",
"(",
"name",
")",
",",
"'revision'",
":",
"np",
".",
"string_",
"(",
"revision",
")",
",",
"'root_version'",
":",
"np",
".",
"string_",
"(",
"root_version",
")",
",",
"'command'",
":",
"np",
".",
"string_",
"(",
"command",
")",
"}",
")"
] | Parse raw metadata output for a single application
The usual output is:
ApplicationName RevisionNumber
ApplicationName ROOT_Version
ApplicationName KM3NET
ApplicationName ./command/line --arguments --which --can
contain
also
multiple lines
and --addtional flags
etc.
ApplicationName Linux ... (just the `uname -a` output) | [
"Parse",
"raw",
"metadata",
"output",
"for",
"a",
"single",
"application"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/aanet.py#L718-L741 | train |
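A minimal sketch of the slicing logic described in the `MetaParser._record_app_data` docstring above, applied to a made-up JPrintMeta block; the application name, revision, and command line below are illustrative, not real detector output:

```python
# Hypothetical JPrintMeta block for one application, as a list of bytes lines.
data = [
    b"JExampleApp 1.2.3",                               # name and revision
    b"JExampleApp 6.18/04",                             # ROOT version
    b"JExampleApp KM3NET",
    b"JExampleApp ./JExampleApp -f input.root --flag",  # command line (may span lines)
    b"JExampleApp Linux host 4.15.0 ...",               # `uname -a` output
]

name, revision = data[0].split()           # b"JExampleApp", b"1.2.3"
root_version = data[1].split()[1]          # b"6.18/04"
# Everything from the fourth line up to the "<name> Linux" line is the command.
command = b"\n".join(data[3:]).split(b"\n" + name + b" Linux")[0]
assert command == b"JExampleApp ./JExampleApp -f input.root --flag"
```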
tamasgal/km3pipe | km3pipe/io/aanet.py | MetaParser.get_table | def get_table(self, name='Meta', h5loc='/meta'):
"""Convert metadata to a KM3Pipe Table.
Returns `None` if there is no data.
Each column's dtype will be set to a fixed size string (numpy.string_)
with the length of the longest entry, since writing variable length
strings does not fit the current scheme.
"""
if not self.meta:
return None
data = defaultdict(list)
for entry in self.meta:
for key, value in entry.items():
data[key].append(value)
dtypes = []
for key, values in data.items():
max_len = max(map(len, values))
dtype = 'S{}'.format(max_len)
dtypes.append((key, dtype))
tab = Table(
data, dtype=dtypes, h5loc=h5loc, name='Meta', h5singleton=True
)
return tab | python | def get_table(self, name='Meta', h5loc='/meta'):
"""Convert metadata to a KM3Pipe Table.
Returns `None` if there is no data.
Each column's dtype will be set to a fixed size string (numpy.string_)
with the length of the longest entry, since writing variable length
strings does not fit the current scheme.
"""
if not self.meta:
return None
data = defaultdict(list)
for entry in self.meta:
for key, value in entry.items():
data[key].append(value)
dtypes = []
for key, values in data.items():
max_len = max(map(len, values))
dtype = 'S{}'.format(max_len)
dtypes.append((key, dtype))
tab = Table(
data, dtype=dtypes, h5loc=h5loc, name='Meta', h5singleton=True
)
return tab | [
"def",
"get_table",
"(",
"self",
",",
"name",
"=",
"'Meta'",
",",
"h5loc",
"=",
"'/meta'",
")",
":",
"if",
"not",
"self",
".",
"meta",
":",
"return",
"None",
"data",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"entry",
"in",
"self",
".",
"meta",
":",
"for",
"key",
",",
"value",
"in",
"entry",
".",
"items",
"(",
")",
":",
"data",
"[",
"key",
"]",
".",
"append",
"(",
"value",
")",
"dtypes",
"=",
"[",
"]",
"for",
"key",
",",
"values",
"in",
"data",
".",
"items",
"(",
")",
":",
"max_len",
"=",
"max",
"(",
"map",
"(",
"len",
",",
"values",
")",
")",
"dtype",
"=",
"'S{}'",
".",
"format",
"(",
"max_len",
")",
"dtypes",
".",
"append",
"(",
"(",
"key",
",",
"dtype",
")",
")",
"tab",
"=",
"Table",
"(",
"data",
",",
"dtype",
"=",
"dtypes",
",",
"h5loc",
"=",
"h5loc",
",",
"name",
"=",
"'Meta'",
",",
"h5singleton",
"=",
"True",
")",
"return",
"tab"
] | Convert metadata to a KM3Pipe Table.
Returns `None` if there is no data.
Each column's dtype will be set to a fixed size string (numpy.string_)
with the length of the longest entry, since writing variable length
strings does not fit the current scheme. | [
"Convert",
"metadata",
"to",
"a",
"KM3Pipe",
"Table",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/aanet.py#L743-L767 | train |
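The dtype-sizing rule described in the `get_table` docstring above (one fixed-width string column per key, sized to the longest entry) can be illustrated with plain NumPy, independent of the km3pipe `Table` class; the values below are made up:

```python
import numpy as np

values = [b"aanet", b"JPrintMeta", b"JTriggerEfficiency"]
dtype = "S{}".format(max(map(len, values)))  # longest entry has 18 chars -> "S18"
column = np.array(values, dtype=dtype)       # fixed-width bytes column, no truncation
```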
NaPs/Kolekto | kolekto/db.py | MoviesMetadata.itermovieshash | def itermovieshash(self):
""" Iterate over movies hash stored in the database.
"""
cur = self._db.firstkey()
while cur is not None:
yield cur
cur = self._db.nextkey(cur) | python | def itermovieshash(self):
""" Iterate over movies hash stored in the database.
"""
cur = self._db.firstkey()
while cur is not None:
yield cur
cur = self._db.nextkey(cur) | [
"def",
"itermovieshash",
"(",
"self",
")",
":",
"cur",
"=",
"self",
".",
"_db",
".",
"firstkey",
"(",
")",
"while",
"cur",
"is",
"not",
"None",
":",
"yield",
"cur",
"cur",
"=",
"self",
".",
"_db",
".",
"nextkey",
"(",
"cur",
")"
] | Iterate over movies hash stored in the database. | [
"Iterate",
"over",
"movies",
"hash",
"stored",
"in",
"the",
"database",
"."
] | 29c5469da8782780a06bf9a76c59414bb6fd8fe3 | https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/db.py#L12-L18 | train |
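The `firstkey`/`nextkey` idiom used by `itermovieshash` above is the standard way to walk a GNU dbm database; a standalone sketch with Python's `dbm.gnu` module is shown below (module availability depends on the platform, and the file path is illustrative):

```python
import dbm.gnu

with dbm.gnu.open("/tmp/movies_example.gdbm", "c") as db:
    db[b"hash-aaaa"] = b"{}"
    db[b"hash-bbbb"] = b"{}"
    key = db.firstkey()
    while key is not None:      # nextkey() returns None after the last key
        print(key)
        key = db.nextkey(key)
```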
LeadPages/gcloud_requests | gcloud_requests/datastore.py | DatastoreRequestsProxy._max_retries_for_error | def _max_retries_for_error(self, error):
"""Handles Datastore response errors according to their documentation.
Parameters:
error(dict)
Returns:
int or None: The max number of times this error should be
retried or None if it shouldn't.
See also:
https://cloud.google.com/datastore/docs/concepts/errors
"""
status = error.get("status")
if status == "ABORTED" and get_transactions() > 0:
# Avoids retrying Conflicts when inside a transaction.
return None
return self._MAX_RETRIES.get(status) | python | def _max_retries_for_error(self, error):
"""Handles Datastore response errors according to their documentation.
Parameters:
error(dict)
Returns:
int or None: The max number of times this error should be
retried or None if it shouldn't.
See also:
https://cloud.google.com/datastore/docs/concepts/errors
"""
status = error.get("status")
if status == "ABORTED" and get_transactions() > 0:
# Avoids retrying Conflicts when inside a transaction.
return None
return self._MAX_RETRIES.get(status) | [
"def",
"_max_retries_for_error",
"(",
"self",
",",
"error",
")",
":",
"status",
"=",
"error",
".",
"get",
"(",
"\"status\"",
")",
"if",
"status",
"==",
"\"ABORTED\"",
"and",
"get_transactions",
"(",
")",
">",
"0",
":",
"# Avoids retrying Conflicts when inside a transaction.",
"return",
"None",
"return",
"self",
".",
"_MAX_RETRIES",
".",
"get",
"(",
"status",
")"
] | Handles Datastore response errors according to their documentation.
Parameters:
error(dict)
Returns:
int or None: The max number of times this error should be
retried or None if it shouldn't.
See also:
https://cloud.google.com/datastore/docs/concepts/errors | [
"Handles",
"Datastore",
"response",
"errors",
"according",
"to",
"their",
"documentation",
"."
] | 8933363c4e9fa1e5ec0e90d683fca8ef8a949752 | https://github.com/LeadPages/gcloud_requests/blob/8933363c4e9fa1e5ec0e90d683fca8ef8a949752/gcloud_requests/datastore.py#L56-L73 | train |
materials-data-facility/toolbox | mdf_toolbox/toolbox.py | anonymous_login | def anonymous_login(services):
"""Initialize services without authenticating to Globus Auth.
Note:
Clients may have reduced functionality without authentication.
Arguments:
services (str or list of str): The services to initialize clients for.
Returns:
dict: The clients requested, indexed by service name.
"""
if isinstance(services, str):
services = [services]
clients = {}
# Initialize valid services
for serv in services:
try:
clients[serv] = KNOWN_CLIENTS[serv](http_timeout=STD_TIMEOUT)
except KeyError: # No known client
print("Error: No known client for '{}' service.".format(serv))
except Exception: # Other issue, probably auth
print("Error: Unable to create client for '{}' service.\n"
"Anonymous access may not be allowed.".format(serv))
return clients | python | def anonymous_login(services):
"""Initialize services without authenticating to Globus Auth.
Note:
Clients may have reduced functionality without authentication.
Arguments:
services (str or list of str): The services to initialize clients for.
Returns:
dict: The clients requested, indexed by service name.
"""
if isinstance(services, str):
services = [services]
clients = {}
# Initialize valid services
for serv in services:
try:
clients[serv] = KNOWN_CLIENTS[serv](http_timeout=STD_TIMEOUT)
except KeyError: # No known client
print("Error: No known client for '{}' service.".format(serv))
except Exception: # Other issue, probably auth
print("Error: Unable to create client for '{}' service.\n"
"Anonymous access may not be allowed.".format(serv))
return clients | [
"def",
"anonymous_login",
"(",
"services",
")",
":",
"if",
"isinstance",
"(",
"services",
",",
"str",
")",
":",
"services",
"=",
"[",
"services",
"]",
"clients",
"=",
"{",
"}",
"# Initialize valid services",
"for",
"serv",
"in",
"services",
":",
"try",
":",
"clients",
"[",
"serv",
"]",
"=",
"KNOWN_CLIENTS",
"[",
"serv",
"]",
"(",
"http_timeout",
"=",
"STD_TIMEOUT",
")",
"except",
"KeyError",
":",
"# No known client",
"print",
"(",
"\"Error: No known client for '{}' service.\"",
".",
"format",
"(",
"serv",
")",
")",
"except",
"Exception",
":",
"# Other issue, probably auth",
"print",
"(",
"\"Error: Unable to create client for '{}' service.\\n\"",
"\"Anonymous access may not be allowed.\"",
".",
"format",
"(",
"serv",
")",
")",
"return",
"clients"
] | Initialize services without authenticating to Globus Auth.
Note:
Clients may have reduced functionality without authentication.
Arguments:
services (str or list of str): The services to initialize clients for.
Returns:
dict: The clients requested, indexed by service name. | [
"Initialize",
"services",
"without",
"authenticating",
"to",
"Globus",
"Auth",
"."
] | 2a4ac2b6a892238263008efa6a5f3923d9a83505 | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/toolbox.py#L364-L390 | train |
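A minimal usage sketch for `anonymous_login` as documented above; the import path follows this row's module path (`mdf_toolbox/toolbox.py`), and the service names are assumed to be valid keys of the `KNOWN_CLIENTS` mapping referenced in the code:

```python
from mdf_toolbox.toolbox import anonymous_login

# Unauthenticated clients, keyed by service name; services that cannot be
# created anonymously are simply missing from the returned dict.
clients = anonymous_login(["search", "transfer"])
search = clients.get("search")
if search is None:
    print("No anonymous Search client available")
```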
materials-data-facility/toolbox | mdf_toolbox/toolbox.py | logout | def logout(token_dir=DEFAULT_CRED_PATH):
"""Remove ALL tokens in the token directory.
This will force re-authentication to all services.
Arguments:
token_dir (str): The path to the directory to save tokens in and look for
credentials by default. If this argument was given to a ``login()`` function,
the same value must be given here to properly logout.
**Default**: ``DEFAULT_CRED_PATH``.
"""
for f in os.listdir(token_dir):
if f.endswith("tokens.json"):
try:
os.remove(os.path.join(token_dir, f))
except OSError as e:
# Eat ENOENT (no such file/dir, tokens already deleted) only,
# raise any other issue (bad permissions, etc.)
if e.errno != errno.ENOENT:
raise | python | def logout(token_dir=DEFAULT_CRED_PATH):
"""Remove ALL tokens in the token directory.
This will force re-authentication to all services.
Arguments:
token_dir (str): The path to the directory to save tokens in and look for
credentials by default. If this argument was given to a ``login()`` function,
the same value must be given here to properly logout.
**Default**: ``DEFAULT_CRED_PATH``.
"""
for f in os.listdir(token_dir):
if f.endswith("tokens.json"):
try:
os.remove(os.path.join(token_dir, f))
except OSError as e:
# Eat ENOENT (no such file/dir, tokens already deleted) only,
# raise any other issue (bad permissions, etc.)
if e.errno != errno.ENOENT:
raise | [
"def",
"logout",
"(",
"token_dir",
"=",
"DEFAULT_CRED_PATH",
")",
":",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"token_dir",
")",
":",
"if",
"f",
".",
"endswith",
"(",
"\"tokens.json\"",
")",
":",
"try",
":",
"os",
".",
"remove",
"(",
"os",
".",
"path",
".",
"join",
"(",
"token_dir",
",",
"f",
")",
")",
"except",
"OSError",
"as",
"e",
":",
"# Eat ENOENT (no such file/dir, tokens already deleted) only,",
"# raise any other issue (bad permissions, etc.)",
"if",
"e",
".",
"errno",
"!=",
"errno",
".",
"ENOENT",
":",
"raise"
] | Remove ALL tokens in the token directory.
This will force re-authentication to all services.
Arguments:
token_dir (str): The path to the directory to save tokens in and look for
credentials by default. If this argument was given to a ``login()`` function,
the same value must be given here to properly logout.
**Default**: ``DEFAULT_CRED_PATH``. | [
"Remove",
"ALL",
"tokens",
"in",
"the",
"token",
"directory",
".",
"This",
"will",
"force",
"re",
"-",
"authentication",
"to",
"all",
"services",
"."
] | 2a4ac2b6a892238263008efa6a5f3923d9a83505 | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/toolbox.py#L393-L411 | train |
materials-data-facility/toolbox | mdf_toolbox/toolbox.py | format_gmeta | def format_gmeta(data, acl=None, identifier=None):
"""Format input into GMeta format, suitable for ingesting into Globus Search.
Formats a dictionary into a GMetaEntry.
Formats a list of GMetaEntry into a GMetaList inside a GMetaIngest.
**Example usage**::
glist = []
for document in all_my_documents:
gmeta_entry = format_gmeta(document, ["public"], document["id"])
glist.append(gmeta_entry)
ingest_ready_document = format_gmeta(glist)
Arguments:
data (dict or list): The data to be formatted.
If data is a dict, arguments ``acl`` and ``identifier`` are required.
If data is a list, it must consist of GMetaEntry documents.
acl (list of str): The list of Globus UUIDs allowed to view the document,
or the special value ``["public"]`` to allow anyone access.
Required if data is a dict. Ignored if data is a list.
Will be formatted into URNs if required.
identifier (str): A unique identifier for this document. If this value is not unique,
ingests into Globus Search may merge entries.
Required is data is a dict. Ignored if data is a list.
Returns:
dict (if ``data`` is ``dict``): The data as a GMetaEntry.
dict (if ``data`` is ``list``): The data as a GMetaIngest.
"""
if isinstance(data, dict):
if acl is None or identifier is None:
raise ValueError("acl and identifier are required when formatting a GMetaEntry.")
if isinstance(acl, str):
acl = [acl]
# "Correctly" format ACL entries into URNs
prefixed_acl = []
for uuid in acl:
# If entry is not special value "public" and is not a URN, make URN
# It is not known what the type of UUID is, so use both
# This solution is known to be hacky
if uuid != "public" and not uuid.lower().startswith("urn:"):
prefixed_acl.append("urn:globus:auth:identity:"+uuid.lower())
prefixed_acl.append("urn:globus:groups:id:"+uuid.lower())
# Otherwise, no modification
else:
prefixed_acl.append(uuid)
return {
"@datatype": "GMetaEntry",
"@version": "2016-11-09",
"subject": identifier,
"visible_to": prefixed_acl,
"content": data
}
elif isinstance(data, list):
return {
"@datatype": "GIngest",
"@version": "2016-11-09",
"ingest_type": "GMetaList",
"ingest_data": {
"@datatype": "GMetaList",
"@version": "2016-11-09",
"gmeta": data
}
}
else:
raise TypeError("Cannot format '" + str(type(data)) + "' into GMeta.") | python | def format_gmeta(data, acl=None, identifier=None):
"""Format input into GMeta format, suitable for ingesting into Globus Search.
Formats a dictionary into a GMetaEntry.
Formats a list of GMetaEntry into a GMetaList inside a GMetaIngest.
**Example usage**::
glist = []
for document in all_my_documents:
gmeta_entry = format_gmeta(document, ["public"], document["id"])
glist.append(gmeta_entry)
ingest_ready_document = format_gmeta(glist)
Arguments:
data (dict or list): The data to be formatted.
If data is a dict, arguments ``acl`` and ``identifier`` are required.
If data is a list, it must consist of GMetaEntry documents.
acl (list of str): The list of Globus UUIDs allowed to view the document,
or the special value ``["public"]`` to allow anyone access.
Required if data is a dict. Ignored if data is a list.
Will be formatted into URNs if required.
identifier (str): A unique identifier for this document. If this value is not unique,
ingests into Globus Search may merge entries.
Required is data is a dict. Ignored if data is a list.
Returns:
dict (if ``data`` is ``dict``): The data as a GMetaEntry.
dict (if ``data`` is ``list``): The data as a GMetaIngest.
"""
if isinstance(data, dict):
if acl is None or identifier is None:
raise ValueError("acl and identifier are required when formatting a GMetaEntry.")
if isinstance(acl, str):
acl = [acl]
# "Correctly" format ACL entries into URNs
prefixed_acl = []
for uuid in acl:
# If entry is not special value "public" and is not a URN, make URN
# It is not known what the type of UUID is, so use both
# This solution is known to be hacky
if uuid != "public" and not uuid.lower().startswith("urn:"):
prefixed_acl.append("urn:globus:auth:identity:"+uuid.lower())
prefixed_acl.append("urn:globus:groups:id:"+uuid.lower())
# Otherwise, no modification
else:
prefixed_acl.append(uuid)
return {
"@datatype": "GMetaEntry",
"@version": "2016-11-09",
"subject": identifier,
"visible_to": prefixed_acl,
"content": data
}
elif isinstance(data, list):
return {
"@datatype": "GIngest",
"@version": "2016-11-09",
"ingest_type": "GMetaList",
"ingest_data": {
"@datatype": "GMetaList",
"@version": "2016-11-09",
"gmeta": data
}
}
else:
raise TypeError("Cannot format '" + str(type(data)) + "' into GMeta.") | [
"def",
"format_gmeta",
"(",
"data",
",",
"acl",
"=",
"None",
",",
"identifier",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"if",
"acl",
"is",
"None",
"or",
"identifier",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"acl and identifier are required when formatting a GMetaEntry.\"",
")",
"if",
"isinstance",
"(",
"acl",
",",
"str",
")",
":",
"acl",
"=",
"[",
"acl",
"]",
"# \"Correctly\" format ACL entries into URNs",
"prefixed_acl",
"=",
"[",
"]",
"for",
"uuid",
"in",
"acl",
":",
"# If entry is not special value \"public\" and is not a URN, make URN",
"# It is not known what the type of UUID is, so use both",
"# This solution is known to be hacky",
"if",
"uuid",
"!=",
"\"public\"",
"and",
"not",
"uuid",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"\"urn:\"",
")",
":",
"prefixed_acl",
".",
"append",
"(",
"\"urn:globus:auth:identity:\"",
"+",
"uuid",
".",
"lower",
"(",
")",
")",
"prefixed_acl",
".",
"append",
"(",
"\"urn:globus:groups:id:\"",
"+",
"uuid",
".",
"lower",
"(",
")",
")",
"# Otherwise, no modification",
"else",
":",
"prefixed_acl",
".",
"append",
"(",
"uuid",
")",
"return",
"{",
"\"@datatype\"",
":",
"\"GMetaEntry\"",
",",
"\"@version\"",
":",
"\"2016-11-09\"",
",",
"\"subject\"",
":",
"identifier",
",",
"\"visible_to\"",
":",
"prefixed_acl",
",",
"\"content\"",
":",
"data",
"}",
"elif",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"return",
"{",
"\"@datatype\"",
":",
"\"GIngest\"",
",",
"\"@version\"",
":",
"\"2016-11-09\"",
",",
"\"ingest_type\"",
":",
"\"GMetaList\"",
",",
"\"ingest_data\"",
":",
"{",
"\"@datatype\"",
":",
"\"GMetaList\"",
",",
"\"@version\"",
":",
"\"2016-11-09\"",
",",
"\"gmeta\"",
":",
"data",
"}",
"}",
"else",
":",
"raise",
"TypeError",
"(",
"\"Cannot format '\"",
"+",
"str",
"(",
"type",
"(",
"data",
")",
")",
"+",
"\"' into GMeta.\"",
")"
] | Format input into GMeta format, suitable for ingesting into Globus Search.
Formats a dictionary into a GMetaEntry.
Formats a list of GMetaEntry into a GMetaList inside a GMetaIngest.
**Example usage**::
glist = []
for document in all_my_documents:
gmeta_entry = format_gmeta(document, ["public"], document["id"])
glist.append(gmeta_entry)
ingest_ready_document = format_gmeta(glist)
Arguments:
data (dict or list): The data to be formatted.
If data is a dict, arguments ``acl`` and ``identifier`` are required.
If data is a list, it must consist of GMetaEntry documents.
acl (list of str): The list of Globus UUIDs allowed to view the document,
or the special value ``["public"]`` to allow anyone access.
Required if data is a dict. Ignored if data is a list.
Will be formatted into URNs if required.
identifier (str): A unique identifier for this document. If this value is not unique,
ingests into Globus Search may merge entries.
Required is data is a dict. Ignored if data is a list.
Returns:
dict (if ``data`` is ``dict``): The data as a GMetaEntry.
dict (if ``data`` is ``list``): The data as a GMetaIngest. | [
"Format",
"input",
"into",
"GMeta",
"format",
"suitable",
"for",
"ingesting",
"into",
"Globus",
"Search",
".",
"Formats",
"a",
"dictionary",
"into",
"a",
"GMetaEntry",
".",
"Formats",
"a",
"list",
"of",
"GMetaEntry",
"into",
"a",
"GMetaList",
"inside",
"a",
"GMetaIngest",
"."
] | 2a4ac2b6a892238263008efa6a5f3923d9a83505 | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/toolbox.py#L471-L539 | train |
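In addition to the example in the docstring above, a small sketch showing the shapes produced by `format_gmeta` and the pass-through of the special `"public"` ACL value; the import path is assumed from this row's module path and the record content is made up:

```python
from mdf_toolbox.toolbox import format_gmeta

entry = format_gmeta({"title": "example"}, acl=["public"],
                     identifier="https://example.org/records/1")
assert entry["@datatype"] == "GMetaEntry"
assert entry["visible_to"] == ["public"]      # "public" is not rewritten into a URN

ingest = format_gmeta([entry])                # a list is wrapped into a GIngest
assert ingest["ingest_data"]["gmeta"] == [entry]
```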
materials-data-facility/toolbox | mdf_toolbox/toolbox.py | gmeta_pop | def gmeta_pop(gmeta, info=False):
"""Remove GMeta wrapping from a Globus Search result.
This function can be called on the raw GlobusHTTPResponse that Search returns,
or a string or dictionary representation of it.
Arguments:
gmeta (dict, str, or GlobusHTTPResponse): The Globus Search result to unwrap.
info (bool): If ``False``, will return a list of the results
and discard the metadata. If ``True``, will return a tuple containing
the results list, and other information about the query.
**Default**: ``False``.
Returns:
list (if ``info=False``): The unwrapped results.
tuple (if ``info=True``): The unwrapped results, and a dictionary of query information.
"""
if type(gmeta) is GlobusHTTPResponse:
gmeta = json.loads(gmeta.text)
elif type(gmeta) is str:
gmeta = json.loads(gmeta)
elif type(gmeta) is not dict:
raise TypeError("gmeta must be dict, GlobusHTTPResponse, or JSON string")
results = []
for res in gmeta["gmeta"]:
for con in res["content"]:
results.append(con)
if info:
fyi = {
"total_query_matches": gmeta.get("total")
}
return results, fyi
else:
return results | python | def gmeta_pop(gmeta, info=False):
"""Remove GMeta wrapping from a Globus Search result.
This function can be called on the raw GlobusHTTPResponse that Search returns,
or a string or dictionary representation of it.
Arguments:
gmeta (dict, str, or GlobusHTTPResponse): The Globus Search result to unwrap.
info (bool): If ``False``, will return a list of the results
and discard the metadata. If ``True``, will return a tuple containing
the results list, and other information about the query.
**Default**: ``False``.
Returns:
list (if ``info=False``): The unwrapped results.
tuple (if ``info=True``): The unwrapped results, and a dictionary of query information.
"""
if type(gmeta) is GlobusHTTPResponse:
gmeta = json.loads(gmeta.text)
elif type(gmeta) is str:
gmeta = json.loads(gmeta)
elif type(gmeta) is not dict:
raise TypeError("gmeta must be dict, GlobusHTTPResponse, or JSON string")
results = []
for res in gmeta["gmeta"]:
for con in res["content"]:
results.append(con)
if info:
fyi = {
"total_query_matches": gmeta.get("total")
}
return results, fyi
else:
return results | [
"def",
"gmeta_pop",
"(",
"gmeta",
",",
"info",
"=",
"False",
")",
":",
"if",
"type",
"(",
"gmeta",
")",
"is",
"GlobusHTTPResponse",
":",
"gmeta",
"=",
"json",
".",
"loads",
"(",
"gmeta",
".",
"text",
")",
"elif",
"type",
"(",
"gmeta",
")",
"is",
"str",
":",
"gmeta",
"=",
"json",
".",
"loads",
"(",
"gmeta",
")",
"elif",
"type",
"(",
"gmeta",
")",
"is",
"not",
"dict",
":",
"raise",
"TypeError",
"(",
"\"gmeta must be dict, GlobusHTTPResponse, or JSON string\"",
")",
"results",
"=",
"[",
"]",
"for",
"res",
"in",
"gmeta",
"[",
"\"gmeta\"",
"]",
":",
"for",
"con",
"in",
"res",
"[",
"\"content\"",
"]",
":",
"results",
".",
"append",
"(",
"con",
")",
"if",
"info",
":",
"fyi",
"=",
"{",
"\"total_query_matches\"",
":",
"gmeta",
".",
"get",
"(",
"\"total\"",
")",
"}",
"return",
"results",
",",
"fyi",
"else",
":",
"return",
"results"
] | Remove GMeta wrapping from a Globus Search result.
This function can be called on the raw GlobusHTTPResponse that Search returns,
or a string or dictionary representation of it.
Arguments:
gmeta (dict, str, or GlobusHTTPResponse): The Globus Search result to unwrap.
info (bool): If ``False``, will return a list of the results
and discard the metadata. If ``True``, will return a tuple containing
the results list, and other information about the query.
**Default**: ``False``.
Returns:
list (if ``info=False``): The unwrapped results.
tuple (if ``info=True``): The unwrapped results, and a dictionary of query information. | [
"Remove",
"GMeta",
"wrapping",
"from",
"a",
"Globus",
"Search",
"result",
".",
"This",
"function",
"can",
"be",
"called",
"on",
"the",
"raw",
"GlobusHTTPResponse",
"that",
"Search",
"returns",
"or",
"a",
"string",
"or",
"dictionary",
"representation",
"of",
"it",
"."
] | 2a4ac2b6a892238263008efa6a5f3923d9a83505 | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/toolbox.py#L542-L574 | train |
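A standalone sketch of `gmeta_pop` on a hand-built Search result; the structure mirrors what the function expects, but the record contents and totals below are made up:

```python
from mdf_toolbox.toolbox import gmeta_pop

raw = {
    "gmeta": [
        {"content": [{"title": "record 1"}]},
        {"content": [{"title": "record 2"}, {"title": "record 3"}]},
    ],
    "total": 3,
}
results, info = gmeta_pop(raw, info=True)
assert results == [{"title": "record 1"}, {"title": "record 2"}, {"title": "record 3"}]
assert info == {"total_query_matches": 3}
```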
materials-data-facility/toolbox | mdf_toolbox/toolbox.py | translate_index | def translate_index(index_name):
"""Translate a known Globus Search index into the index UUID.
The UUID is the proper way to access indices, and will eventually be the only way.
This method will return names it cannot disambiguate.
Arguments:
index_name (str): The name of the index.
Returns:
str: The UUID of the index. If the index is not known and is not unambiguous,
this will be the ``index_name`` unchanged instead.
"""
uuid = SEARCH_INDEX_UUIDS.get(index_name.strip().lower())
if not uuid:
try:
index_info = globus_sdk.SearchClient().get_index(index_name).data
if not isinstance(index_info, dict):
raise ValueError("Multiple UUIDs possible")
uuid = index_info.get("id", index_name)
except Exception:
uuid = index_name
return uuid | python | def translate_index(index_name):
"""Translate a known Globus Search index into the index UUID.
The UUID is the proper way to access indices, and will eventually be the only way.
This method will return names it cannot disambiguate.
Arguments:
index_name (str): The name of the index.
Returns:
str: The UUID of the index. If the index is not known and is not unambiguous,
this will be the ``index_name`` unchanged instead.
"""
uuid = SEARCH_INDEX_UUIDS.get(index_name.strip().lower())
if not uuid:
try:
index_info = globus_sdk.SearchClient().get_index(index_name).data
if not isinstance(index_info, dict):
raise ValueError("Multiple UUIDs possible")
uuid = index_info.get("id", index_name)
except Exception:
uuid = index_name
return uuid | [
"def",
"translate_index",
"(",
"index_name",
")",
":",
"uuid",
"=",
"SEARCH_INDEX_UUIDS",
".",
"get",
"(",
"index_name",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
")",
"if",
"not",
"uuid",
":",
"try",
":",
"index_info",
"=",
"globus_sdk",
".",
"SearchClient",
"(",
")",
".",
"get_index",
"(",
"index_name",
")",
".",
"data",
"if",
"not",
"isinstance",
"(",
"index_info",
",",
"dict",
")",
":",
"raise",
"ValueError",
"(",
"\"Multiple UUIDs possible\"",
")",
"uuid",
"=",
"index_info",
".",
"get",
"(",
"\"id\"",
",",
"index_name",
")",
"except",
"Exception",
":",
"uuid",
"=",
"index_name",
"return",
"uuid"
] | Translate a known Globus Search index into the index UUID.
The UUID is the proper way to access indices, and will eventually be the only way.
This method will return names it cannot disambiguate.
Arguments:
index_name (str): The name of the index.
Returns:
str: The UUID of the index. If the index is not known and is not unambiguous,
this will be the ``index_name`` unchanged instead. | [
"Translate",
"a",
"known",
"Globus",
"Search",
"index",
"into",
"the",
"index",
"UUID",
".",
"The",
"UUID",
"is",
"the",
"proper",
"way",
"to",
"access",
"indices",
"and",
"will",
"eventually",
"be",
"the",
"only",
"way",
".",
"This",
"method",
"will",
"return",
"names",
"it",
"cannot",
"disambiguate",
"."
] | 2a4ac2b6a892238263008efa6a5f3923d9a83505 | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/toolbox.py#L577-L598 | train |
materials-data-facility/toolbox | mdf_toolbox/toolbox.py | quick_transfer | def quick_transfer(transfer_client, source_ep, dest_ep, path_list, interval=None, retries=10,
notify=True):
"""Perform a Globus Transfer and monitor for success.
Arguments:
transfer_client (TransferClient): An authenticated Transfer client.
source_ep (str): The source Globus Endpoint ID.
dest_ep (str): The destination Globus Endpoint ID.
path_list (list of tuple of 2 str): A list of tuples containing the paths to transfer as
``(source, destination)``.
**Example**::
[("/source/files/file.dat", "/dest/mydocs/doc.dat"),
("/source/all_reports/", "/dest/reports/")]
interval (int): Number of seconds to wait before polling Transfer status.
Minimum ``1``.**Default**: ``DEFAULT_INTERVAL``.
retries (int): The number of errors to tolerate before cancelling the task.
Globus Transfer makes no distinction between hard errors
(e.g. "permission denied") and soft errors
(e.g. "endpoint [temporarily] too busy") so requiring retries is
not uncommon for large Transfers.
``-1`` for infinite tries (Transfer still fails after a period of no activity).
``None`` is synonymous with ``0``.
**Default**: ``10``.
notify (bool): When ``True``, trigger a notification email from Globus to the user when
the Transfer succeeds or fails. When ``False``, disable the notification.
**Default**: ``True``.
Returns:
str: ID of the Globus Transfer.
"""
if retries is None:
retries = 0
iterations = 0
transfer = custom_transfer(transfer_client, source_ep, dest_ep, path_list, notify=notify)
res = next(transfer)
try:
# Loop ends on StopIteration from generator exhaustion
while True:
if iterations < retries or retries == -1:
res = transfer.send(True)
iterations += 1
else:
res = transfer.send(False)
except StopIteration:
pass
if res["success"]:
error = "No error"
else:
error = "{}: {}".format(res.get("fatal_error", {}).get("code", "Error"),
res.get("fatal_error", {}).get("description", "Unknown"))
return {
"success": res["success"],
"task_id": res["task_id"],
"error": error
} | python | def quick_transfer(transfer_client, source_ep, dest_ep, path_list, interval=None, retries=10,
notify=True):
"""Perform a Globus Transfer and monitor for success.
Arguments:
transfer_client (TransferClient): An authenticated Transfer client.
source_ep (str): The source Globus Endpoint ID.
dest_ep (str): The destination Globus Endpoint ID.
path_list (list of tuple of 2 str): A list of tuples containing the paths to transfer as
``(source, destination)``.
**Example**::
[("/source/files/file.dat", "/dest/mydocs/doc.dat"),
("/source/all_reports/", "/dest/reports/")]
interval (int): Number of seconds to wait before polling Transfer status.
Minimum ``1``.**Default**: ``DEFAULT_INTERVAL``.
retries (int): The number of errors to tolerate before cancelling the task.
Globus Transfer makes no distinction between hard errors
(e.g. "permission denied") and soft errors
(e.g. "endpoint [temporarily] too busy") so requiring retries is
not uncommon for large Transfers.
``-1`` for infinite tries (Transfer still fails after a period of no activity).
``None`` is synonymous with ``0``.
**Default**: ``10``.
notify (bool): When ``True``, trigger a notification email from Globus to the user when
the Transfer succeeds or fails. When ``False``, disable the notification.
**Default**: ``True``.
Returns:
str: ID of the Globus Transfer.
"""
if retries is None:
retries = 0
iterations = 0
transfer = custom_transfer(transfer_client, source_ep, dest_ep, path_list, notify=notify)
res = next(transfer)
try:
# Loop ends on StopIteration from generator exhaustion
while True:
if iterations < retries or retries == -1:
res = transfer.send(True)
iterations += 1
else:
res = transfer.send(False)
except StopIteration:
pass
if res["success"]:
error = "No error"
else:
error = "{}: {}".format(res.get("fatal_error", {}).get("code", "Error"),
res.get("fatal_error", {}).get("description", "Unknown"))
return {
"success": res["success"],
"task_id": res["task_id"],
"error": error
} | [
"def",
"quick_transfer",
"(",
"transfer_client",
",",
"source_ep",
",",
"dest_ep",
",",
"path_list",
",",
"interval",
"=",
"None",
",",
"retries",
"=",
"10",
",",
"notify",
"=",
"True",
")",
":",
"if",
"retries",
"is",
"None",
":",
"retries",
"=",
"0",
"iterations",
"=",
"0",
"transfer",
"=",
"custom_transfer",
"(",
"transfer_client",
",",
"source_ep",
",",
"dest_ep",
",",
"path_list",
",",
"notify",
"=",
"notify",
")",
"res",
"=",
"next",
"(",
"transfer",
")",
"try",
":",
"# Loop ends on StopIteration from generator exhaustion",
"while",
"True",
":",
"if",
"iterations",
"<",
"retries",
"or",
"retries",
"==",
"-",
"1",
":",
"res",
"=",
"transfer",
".",
"send",
"(",
"True",
")",
"iterations",
"+=",
"1",
"else",
":",
"res",
"=",
"transfer",
".",
"send",
"(",
"False",
")",
"except",
"StopIteration",
":",
"pass",
"if",
"res",
"[",
"\"success\"",
"]",
":",
"error",
"=",
"\"No error\"",
"else",
":",
"error",
"=",
"\"{}: {}\"",
".",
"format",
"(",
"res",
".",
"get",
"(",
"\"fatal_error\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"code\"",
",",
"\"Error\"",
")",
",",
"res",
".",
"get",
"(",
"\"fatal_error\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"description\"",
",",
"\"Unknown\"",
")",
")",
"return",
"{",
"\"success\"",
":",
"res",
"[",
"\"success\"",
"]",
",",
"\"task_id\"",
":",
"res",
"[",
"\"task_id\"",
"]",
",",
"\"error\"",
":",
"error",
"}"
] | Perform a Globus Transfer and monitor for success.
Arguments:
transfer_client (TransferClient): An authenticated Transfer client.
source_ep (str): The source Globus Endpoint ID.
dest_ep (str): The destination Globus Endpoint ID.
path_list (list of tuple of 2 str): A list of tuples containing the paths to transfer as
``(source, destination)``.
**Example**::
[("/source/files/file.dat", "/dest/mydocs/doc.dat"),
("/source/all_reports/", "/dest/reports/")]
interval (int): Number of seconds to wait before polling Transfer status.
Minimum ``1``.**Default**: ``DEFAULT_INTERVAL``.
retries (int): The number of errors to tolerate before cancelling the task.
Globus Transfer makes no distinction between hard errors
(e.g. "permission denied") and soft errors
(e.g. "endpoint [temporarily] too busy") so requiring retries is
not uncommon for large Transfers.
``-1`` for infinite tries (Transfer still fails after a period of no activity).
``None`` is synonymous with ``0``.
**Default**: ``10``.
notify (bool): When ``True``, trigger a notification email from Globus to the user when
the Transfer succeeds or fails. When ``False``, disable the notification.
**Default**: ``True``.
Returns:
str: ID of the Globus Transfer. | [
"Perform",
"a",
"Globus",
"Transfer",
"and",
"monitor",
"for",
"success",
"."
] | 2a4ac2b6a892238263008efa6a5f3923d9a83505 | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/toolbox.py#L805-L863 | train |
materials-data-facility/toolbox | mdf_toolbox/toolbox.py | insensitive_comparison | def insensitive_comparison(item1, item2, type_insensitive=False, string_insensitive=False):
"""Compare two items without regard to order.
The following rules are used to determine equivalence:
* Items that are not of the same type can be equivalent only when ``type_insensitive=True``.
* Mapping objects are equal iff the keys in each item exist in both items and have
the same value (with the same ``insensitive_comparison``).
* Other containers except for strings are equal iff every element in each item exists
in both items (duplicate items must be present the same number of times).
* Containers must be ``Iterable`` to be compared in this way.
* Non-containers are equivalent if the equality operator returns ``True``.
* Strings are treated as non-containers when ``string_insensitive=False``,
and are treated as containers when ``string_insensitive=True``. When treated as
containers, each (case-insensitive) character is treated as an element and
whitespace is ignored.
* If the items are in different categories above, they are never equivalent,
even when ``type_insensitive=True``.
Arguments:
item1 (any): The first item to compare.
item2 (any): The second item to compare.
type_insensitive (bool): When ``True``, items of a different type are not automatically
unequivalent. When ``False``, items must be the same type to be equivalent.
**Default**: ``False``.
string_insensitive (bool): When ``True``, strings are treated as containers, with each
character being one element in the container.
When ``False``, strings are treated as non-containers and compared directly.
**Default**: ``False``.
Returns:
bool: ``True`` iff the two items are equivalent (see above).
``False`` otherwise.
"""
# If type-sensitive, check types
if not type_insensitive and type(item1) != type(item2):
return False
# Handle Mapping objects (dict)
if isinstance(item1, Mapping):
# Second item must be Mapping
if not isinstance(item2, Mapping):
return False
# Items must have the same number of elements
if not len(item1) == len(item2):
return False
# Keys must be the same
if not insensitive_comparison(list(item1.keys()), list(item2.keys()),
type_insensitive=True):
return False
# Each key's value must be the same
# We can just check item1.items because the keys are the same
for key, val in item1.items():
if not insensitive_comparison(item1[key], item2[key],
type_insensitive=type_insensitive,
string_insensitive=string_insensitive):
return False
# Keys and values are the same
return True
# Handle strings
elif isinstance(item1, str):
# Second item must be string
if not isinstance(item2, str):
return False
# Items must have the same number of elements (except string_insensitive)
if not len(item1) == len(item2) and not string_insensitive:
return False
# If we're insensitive to case, spaces, and order, compare characters
if string_insensitive:
# If the string is one character long, skip additional comparison
if len(item1) <= 1:
return item1.lower() == item2.lower()
# Make strings into containers (lists) and discard whitespace
item1_list = [c for c in item1.lower() if not c.isspace()]
item2_list = [c for c in item2.lower() if not c.isspace()]
# The insensitive args shouldn't matter, but they're here just in case
return insensitive_comparison(item1_list, item2_list,
type_insensitive=type_insensitive,
string_insensitive=string_insensitive)
# Otherwise, case and order matter
else:
return item1 == item2
# Handle other Iterable Containers
elif isinstance(item1, Container) and isinstance(item1, Iterable):
# Second item must be an Iterable Container
if not isinstance(item2, Container) or not isinstance(item2, Iterable):
return False
# Items must have the same number of elements
if not len(item1) == len(item2):
return False
# Every element in item1 must be in item2, and vice-versa
# Painfully slow, but unavoidable for deep comparison
# Each match in item1 removes the corresponding element from item2_copy
# If they're the same, item2_copy should be empty at the end,
# unless a .remove() failed, in which case we have to re-match using item2
item2_copy = list(deepcopy(item2))
remove_failed = False
for elem in item1:
matched = False
# Try every element
for candidate in item2:
# If comparison succeeds, flag a match, remove match from copy, and dump out
if insensitive_comparison(elem, candidate,
type_insensitive=type_insensitive,
string_insensitive=string_insensitive):
matched = True
try:
item2_copy.remove(candidate)
except ValueError: # list.remove(x): x not in list
remove_failed = True
break
# One failure indicates unequivalence
if not matched:
return False
# If all removes succeeded, we can shortcut checking all item2 elements in item1
if not remove_failed:
# If the Containers are equivalent, all elements in item2_copy should be removed
# Otherwise
return len(item2_copy) == 0
# If something failed, we have to verify all of item2
# We can't assume item2 != item1, because removal is comparative
else:
for elem in item2:
matched = False
# Try every element
for candidate in item1:
# If comparison succeeds, flag a match, remove match from copy, and dump out
if insensitive_comparison(elem, candidate,
type_insensitive=type_insensitive,
string_insensitive=string_insensitive):
matched = True
break
# One failure indicates unequivalence
if not matched:
return False
# All elements have a match
return True
# Handle otherwise unhandled type (catchall)
else:
return item1 == item2 | python | def insensitive_comparison(item1, item2, type_insensitive=False, string_insensitive=False):
"""Compare two items without regard to order.
The following rules are used to determine equivalence:
* Items that are not of the same type can be equivalent only when ``type_insensitive=True``.
* Mapping objects are equal iff the keys in each item exist in both items and have
the same value (with the same ``insensitive_comparison``).
* Other containers except for strings are equal iff every element in each item exists
in both items (duplicate items must be present the same number of times).
* Containers must be ``Iterable`` to be compared in this way.
* Non-containers are equivalent if the equality operator returns ``True``.
* Strings are treated as non-containers when ``string_insensitive=False``,
and are treated as containers when ``string_insensitive=True``. When treated as
containers, each (case-insensitive) character is treated as an element and
whitespace is ignored.
* If the items are in different categories above, they are never equivalent,
even when ``type_insensitive=True``.
Arguments:
item1 (any): The first item to compare.
item2 (any): The second item to compare.
type_insensitive (bool): When ``True``, items of a different type are not automatically
unequivalent. When ``False``, items must be the same type to be equivalent.
**Default**: ``False``.
string_insensitive (bool): When ``True``, strings are treated as containers, with each
character being one element in the container.
When ``False``, strings are treated as non-containers and compared directly.
**Default**: ``False``.
Returns:
bool: ``True`` iff the two items are equivalent (see above).
``False`` otherwise.
"""
# If type-sensitive, check types
if not type_insensitive and type(item1) != type(item2):
return False
# Handle Mapping objects (dict)
if isinstance(item1, Mapping):
# Second item must be Mapping
if not isinstance(item2, Mapping):
return False
# Items must have the same number of elements
if not len(item1) == len(item2):
return False
# Keys must be the same
if not insensitive_comparison(list(item1.keys()), list(item2.keys()),
type_insensitive=True):
return False
# Each key's value must be the same
# We can just check item1.items because the keys are the same
for key, val in item1.items():
if not insensitive_comparison(item1[key], item2[key],
type_insensitive=type_insensitive,
string_insensitive=string_insensitive):
return False
# Keys and values are the same
return True
# Handle strings
elif isinstance(item1, str):
# Second item must be string
if not isinstance(item2, str):
return False
# Items must have the same number of elements (except string_insensitive)
if not len(item1) == len(item2) and not string_insensitive:
return False
# If we're insensitive to case, spaces, and order, compare characters
if string_insensitive:
# If the string is one character long, skip additional comparison
if len(item1) <= 1:
return item1.lower() == item2.lower()
# Make strings into containers (lists) and discard whitespace
item1_list = [c for c in item1.lower() if not c.isspace()]
item2_list = [c for c in item2.lower() if not c.isspace()]
# The insensitive args shouldn't matter, but they're here just in case
return insensitive_comparison(item1_list, item2_list,
type_insensitive=type_insensitive,
string_insensitive=string_insensitive)
# Otherwise, case and order matter
else:
return item1 == item2
# Handle other Iterable Containers
elif isinstance(item1, Container) and isinstance(item1, Iterable):
# Second item must be an Iterable Container
if not isinstance(item2, Container) or not isinstance(item2, Iterable):
return False
# Items must have the same number of elements
if not len(item1) == len(item2):
return False
# Every element in item1 must be in item2, and vice-versa
# Painfully slow, but unavoidable for deep comparison
# Each match in item1 removes the corresponding element from item2_copy
# If they're the same, item2_copy should be empty at the end,
# unless a .remove() failed, in which case we have to re-match using item2
item2_copy = list(deepcopy(item2))
remove_failed = False
for elem in item1:
matched = False
# Try every element
for candidate in item2:
# If comparison succeeds, flag a match, remove match from copy, and dump out
if insensitive_comparison(elem, candidate,
type_insensitive=type_insensitive,
string_insensitive=string_insensitive):
matched = True
try:
item2_copy.remove(candidate)
except ValueError: # list.remove(x): x not in list
remove_failed = True
break
# One failure indicates unequivalence
if not matched:
return False
# If all removes succeeded, we can shortcut checking all item2 elements in item1
if not remove_failed:
# If the Containers are equivalent, all elements in item2_copy should be removed
# Otherwise
return len(item2_copy) == 0
# If something failed, we have to verify all of item2
# We can't assume item2 != item1, because removal is comparative
else:
for elem in item2:
matched = False
# Try every element
for candidate in item1:
# If comparison succeeds, flag a match, remove match from copy, and dump out
if insensitive_comparison(elem, candidate,
type_insensitive=type_insensitive,
string_insensitive=string_insensitive):
matched = True
break
# One failure indicates unequivalence
if not matched:
return False
# All elements have a match
return True
# Handle otherwise unhandled type (catchall)
else:
return item1 == item2 | [
"def",
"insensitive_comparison",
"(",
"item1",
",",
"item2",
",",
"type_insensitive",
"=",
"False",
",",
"string_insensitive",
"=",
"False",
")",
":",
"# If type-sensitive, check types",
"if",
"not",
"type_insensitive",
"and",
"type",
"(",
"item1",
")",
"!=",
"type",
"(",
"item2",
")",
":",
"return",
"False",
"# Handle Mapping objects (dict)",
"if",
"isinstance",
"(",
"item1",
",",
"Mapping",
")",
":",
"# Second item must be Mapping",
"if",
"not",
"isinstance",
"(",
"item2",
",",
"Mapping",
")",
":",
"return",
"False",
"# Items must have the same number of elements",
"if",
"not",
"len",
"(",
"item1",
")",
"==",
"len",
"(",
"item2",
")",
":",
"return",
"False",
"# Keys must be the same",
"if",
"not",
"insensitive_comparison",
"(",
"list",
"(",
"item1",
".",
"keys",
"(",
")",
")",
",",
"list",
"(",
"item2",
".",
"keys",
"(",
")",
")",
",",
"type_insensitive",
"=",
"True",
")",
":",
"return",
"False",
"# Each key's value must be the same",
"# We can just check item1.items because the keys are the same",
"for",
"key",
",",
"val",
"in",
"item1",
".",
"items",
"(",
")",
":",
"if",
"not",
"insensitive_comparison",
"(",
"item1",
"[",
"key",
"]",
",",
"item2",
"[",
"key",
"]",
",",
"type_insensitive",
"=",
"type_insensitive",
",",
"string_insensitive",
"=",
"string_insensitive",
")",
":",
"return",
"False",
"# Keys and values are the same",
"return",
"True",
"# Handle strings",
"elif",
"isinstance",
"(",
"item1",
",",
"str",
")",
":",
"# Second item must be string",
"if",
"not",
"isinstance",
"(",
"item2",
",",
"str",
")",
":",
"return",
"False",
"# Items must have the same number of elements (except string_insensitive)",
"if",
"not",
"len",
"(",
"item1",
")",
"==",
"len",
"(",
"item2",
")",
"and",
"not",
"string_insensitive",
":",
"return",
"False",
"# If we're insensitive to case, spaces, and order, compare characters",
"if",
"string_insensitive",
":",
"# If the string is one character long, skip additional comparison",
"if",
"len",
"(",
"item1",
")",
"<=",
"1",
":",
"return",
"item1",
".",
"lower",
"(",
")",
"==",
"item2",
".",
"lower",
"(",
")",
"# Make strings into containers (lists) and discard whitespace",
"item1_list",
"=",
"[",
"c",
"for",
"c",
"in",
"item1",
".",
"lower",
"(",
")",
"if",
"not",
"c",
".",
"isspace",
"(",
")",
"]",
"item2_list",
"=",
"[",
"c",
"for",
"c",
"in",
"item2",
".",
"lower",
"(",
")",
"if",
"not",
"c",
".",
"isspace",
"(",
")",
"]",
"# The insensitive args shouldn't matter, but they're here just in case",
"return",
"insensitive_comparison",
"(",
"item1_list",
",",
"item2_list",
",",
"type_insensitive",
"=",
"type_insensitive",
",",
"string_insensitive",
"=",
"string_insensitive",
")",
"# Otherwise, case and order matter",
"else",
":",
"return",
"item1",
"==",
"item2",
"# Handle other Iterable Containers",
"elif",
"isinstance",
"(",
"item1",
",",
"Container",
")",
"and",
"isinstance",
"(",
"item1",
",",
"Iterable",
")",
":",
"# Second item must be an Iterable Container",
"if",
"not",
"isinstance",
"(",
"item2",
",",
"Container",
")",
"or",
"not",
"isinstance",
"(",
"item2",
",",
"Iterable",
")",
":",
"return",
"False",
"# Items must have the same number of elements",
"if",
"not",
"len",
"(",
"item1",
")",
"==",
"len",
"(",
"item2",
")",
":",
"return",
"False",
"# Every element in item1 must be in item2, and vice-versa",
"# Painfully slow, but unavoidable for deep comparison",
"# Each match in item1 removes the corresponding element from item2_copy",
"# If they're the same, item2_copy should be empty at the end,",
"# unless a .remove() failed, in which case we have to re-match using item2",
"item2_copy",
"=",
"list",
"(",
"deepcopy",
"(",
"item2",
")",
")",
"remove_failed",
"=",
"False",
"for",
"elem",
"in",
"item1",
":",
"matched",
"=",
"False",
"# Try every element",
"for",
"candidate",
"in",
"item2",
":",
"# If comparison succeeds, flag a match, remove match from copy, and dump out",
"if",
"insensitive_comparison",
"(",
"elem",
",",
"candidate",
",",
"type_insensitive",
"=",
"type_insensitive",
",",
"string_insensitive",
"=",
"string_insensitive",
")",
":",
"matched",
"=",
"True",
"try",
":",
"item2_copy",
".",
"remove",
"(",
"candidate",
")",
"except",
"ValueError",
":",
"# list.remove(x): x not in list",
"remove_failed",
"=",
"True",
"break",
"# One failure indicates unequivalence",
"if",
"not",
"matched",
":",
"return",
"False",
"# If all removes succeeded, we can shortcut checking all item2 elements in item1",
"if",
"not",
"remove_failed",
":",
"# If the Containers are equivalent, all elements in item2_copy should be removed",
"# Otherwise",
"return",
"len",
"(",
"item2_copy",
")",
"==",
"0",
"# If something failed, we have to verify all of item2",
"# We can't assume item2 != item1, because removal is comparative",
"else",
":",
"for",
"elem",
"in",
"item2",
":",
"matched",
"=",
"False",
"# Try every element",
"for",
"candidate",
"in",
"item1",
":",
"# If comparison succeeds, flag a match, remove match from copy, and dump out",
"if",
"insensitive_comparison",
"(",
"elem",
",",
"candidate",
",",
"type_insensitive",
"=",
"type_insensitive",
",",
"string_insensitive",
"=",
"string_insensitive",
")",
":",
"matched",
"=",
"True",
"break",
"# One failure indicates unequivalence",
"if",
"not",
"matched",
":",
"return",
"False",
"# All elements have a match",
"return",
"True",
"# Handle otherwise unhandled type (catchall)",
"else",
":",
"return",
"item1",
"==",
"item2"
] | Compare two items without regard to order.
The following rules are used to determine equivalence:
* Items that are not of the same type can be equivalent only when ``type_insensitive=True``.
* Mapping objects are equal iff the keys in each item exist in both items and have
the same value (with the same ``insensitive_comparison``).
* Other containers except for strings are equal iff every element in each item exists
in both items (duplicate items must be present the same number of times).
* Containers must be ``Iterable`` to be compared in this way.
* Non-containers are equivalent if the equality operator returns ``True``.
* Strings are treated as non-containers when ``string_insensitive=False``,
and are treated as containers when ``string_insensitive=True``. When treated as
containers, each (case-insensitive) character is treated as an element and
whitespace is ignored.
* If the items are in different categories above, they are never equivalent,
even when ``type_insensitive=True``.
Arguments:
item1 (any): The first item to compare.
item2 (any): The second item to compare.
type_insensitive (bool): When ``True``, items of a different type are not automatically
unequivalent. When ``False``, items must be the same type to be equivalent.
**Default**: ``False``.
string_insensitive (bool): When ``True``, strings are treated as containers, with each
character being one element in the container.
When ``False``, strings are treated as non-containers and compared directly.
**Default**: ``False``.
Returns:
bool: ``True`` iff the two items are equivalent (see above).
``False`` otherwise. | [
"Compare",
"two",
"items",
"without",
"regard",
"to",
"order",
"."
] | 2a4ac2b6a892238263008efa6a5f3923d9a83505 | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/toolbox.py#L933-L1071 | train |
tapilab/brandelion | brandelion/cli/analyze.py | parse_json | def parse_json(json_file, include_date=False):
""" Yield screen_name, text tuples from a json file. """
if json_file[-2:] == 'gz':
fh = gzip.open(json_file, 'rt')
else:
fh = io.open(json_file, mode='rt', encoding='utf8')
for line in fh:
try:
jj = json.loads(line)
if type(jj) is not list:
jj = [jj]
for j in jj:
if include_date:
yield (j['user']['screen_name'].lower(), j['text'], j['created_at'])
else:
if 'full_text' in j: # get untruncated text if available.
yield (j['user']['screen_name'].lower(), j['full_text'])
else:
yield (j['user']['screen_name'].lower(), j['text'])
except Exception as e:
sys.stderr.write('skipping json error: %s\n' % e) | python | def parse_json(json_file, include_date=False):
""" Yield screen_name, text tuples from a json file. """
if json_file[-2:] == 'gz':
fh = gzip.open(json_file, 'rt')
else:
fh = io.open(json_file, mode='rt', encoding='utf8')
for line in fh:
try:
jj = json.loads(line)
if type(jj) is not list:
jj = [jj]
for j in jj:
if include_date:
yield (j['user']['screen_name'].lower(), j['text'], j['created_at'])
else:
if 'full_text' in j: # get untruncated text if available.
yield (j['user']['screen_name'].lower(), j['full_text'])
else:
yield (j['user']['screen_name'].lower(), j['text'])
except Exception as e:
sys.stderr.write('skipping json error: %s\n' % e) | [
"def",
"parse_json",
"(",
"json_file",
",",
"include_date",
"=",
"False",
")",
":",
"if",
"json_file",
"[",
"-",
"2",
":",
"]",
"==",
"'gz'",
":",
"fh",
"=",
"gzip",
".",
"open",
"(",
"json_file",
",",
"'rt'",
")",
"else",
":",
"fh",
"=",
"io",
".",
"open",
"(",
"json_file",
",",
"mode",
"=",
"'rt'",
",",
"encoding",
"=",
"'utf8'",
")",
"for",
"line",
"in",
"fh",
":",
"try",
":",
"jj",
"=",
"json",
".",
"loads",
"(",
"line",
")",
"if",
"type",
"(",
"jj",
")",
"is",
"not",
"list",
":",
"jj",
"=",
"[",
"jj",
"]",
"for",
"j",
"in",
"jj",
":",
"if",
"include_date",
":",
"yield",
"(",
"j",
"[",
"'user'",
"]",
"[",
"'screen_name'",
"]",
".",
"lower",
"(",
")",
",",
"j",
"[",
"'text'",
"]",
",",
"j",
"[",
"'created_at'",
"]",
")",
"else",
":",
"if",
"'full_text'",
"in",
"j",
":",
"# get untruncated text if available.",
"yield",
"(",
"j",
"[",
"'user'",
"]",
"[",
"'screen_name'",
"]",
".",
"lower",
"(",
")",
",",
"j",
"[",
"'full_text'",
"]",
")",
"else",
":",
"yield",
"(",
"j",
"[",
"'user'",
"]",
"[",
"'screen_name'",
"]",
".",
"lower",
"(",
")",
",",
"j",
"[",
"'text'",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'skipping json error: %s\\n'",
"%",
"e",
")"
] | Yield screen_name, text tuples from a json file. | [
"Yield",
"screen_name",
"text",
"tuples",
"from",
"a",
"json",
"file",
"."
] | 40a5a5333cf704182c8666d1fbbbdadc7ff88546 | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L50-L71 | train |
tapilab/brandelion | brandelion/cli/analyze.py | extract_tweets | def extract_tweets(json_file):
""" Yield screen_name, string tuples, where the string is the
concatenation of all tweets of this user. """
for screen_name, tweet_iter in groupby(parse_json(json_file), lambda x: x[0]):
tweets = [t[1] for t in tweet_iter]
yield screen_name, ' '.join(tweets) | python | def extract_tweets(json_file):
""" Yield screen_name, string tuples, where the string is the
concatenation of all tweets of this user. """
for screen_name, tweet_iter in groupby(parse_json(json_file), lambda x: x[0]):
tweets = [t[1] for t in tweet_iter]
yield screen_name, ' '.join(tweets) | [
"def",
"extract_tweets",
"(",
"json_file",
")",
":",
"for",
"screen_name",
",",
"tweet_iter",
"in",
"groupby",
"(",
"parse_json",
"(",
"json_file",
")",
",",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
":",
"tweets",
"=",
"[",
"t",
"[",
"1",
"]",
"for",
"t",
"in",
"tweet_iter",
"]",
"yield",
"screen_name",
",",
"' '",
".",
"join",
"(",
"tweets",
")"
] | Yield screen_name, string tuples, where the string is the
concatenation of all tweets of this user. | [
"Yield",
"screen_name",
"string",
"tuples",
"where",
"the",
"string",
"is",
"the",
"concatenation",
"of",
"all",
"tweets",
"of",
"this",
"user",
"."
] | 40a5a5333cf704182c8666d1fbbbdadc7ff88546 | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L74-L79 | train |
tapilab/brandelion | brandelion/cli/analyze.py | vectorize | def vectorize(json_file, vec, dofit=True):
""" Return a matrix where each row corresponds to a Twitter account, and
each column corresponds to the number of times a term is used by that
account. """
## CountVectorizer, efficiently.
screen_names = [x[0] for x in extract_tweets(json_file)]
if dofit:
X = vec.fit_transform(x[1] for x in extract_tweets(json_file))
else:
X = vec.transform(x[1] for x in extract_tweets(json_file))
return screen_names, X | python | def vectorize(json_file, vec, dofit=True):
""" Return a matrix where each row corresponds to a Twitter account, and
each column corresponds to the number of times a term is used by that
account. """
## CountVectorizer, efficiently.
screen_names = [x[0] for x in extract_tweets(json_file)]
if dofit:
X = vec.fit_transform(x[1] for x in extract_tweets(json_file))
else:
X = vec.transform(x[1] for x in extract_tweets(json_file))
return screen_names, X | [
"def",
"vectorize",
"(",
"json_file",
",",
"vec",
",",
"dofit",
"=",
"True",
")",
":",
"## CountVectorizer, efficiently.",
"screen_names",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"extract_tweets",
"(",
"json_file",
")",
"]",
"if",
"dofit",
":",
"X",
"=",
"vec",
".",
"fit_transform",
"(",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"extract_tweets",
"(",
"json_file",
")",
")",
"else",
":",
"X",
"=",
"vec",
".",
"transform",
"(",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"extract_tweets",
"(",
"json_file",
")",
")",
"return",
"screen_names",
",",
"X"
] | Return a matrix where each row corresponds to a Twitter account, and
each column corresponds to the number of times a term is used by that
account. | [
"Return",
"a",
"matrix",
"where",
"each",
"row",
"corresponds",
"to",
"a",
"Twitter",
"account",
"and",
"each",
"column",
"corresponds",
"to",
"the",
"number",
"of",
"times",
"a",
"term",
"is",
"used",
"by",
"that",
"account",
"."
] | 40a5a5333cf704182c8666d1fbbbdadc7ff88546 | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L99-L109 | train |
tapilab/brandelion | brandelion/cli/analyze.py | read_follower_file | def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
""" Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids. """
result = {}
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
if parts[1].lower() not in blacklist:
followers = set(int(x) for x in parts[2:])
if len(followers) > min_followers and len(followers) <= max_followers:
result[parts[1].lower()] = followers
else:
print('skipping exemplar', parts[1].lower())
return result | python | def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
""" Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids. """
result = {}
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
if parts[1].lower() not in blacklist:
followers = set(int(x) for x in parts[2:])
if len(followers) > min_followers and len(followers) <= max_followers:
result[parts[1].lower()] = followers
else:
print('skipping exemplar', parts[1].lower())
return result | [
"def",
"read_follower_file",
"(",
"fname",
",",
"min_followers",
"=",
"0",
",",
"max_followers",
"=",
"1e10",
",",
"blacklist",
"=",
"set",
"(",
")",
")",
":",
"result",
"=",
"{",
"}",
"with",
"open",
"(",
"fname",
",",
"'rt'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"parts",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"parts",
")",
">",
"3",
":",
"if",
"parts",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"not",
"in",
"blacklist",
":",
"followers",
"=",
"set",
"(",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"parts",
"[",
"2",
":",
"]",
")",
"if",
"len",
"(",
"followers",
")",
">",
"min_followers",
"and",
"len",
"(",
"followers",
")",
"<=",
"max_followers",
":",
"result",
"[",
"parts",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"]",
"=",
"followers",
"else",
":",
"print",
"(",
"'skipping exemplar'",
",",
"parts",
"[",
"1",
"]",
".",
"lower",
"(",
")",
")",
"return",
"result"
] | Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids. | [
"Read",
"a",
"file",
"of",
"follower",
"information",
"and",
"return",
"a",
"dictionary",
"mapping",
"screen_name",
"to",
"a",
"set",
"of",
"follower",
"ids",
"."
] | 40a5a5333cf704182c8666d1fbbbdadc7ff88546 | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L171-L184 | train |
tapilab/brandelion | brandelion/cli/analyze.py | jaccard_merge | def jaccard_merge(brands, exemplars):
""" Return the average Jaccard similarity between a brand's followers and
the followers of each exemplar. We merge all exemplar followers into one
big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _jaccard(followers, exemplar_followers)
return scores | python | def jaccard_merge(brands, exemplars):
""" Return the average Jaccard similarity between a brand's followers and
the followers of each exemplar. We merge all exemplar followers into one
big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _jaccard(followers, exemplar_followers)
return scores | [
"def",
"jaccard_merge",
"(",
"brands",
",",
"exemplars",
")",
":",
"scores",
"=",
"{",
"}",
"exemplar_followers",
"=",
"set",
"(",
")",
"for",
"followers",
"in",
"exemplars",
".",
"values",
"(",
")",
":",
"exemplar_followers",
"|=",
"followers",
"for",
"brand",
",",
"followers",
"in",
"brands",
":",
"scores",
"[",
"brand",
"]",
"=",
"_jaccard",
"(",
"followers",
",",
"exemplar_followers",
")",
"return",
"scores"
] | Return the average Jaccard similarity between a brand's followers and
the followers of each exemplar. We merge all exemplar followers into one
big pseudo-account. | [
"Return",
"the",
"average",
"Jaccard",
"similarity",
"between",
"a",
"brand",
"s",
"followers",
"and",
"the",
"followers",
"of",
"each",
"exemplar",
".",
"We",
"merge",
"all",
"exemplar",
"followers",
"into",
"one",
"big",
"pseudo",
"-",
"account",
"."
] | 40a5a5333cf704182c8666d1fbbbdadc7ff88546 | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L235-L246 | train |
tapilab/brandelion | brandelion/cli/analyze.py | cosine | def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the cosine similarity between a brand's followers and the exemplars.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_cosine(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_cosine(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores | python | def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the cosine similarity between a brand's followers and the exemplars.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_cosine(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_cosine(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores | [
"def",
"cosine",
"(",
"brands",
",",
"exemplars",
",",
"weighted_avg",
"=",
"False",
",",
"sqrt",
"=",
"False",
")",
":",
"scores",
"=",
"{",
"}",
"for",
"brand",
",",
"followers",
"in",
"brands",
":",
"if",
"weighted_avg",
":",
"scores",
"[",
"brand",
"]",
"=",
"np",
".",
"average",
"(",
"[",
"_cosine",
"(",
"followers",
",",
"others",
")",
"for",
"others",
"in",
"exemplars",
".",
"values",
"(",
")",
"]",
",",
"weights",
"=",
"[",
"1.",
"/",
"len",
"(",
"others",
")",
"for",
"others",
"in",
"exemplars",
".",
"values",
"(",
")",
"]",
")",
"else",
":",
"scores",
"[",
"brand",
"]",
"=",
"1.",
"*",
"sum",
"(",
"_cosine",
"(",
"followers",
",",
"others",
")",
"for",
"others",
"in",
"exemplars",
".",
"values",
"(",
")",
")",
"/",
"len",
"(",
"exemplars",
")",
"if",
"sqrt",
":",
"scores",
"=",
"dict",
"(",
"[",
"(",
"b",
",",
"math",
".",
"sqrt",
"(",
"s",
")",
")",
"for",
"b",
",",
"s",
"in",
"scores",
".",
"items",
"(",
")",
"]",
")",
"return",
"scores"
] | Return the cosine similarity between a brand's followers and the exemplars. | [
"Return",
"the",
"cosine",
"similarity",
"betwee",
"a",
"brand",
"s",
"followers",
"and",
"the",
"exemplars",
"."
] | 40a5a5333cf704182c8666d1fbbbdadc7ff88546 | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L319-L332 | train |
thebigmunch/google-music-utils | src/google_music_utils/misc.py | suggest_filename | def suggest_filename(metadata):
"""Generate a filename like Google for a song based on metadata.
Parameters:
metadata (~collections.abc.Mapping): A metadata dict.
Returns:
str: A filename string without an extension.
"""
if 'title' in metadata and 'track_number' in metadata: # Music Manager.
suggested_filename = f"{metadata['track_number']:0>2} {metadata['title']}"
elif 'title' in metadata and 'trackNumber' in metadata: # Mobile.
suggested_filename = f"{metadata['trackNumber']:0>2} {metadata['title']}"
elif 'title' in metadata and 'tracknumber' in metadata: # audio-metadata/mutagen.
track_number = _split_number_field(
list_to_single_value(
metadata['tracknumber']
)
)
title = list_to_single_value(metadata['title'])
suggested_filename = f"{track_number:0>2} {title}"
else:
suggested_filename = f"00 {list_to_single_value(metadata.get('title', ['']))}"
return _replace_invalid_characters(suggested_filename) | python | def suggest_filename(metadata):
"""Generate a filename like Google for a song based on metadata.
Parameters:
metadata (~collections.abc.Mapping): A metadata dict.
Returns:
str: A filename string without an extension.
"""
if 'title' in metadata and 'track_number' in metadata: # Music Manager.
suggested_filename = f"{metadata['track_number']:0>2} {metadata['title']}"
elif 'title' in metadata and 'trackNumber' in metadata: # Mobile.
suggested_filename = f"{metadata['trackNumber']:0>2} {metadata['title']}"
elif 'title' in metadata and 'tracknumber' in metadata: # audio-metadata/mutagen.
track_number = _split_number_field(
list_to_single_value(
metadata['tracknumber']
)
)
title = list_to_single_value(metadata['title'])
suggested_filename = f"{track_number:0>2} {title}"
else:
suggested_filename = f"00 {list_to_single_value(metadata.get('title', ['']))}"
return _replace_invalid_characters(suggested_filename) | [
"def",
"suggest_filename",
"(",
"metadata",
")",
":",
"if",
"'title'",
"in",
"metadata",
"and",
"'track_number'",
"in",
"metadata",
":",
"# Music Manager.",
"suggested_filename",
"=",
"f\"{metadata['track_number']:0>2} {metadata['title']}\"",
"elif",
"'title'",
"in",
"metadata",
"and",
"'trackNumber'",
"in",
"metadata",
":",
"# Mobile.",
"suggested_filename",
"=",
"f\"{metadata['trackNumber']:0>2} {metadata['title']}\"",
"elif",
"'title'",
"in",
"metadata",
"and",
"'tracknumber'",
"in",
"metadata",
":",
"# audio-metadata/mutagen.",
"track_number",
"=",
"_split_number_field",
"(",
"list_to_single_value",
"(",
"metadata",
"[",
"'tracknumber'",
"]",
")",
")",
"title",
"=",
"list_to_single_value",
"(",
"metadata",
"[",
"'title'",
"]",
")",
"suggested_filename",
"=",
"f\"{track_number:0>2} {title}\"",
"else",
":",
"suggested_filename",
"=",
"f\"00 {list_to_single_value(metadata.get('title', ['']))}\"",
"return",
"_replace_invalid_characters",
"(",
"suggested_filename",
")"
] | Generate a filename like Google for a song based on metadata.
Parameters:
metadata (~collections.abc.Mapping): A metadata dict.
Returns:
str: A filename string without an extension. | [
"Generate",
"a",
"filename",
"like",
"Google",
"for",
"a",
"song",
"based",
"on",
"metadata",
"."
] | 2e8873defe7d5aab7321b9d5ec8a80d72687578e | https://github.com/thebigmunch/google-music-utils/blob/2e8873defe7d5aab7321b9d5ec8a80d72687578e/src/google_music_utils/misc.py#L25-L51 | train |
thebigmunch/google-music-utils | src/google_music_utils/misc.py | template_to_filepath | def template_to_filepath(template, metadata, template_patterns=None):
"""Create directory structure and file name based on metadata template.
Note:
A template meant to be a base directory for suggested
names should have a trailing slash or backslash.
Parameters:
template (str or ~os.PathLike): A filepath which can include template patterns as defined by :param template_patterns:.
metadata (~collections.abc.Mapping): A metadata dict.
template_patterns (~collections.abc.Mapping): A dict of ``pattern: field`` pairs used to replace patterns with metadata field values.
Default: :const:`~google_music_utils.constants.TEMPLATE_PATTERNS`
Returns:
~pathlib.Path: A filepath.
"""
path = Path(template)
if template_patterns is None:
template_patterns = TEMPLATE_PATTERNS
suggested_filename = suggest_filename(metadata)
if (
path == Path.cwd()
or path == Path('%suggested%')
):
filepath = Path(suggested_filename)
elif any(template_pattern in path.parts for template_pattern in template_patterns):
if template.endswith(('/', '\\')):
template += suggested_filename
path = Path(template.replace('%suggested%', suggested_filename))
parts = []
for part in path.parts:
if part == path.anchor:
parts.append(part)
else:
for key in template_patterns:
if ( # pragma: no branch
key in part
and any(field in metadata for field in template_patterns[key])
):
field = more_itertools.first_true(
template_patterns[key],
pred=lambda k: k in metadata
)
if key.startswith(('%disc', '%track')):
number = _split_number_field(
str(
list_to_single_value(
metadata[field]
)
)
)
if key.endswith('2%'):
metadata[field] = number.zfill(2)
else:
metadata[field] = number
part = part.replace(
key,
list_to_single_value(
metadata[field]
)
)
parts.append(_replace_invalid_characters(part))
filepath = Path(*parts)
elif '%suggested%' in template:
filepath = Path(template.replace('%suggested%', suggested_filename))
elif template.endswith(('/', '\\')):
filepath = path / suggested_filename
else:
filepath = path
return filepath | python | def template_to_filepath(template, metadata, template_patterns=None):
"""Create directory structure and file name based on metadata template.
Note:
A template meant to be a base directory for suggested
names should have a trailing slash or backslash.
Parameters:
template (str or ~os.PathLike): A filepath which can include template patterns as defined by :param template_patterns:.
metadata (~collections.abc.Mapping): A metadata dict.
template_patterns (~collections.abc.Mapping): A dict of ``pattern: field`` pairs used to replace patterns with metadata field values.
Default: :const:`~google_music_utils.constants.TEMPLATE_PATTERNS`
Returns:
~pathlib.Path: A filepath.
"""
path = Path(template)
if template_patterns is None:
template_patterns = TEMPLATE_PATTERNS
suggested_filename = suggest_filename(metadata)
if (
path == Path.cwd()
or path == Path('%suggested%')
):
filepath = Path(suggested_filename)
elif any(template_pattern in path.parts for template_pattern in template_patterns):
if template.endswith(('/', '\\')):
template += suggested_filename
path = Path(template.replace('%suggested%', suggested_filename))
parts = []
for part in path.parts:
if part == path.anchor:
parts.append(part)
else:
for key in template_patterns:
if ( # pragma: no branch
key in part
and any(field in metadata for field in template_patterns[key])
):
field = more_itertools.first_true(
template_patterns[key],
pred=lambda k: k in metadata
)
if key.startswith(('%disc', '%track')):
number = _split_number_field(
str(
list_to_single_value(
metadata[field]
)
)
)
if key.endswith('2%'):
metadata[field] = number.zfill(2)
else:
metadata[field] = number
part = part.replace(
key,
list_to_single_value(
metadata[field]
)
)
parts.append(_replace_invalid_characters(part))
filepath = Path(*parts)
elif '%suggested%' in template:
filepath = Path(template.replace('%suggested%', suggested_filename))
elif template.endswith(('/', '\\')):
filepath = path / suggested_filename
else:
filepath = path
return filepath | [
"def",
"template_to_filepath",
"(",
"template",
",",
"metadata",
",",
"template_patterns",
"=",
"None",
")",
":",
"path",
"=",
"Path",
"(",
"template",
")",
"if",
"template_patterns",
"is",
"None",
":",
"template_patterns",
"=",
"TEMPLATE_PATTERNS",
"suggested_filename",
"=",
"suggest_filename",
"(",
"metadata",
")",
"if",
"(",
"path",
"==",
"Path",
".",
"cwd",
"(",
")",
"or",
"path",
"==",
"Path",
"(",
"'%suggested%'",
")",
")",
":",
"filepath",
"=",
"Path",
"(",
"suggested_filename",
")",
"elif",
"any",
"(",
"template_pattern",
"in",
"path",
".",
"parts",
"for",
"template_pattern",
"in",
"template_patterns",
")",
":",
"if",
"template",
".",
"endswith",
"(",
"(",
"'/'",
",",
"'\\\\'",
")",
")",
":",
"template",
"+=",
"suggested_filename",
"path",
"=",
"Path",
"(",
"template",
".",
"replace",
"(",
"'%suggested%'",
",",
"suggested_filename",
")",
")",
"parts",
"=",
"[",
"]",
"for",
"part",
"in",
"path",
".",
"parts",
":",
"if",
"part",
"==",
"path",
".",
"anchor",
":",
"parts",
".",
"append",
"(",
"part",
")",
"else",
":",
"for",
"key",
"in",
"template_patterns",
":",
"if",
"(",
"# pragma: no branch",
"key",
"in",
"part",
"and",
"any",
"(",
"field",
"in",
"metadata",
"for",
"field",
"in",
"template_patterns",
"[",
"key",
"]",
")",
")",
":",
"field",
"=",
"more_itertools",
".",
"first_true",
"(",
"template_patterns",
"[",
"key",
"]",
",",
"pred",
"=",
"lambda",
"k",
":",
"k",
"in",
"metadata",
")",
"if",
"key",
".",
"startswith",
"(",
"(",
"'%disc'",
",",
"'%track'",
")",
")",
":",
"number",
"=",
"_split_number_field",
"(",
"str",
"(",
"list_to_single_value",
"(",
"metadata",
"[",
"field",
"]",
")",
")",
")",
"if",
"key",
".",
"endswith",
"(",
"'2%'",
")",
":",
"metadata",
"[",
"field",
"]",
"=",
"number",
".",
"zfill",
"(",
"2",
")",
"else",
":",
"metadata",
"[",
"field",
"]",
"=",
"number",
"part",
"=",
"part",
".",
"replace",
"(",
"key",
",",
"list_to_single_value",
"(",
"metadata",
"[",
"field",
"]",
")",
")",
"parts",
".",
"append",
"(",
"_replace_invalid_characters",
"(",
"part",
")",
")",
"filepath",
"=",
"Path",
"(",
"*",
"parts",
")",
"elif",
"'%suggested%'",
"in",
"template",
":",
"filepath",
"=",
"Path",
"(",
"template",
".",
"replace",
"(",
"'%suggested%'",
",",
"suggested_filename",
")",
")",
"elif",
"template",
".",
"endswith",
"(",
"(",
"'/'",
",",
"'\\\\'",
")",
")",
":",
"filepath",
"=",
"path",
"/",
"suggested_filename",
"else",
":",
"filepath",
"=",
"path",
"return",
"filepath"
] | Create directory structure and file name based on metadata template.
Note:
A template meant to be a base directory for suggested
names should have a trailing slash or backslash.
Parameters:
template (str or ~os.PathLike): A filepath which can include template patterns as defined by :param template_patterns:.
metadata (~collections.abc.Mapping): A metadata dict.
template_patterns (~collections.abc.Mapping): A dict of ``pattern: field`` pairs used to replace patterns with metadata field values.
Default: :const:`~google_music_utils.constants.TEMPLATE_PATTERNS`
Returns:
~pathlib.Path: A filepath. | [
"Create",
"directory",
"structure",
"and",
"file",
"name",
"based",
"on",
"metadata",
"template",
"."
] | 2e8873defe7d5aab7321b9d5ec8a80d72687578e | https://github.com/thebigmunch/google-music-utils/blob/2e8873defe7d5aab7321b9d5ec8a80d72687578e/src/google_music_utils/misc.py#L54-L138 | train |
thebigmunch/google-music-utils | src/google_music_utils/filter.py | _match_field | def _match_field(field_value, pattern, ignore_case=False, normalize_values=False):
"""Match an item metadata field value by pattern.
Note:
Metadata values are lowercased when ``normalized_values`` is ``True``,
so ``ignore_case`` is automatically set to ``True``.
Parameters:
field_value (list or str): A metadata field value to check.
pattern (str): A regex pattern to check the field value(s) against.
ignore_case (bool): Perform case-insensitive matching.
Default: ``False``
normalize_values (bool): Normalize metadata values to remove common differences between sources.
Default: ``False``
Returns:
bool: True if matched, False if not.
"""
if normalize_values:
ignore_case = True
normalize = normalize_value if normalize_values else lambda x: str(x)
search = functools.partial(re.search, flags=re.I) if ignore_case else re.search
# audio_metadata fields contain a list of values.
if isinstance(field_value, list):
return any(search(pattern, normalize(value)) for value in field_value)
else:
return search(pattern, normalize(field_value)) | python | def _match_field(field_value, pattern, ignore_case=False, normalize_values=False):
"""Match an item metadata field value by pattern.
Note:
Metadata values are lowercased when ``normalized_values`` is ``True``,
so ``ignore_case`` is automatically set to ``True``.
Parameters:
field_value (list or str): A metadata field value to check.
pattern (str): A regex pattern to check the field value(s) against.
ignore_case (bool): Perform case-insensitive matching.
Default: ``False``
normalize_values (bool): Normalize metadata values to remove common differences between sources.
Default: ``False``
Returns:
bool: True if matched, False if not.
"""
if normalize_values:
ignore_case = True
normalize = normalize_value if normalize_values else lambda x: str(x)
search = functools.partial(re.search, flags=re.I) if ignore_case else re.search
# audio_metadata fields contain a list of values.
if isinstance(field_value, list):
return any(search(pattern, normalize(value)) for value in field_value)
else:
return search(pattern, normalize(field_value)) | [
"def",
"_match_field",
"(",
"field_value",
",",
"pattern",
",",
"ignore_case",
"=",
"False",
",",
"normalize_values",
"=",
"False",
")",
":",
"if",
"normalize_values",
":",
"ignore_case",
"=",
"True",
"normalize",
"=",
"normalize_value",
"if",
"normalize_values",
"else",
"lambda",
"x",
":",
"str",
"(",
"x",
")",
"search",
"=",
"functools",
".",
"partial",
"(",
"re",
".",
"search",
",",
"flags",
"=",
"re",
".",
"I",
")",
"if",
"ignore_case",
"else",
"re",
".",
"search",
"# audio_metadata fields contain a list of values.",
"if",
"isinstance",
"(",
"field_value",
",",
"list",
")",
":",
"return",
"any",
"(",
"search",
"(",
"pattern",
",",
"normalize",
"(",
"value",
")",
")",
"for",
"value",
"in",
"field_value",
")",
"else",
":",
"return",
"search",
"(",
"pattern",
",",
"normalize",
"(",
"field_value",
")",
")"
] | Match an item metadata field value by pattern.
Note:
Metadata values are lowercased when ``normalized_values`` is ``True``,
so ``ignore_case`` is automatically set to ``True``.
Parameters:
field_value (list or str): A metadata field value to check.
pattern (str): A regex pattern to check the field value(s) against.
ignore_case (bool): Perform case-insensitive matching.
Default: ``False``
normalize_values (bool): Normalize metadata values to remove common differences between sources.
Default: ``False``
Returns:
bool: True if matched, False if not. | [
"Match",
"an",
"item",
"metadata",
"field",
"value",
"by",
"pattern",
"."
] | 2e8873defe7d5aab7321b9d5ec8a80d72687578e | https://github.com/thebigmunch/google-music-utils/blob/2e8873defe7d5aab7321b9d5ec8a80d72687578e/src/google_music_utils/filter.py#L14-L43 | train |
thebigmunch/google-music-utils | src/google_music_utils/filter.py | _match_item | def _match_item(item, any_all=any, ignore_case=False, normalize_values=False, **kwargs):
"""Match items by metadata.
Note:
Metadata values are lowercased when ``normalized_values`` is ``True``,
so ``ignore_case`` is automatically set to ``True``.
Parameters:
item (~collections.abc.Mapping, str, os.PathLike): Item dict or filepath.
any_all (callable): A callable to determine if any or all filters must match to match item.
Expected values :obj:`any` (default) or :obj:`all`.
ignore_case (bool): Perform case-insensitive matching.
Default: ``False``
normalize_values (bool): Normalize metadata values to remove common differences between sources.
Default: ``False``
kwargs (list): Lists of values to match the given metadata field.
Returns:
bool: True if matched, False if not.
"""
it = get_item_tags(item)
return any_all(
_match_field(
get_field(it, field), pattern, ignore_case=ignore_case, normalize_values=normalize_values
) for field, patterns in kwargs.items() for pattern in patterns
) | python | def _match_item(item, any_all=any, ignore_case=False, normalize_values=False, **kwargs):
"""Match items by metadata.
Note:
Metadata values are lowercased when ``normalized_values`` is ``True``,
so ``ignore_case`` is automatically set to ``True``.
Parameters:
item (~collections.abc.Mapping, str, os.PathLike): Item dict or filepath.
any_all (callable): A callable to determine if any or all filters must match to match item.
Expected values :obj:`any` (default) or :obj:`all`.
ignore_case (bool): Perform case-insensitive matching.
Default: ``False``
normalize_values (bool): Normalize metadata values to remove common differences between sources.
Default: ``False``
kwargs (list): Lists of values to match the given metadata field.
Returns:
bool: True if matched, False if not.
"""
it = get_item_tags(item)
return any_all(
_match_field(
get_field(it, field), pattern, ignore_case=ignore_case, normalize_values=normalize_values
) for field, patterns in kwargs.items() for pattern in patterns
) | [
"def",
"_match_item",
"(",
"item",
",",
"any_all",
"=",
"any",
",",
"ignore_case",
"=",
"False",
",",
"normalize_values",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"it",
"=",
"get_item_tags",
"(",
"item",
")",
"return",
"any_all",
"(",
"_match_field",
"(",
"get_field",
"(",
"it",
",",
"field",
")",
",",
"pattern",
",",
"ignore_case",
"=",
"ignore_case",
",",
"normalize_values",
"=",
"normalize_values",
")",
"for",
"field",
",",
"patterns",
"in",
"kwargs",
".",
"items",
"(",
")",
"for",
"pattern",
"in",
"patterns",
")"
] | Match items by metadata.
Note:
Metadata values are lowercased when ``normalized_values`` is ``True``,
so ``ignore_case`` is automatically set to ``True``.
Parameters:
item (~collections.abc.Mapping, str, os.PathLike): Item dict or filepath.
any_all (callable): A callable to determine if any or all filters must match to match item.
Expected values :obj:`any` (default) or :obj:`all`.
ignore_case (bool): Perform case-insensitive matching.
Default: ``False``
normalize_values (bool): Normalize metadata values to remove common differences between sources.
Default: ``False``
kwargs (list): Lists of values to match the given metadata field.
Returns:
bool: True if matched, False if not. | [
"Match",
"items",
"by",
"metadata",
"."
] | 2e8873defe7d5aab7321b9d5ec8a80d72687578e | https://github.com/thebigmunch/google-music-utils/blob/2e8873defe7d5aab7321b9d5ec8a80d72687578e/src/google_music_utils/filter.py#L46-L73 | train |
thebigmunch/google-music-utils | src/google_music_utils/filter.py | exclude_items | def exclude_items(items, any_all=any, ignore_case=False, normalize_values=False, **kwargs):
"""Exclude items by matching metadata.
Note:
Metadata values are lowercased when ``normalized_values`` is ``True``,
so ``ignore_case`` is automatically set to ``True``.
Parameters:
items (list): A list of item dicts or filepaths.
any_all (callable): A callable to determine if any or all filters must match to exclude items.
Expected values :obj:`any` (default) or :obj:`all`.
ignore_case (bool): Perform case-insensitive matching.
Default: ``False``
normalize_values (bool): Normalize metadata values to remove common differences between sources.
Default: ``False``
kwargs (list): Lists of values to match the given metadata field.
Yields:
dict: The next item to be included.
Example:
>>> from google_music_utils import exclude_items
>>> list(exclude_items(song_list, any_all=all, ignore_case=True, normalize_values=True, artist=['Beck'], album=['Golden Feelings']))
"""
if kwargs:
match = functools.partial(
_match_item, any_all=any_all, ignore_case=ignore_case, normalize_values=normalize_values, **kwargs
)
return filterfalse(match, items)
else:
return iter(items) | python | def exclude_items(items, any_all=any, ignore_case=False, normalize_values=False, **kwargs):
"""Exclude items by matching metadata.
Note:
Metadata values are lowercased when ``normalized_values`` is ``True``,
so ``ignore_case`` is automatically set to ``True``.
Parameters:
items (list): A list of item dicts or filepaths.
any_all (callable): A callable to determine if any or all filters must match to exclude items.
Expected values :obj:`any` (default) or :obj:`all`.
ignore_case (bool): Perform case-insensitive matching.
Default: ``False``
normalize_values (bool): Normalize metadata values to remove common differences between sources.
Default: ``False``
kwargs (list): Lists of values to match the given metadata field.
Yields:
dict: The next item to be included.
Example:
>>> from google_music_utils import exclude_items
>>> list(exclude_items(song_list, any_all=all, ignore_case=True, normalize_values=True, artist=['Beck'], album=['Golden Feelings']))
"""
if kwargs:
match = functools.partial(
_match_item, any_all=any_all, ignore_case=ignore_case, normalize_values=normalize_values, **kwargs
)
return filterfalse(match, items)
else:
return iter(items) | [
"def",
"exclude_items",
"(",
"items",
",",
"any_all",
"=",
"any",
",",
"ignore_case",
"=",
"False",
",",
"normalize_values",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"kwargs",
":",
"match",
"=",
"functools",
".",
"partial",
"(",
"_match_item",
",",
"any_all",
"=",
"any_all",
",",
"ignore_case",
"=",
"ignore_case",
",",
"normalize_values",
"=",
"normalize_values",
",",
"*",
"*",
"kwargs",
")",
"return",
"filterfalse",
"(",
"match",
",",
"items",
")",
"else",
":",
"return",
"iter",
"(",
"items",
")"
] | Exclude items by matching metadata.
Note:
Metadata values are lowercased when ``normalized_values`` is ``True``,
so ``ignore_case`` is automatically set to ``True``.
Parameters:
items (list): A list of item dicts or filepaths.
any_all (callable): A callable to determine if any or all filters must match to exclude items.
Expected values :obj:`any` (default) or :obj:`all`.
ignore_case (bool): Perform case-insensitive matching.
Default: ``False``
normalize_values (bool): Normalize metadata values to remove common differences between sources.
Default: ``False``
kwargs (list): Lists of values to match the given metadata field.
Yields:
dict: The next item to be included.
Example:
>>> from google_music_utils import exclude_items
>>> list(exclude_items(song_list, any_all=all, ignore_case=True, normalize_values=True, artist=['Beck'], album=['Golden Feelings'])) | [
"Exclude",
"items",
"by",
"matching",
"metadata",
"."
] | 2e8873defe7d5aab7321b9d5ec8a80d72687578e | https://github.com/thebigmunch/google-music-utils/blob/2e8873defe7d5aab7321b9d5ec8a80d72687578e/src/google_music_utils/filter.py#L76-L108 | train |
thebigmunch/google-music-utils | src/google_music_utils/filter.py | include_items | def include_items(items, any_all=any, ignore_case=False, normalize_values=False, **kwargs):
"""Include items by matching metadata.
Note:
Metadata values are lowercased when ``normalized_values`` is ``True``,
so ``ignore_case`` is automatically set to ``True``.
Parameters:
items (list): A list of item dicts or filepaths.
any_all (callable): A callable to determine if any or all filters must match to include items.
Expected values :obj:`any` (default) or :obj:`all`.
ignore_case (bool): Perform case-insensitive matching.
Default: ``False``
normalize_values (bool): Normalize metadata values to remove common differences between sources.
Default: ``False``
kwargs (list): Lists of values to match the given metadata field.
Yields:
dict: The next item to be included.
Example:
>>> from google_music_utils import exclude_items
>>> list(include_items(song_list, any_all=all, ignore_case=True, normalize_values=True, artist=['Beck'], album=['Odelay']))
"""
if kwargs:
match = functools.partial(
_match_item, any_all=any_all, ignore_case=ignore_case, normalize_values=normalize_values, **kwargs
)
return filter(match, items)
else:
return iter(items) | python | def include_items(items, any_all=any, ignore_case=False, normalize_values=False, **kwargs):
"""Include items by matching metadata.
Note:
Metadata values are lowercased when ``normalized_values`` is ``True``,
so ``ignore_case`` is automatically set to ``True``.
Parameters:
items (list): A list of item dicts or filepaths.
any_all (callable): A callable to determine if any or all filters must match to include items.
Expected values :obj:`any` (default) or :obj:`all`.
ignore_case (bool): Perform case-insensitive matching.
Default: ``False``
normalize_values (bool): Normalize metadata values to remove common differences between sources.
Default: ``False``
kwargs (list): Lists of values to match the given metadata field.
Yields:
dict: The next item to be included.
Example:
>>> from google_music_utils import exclude_items
>>> list(include_items(song_list, any_all=all, ignore_case=True, normalize_values=True, artist=['Beck'], album=['Odelay']))
"""
if kwargs:
match = functools.partial(
_match_item, any_all=any_all, ignore_case=ignore_case, normalize_values=normalize_values, **kwargs
)
return filter(match, items)
else:
return iter(items) | [
"def",
"include_items",
"(",
"items",
",",
"any_all",
"=",
"any",
",",
"ignore_case",
"=",
"False",
",",
"normalize_values",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"kwargs",
":",
"match",
"=",
"functools",
".",
"partial",
"(",
"_match_item",
",",
"any_all",
"=",
"any_all",
",",
"ignore_case",
"=",
"ignore_case",
",",
"normalize_values",
"=",
"normalize_values",
",",
"*",
"*",
"kwargs",
")",
"return",
"filter",
"(",
"match",
",",
"items",
")",
"else",
":",
"return",
"iter",
"(",
"items",
")"
] | Include items by matching metadata.
Note:
Metadata values are lowercased when ``normalized_values`` is ``True``,
so ``ignore_case`` is automatically set to ``True``.
Parameters:
items (list): A list of item dicts or filepaths.
any_all (callable): A callable to determine if any or all filters must match to include items.
Expected values :obj:`any` (default) or :obj:`all`.
ignore_case (bool): Perform case-insensitive matching.
Default: ``False``
normalize_values (bool): Normalize metadata values to remove common differences between sources.
Default: ``False``
kwargs (list): Lists of values to match the given metadata field.
Yields:
dict: The next item to be included.
Example:
>>> from google_music_utils import exclude_items
>>> list(include_items(song_list, any_all=all, ignore_case=True, normalize_values=True, artist=['Beck'], album=['Odelay'])) | [
"Include",
"items",
"by",
"matching",
"metadata",
"."
] | 2e8873defe7d5aab7321b9d5ec8a80d72687578e | https://github.com/thebigmunch/google-music-utils/blob/2e8873defe7d5aab7321b9d5ec8a80d72687578e/src/google_music_utils/filter.py#L111-L143 | train |
IRC-SPHERE/HyperStream | hyperstream/utils/statistics/percentile.py | percentile | def percentile(a, q):
"""
Compute the qth percentile of the data along the specified axis.
Simpler version than the numpy version that always flattens input arrays.
Examples
--------
>>> a = [[10, 7, 4], [3, 2, 1]]
>>> percentile(a, 20)
2.0
>>> percentile(a, 50)
3.5
>>> percentile(a, [20, 80])
[2.0, 7.0]
>>> a = list(range(40))
>>> percentile(a, 25)
9.75
:param a: Input array or object that can be converted to an array.
:param q: Percentile to compute, which must be between 0 and 100 inclusive.
:return: the qth percentile(s) of the array elements.
"""
if not a:
return None
if isinstance(q, (float, int)):
qq = [q]
elif isinstance(q, (tuple, list)):
qq = q
else:
raise ValueError("Quantile type {} not understood".format(type(q)))
if isinstance(a, (float, int)):
a = [a]
for i in range(len(qq)):
if qq[i] < 0. or qq[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
qq[i] /= 100.
a = sorted(flatten(a))
r = []
for q in qq:
k = (len(a) - 1) * q
f = math.floor(k)
c = math.ceil(k)
if f == c:
r.append(float(a[int(k)]))
continue
d0 = a[int(f)] * (c - k)
d1 = a[int(c)] * (k - f)
r.append(float(d0 + d1))
if len(r) == 1:
return r[0]
return r | python | def percentile(a, q):
"""
Compute the qth percentile of the data along the specified axis.
Simpler version than the numpy version that always flattens input arrays.
Examples
--------
>>> a = [[10, 7, 4], [3, 2, 1]]
>>> percentile(a, 20)
2.0
>>> percentile(a, 50)
3.5
>>> percentile(a, [20, 80])
[2.0, 7.0]
>>> a = list(range(40))
>>> percentile(a, 25)
9.75
:param a: Input array or object that can be converted to an array.
:param q: Percentile to compute, which must be between 0 and 100 inclusive.
:return: the qth percentile(s) of the array elements.
"""
if not a:
return None
if isinstance(q, (float, int)):
qq = [q]
elif isinstance(q, (tuple, list)):
qq = q
else:
raise ValueError("Quantile type {} not understood".format(type(q)))
if isinstance(a, (float, int)):
a = [a]
for i in range(len(qq)):
if qq[i] < 0. or qq[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
qq[i] /= 100.
a = sorted(flatten(a))
r = []
for q in qq:
k = (len(a) - 1) * q
f = math.floor(k)
c = math.ceil(k)
if f == c:
r.append(float(a[int(k)]))
continue
d0 = a[int(f)] * (c - k)
d1 = a[int(c)] * (k - f)
r.append(float(d0 + d1))
if len(r) == 1:
return r[0]
return r | [
"def",
"percentile",
"(",
"a",
",",
"q",
")",
":",
"if",
"not",
"a",
":",
"return",
"None",
"if",
"isinstance",
"(",
"q",
",",
"(",
"float",
",",
"int",
")",
")",
":",
"qq",
"=",
"[",
"q",
"]",
"elif",
"isinstance",
"(",
"q",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"qq",
"=",
"q",
"else",
":",
"raise",
"ValueError",
"(",
"\"Quantile type {} not understood\"",
".",
"format",
"(",
"type",
"(",
"q",
")",
")",
")",
"if",
"isinstance",
"(",
"a",
",",
"(",
"float",
",",
"int",
")",
")",
":",
"a",
"=",
"[",
"a",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"qq",
")",
")",
":",
"if",
"qq",
"[",
"i",
"]",
"<",
"0.",
"or",
"qq",
"[",
"i",
"]",
">",
"100.",
":",
"raise",
"ValueError",
"(",
"\"Percentiles must be in the range [0,100]\"",
")",
"qq",
"[",
"i",
"]",
"/=",
"100.",
"a",
"=",
"sorted",
"(",
"flatten",
"(",
"a",
")",
")",
"r",
"=",
"[",
"]",
"for",
"q",
"in",
"qq",
":",
"k",
"=",
"(",
"len",
"(",
"a",
")",
"-",
"1",
")",
"*",
"q",
"f",
"=",
"math",
".",
"floor",
"(",
"k",
")",
"c",
"=",
"math",
".",
"ceil",
"(",
"k",
")",
"if",
"f",
"==",
"c",
":",
"r",
".",
"append",
"(",
"float",
"(",
"a",
"[",
"int",
"(",
"k",
")",
"]",
")",
")",
"continue",
"d0",
"=",
"a",
"[",
"int",
"(",
"f",
")",
"]",
"*",
"(",
"c",
"-",
"k",
")",
"d1",
"=",
"a",
"[",
"int",
"(",
"c",
")",
"]",
"*",
"(",
"k",
"-",
"f",
")",
"r",
".",
"append",
"(",
"float",
"(",
"d0",
"+",
"d1",
")",
")",
"if",
"len",
"(",
"r",
")",
"==",
"1",
":",
"return",
"r",
"[",
"0",
"]",
"return",
"r"
] | Compute the qth percentile of the data along the specified axis.
Simpler version than the numpy version that always flattens input arrays.
Examples
--------
>>> a = [[10, 7, 4], [3, 2, 1]]
>>> percentile(a, 20)
2.0
>>> percentile(a, 50)
3.5
>>> percentile(a, [20, 80])
[2.0, 7.0]
>>> a = list(range(40))
>>> percentile(a, 25)
9.75
:param a: Input array or object that can be converted to an array.
:param q: Percentile to compute, which must be between 0 and 100 inclusive.
:return: the qth percentile(s) of the array elements. | [
"Compute",
"the",
"qth",
"percentile",
"of",
"the",
"data",
"along",
"the",
"specified",
"axis",
".",
"Simpler",
"version",
"than",
"the",
"numpy",
"version",
"that",
"always",
"flattens",
"input",
"arrays",
"."
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/utils/statistics/percentile.py#L33-L90 | train |
dgomes/pyipma | pyipma/station.py | Station._filter_closest | def _filter_closest(self, lat, lon, stations):
"""Helper to filter the closest station to a given location."""
current_location = (lat, lon)
closest = None
closest_distance = None
for station in stations:
station_loc = (station.latitude, station.longitude)
station_distance = distance.distance(current_location,
station_loc).km
if not closest or station_distance < closest_distance:
closest = station
closest_distance = station_distance
return closest | python | def _filter_closest(self, lat, lon, stations):
"""Helper to filter the closest station to a given location."""
current_location = (lat, lon)
closest = None
closest_distance = None
for station in stations:
station_loc = (station.latitude, station.longitude)
station_distance = distance.distance(current_location,
station_loc).km
if not closest or station_distance < closest_distance:
closest = station
closest_distance = station_distance
return closest | [
"def",
"_filter_closest",
"(",
"self",
",",
"lat",
",",
"lon",
",",
"stations",
")",
":",
"current_location",
"=",
"(",
"lat",
",",
"lon",
")",
"closest",
"=",
"None",
"closest_distance",
"=",
"None",
"for",
"station",
"in",
"stations",
":",
"station_loc",
"=",
"(",
"station",
".",
"latitude",
",",
"station",
".",
"longitude",
")",
"station_distance",
"=",
"distance",
".",
"distance",
"(",
"current_location",
",",
"station_loc",
")",
".",
"km",
"if",
"not",
"closest",
"or",
"station_distance",
"<",
"closest_distance",
":",
"closest",
"=",
"station",
"closest_distance",
"=",
"station_distance",
"return",
"closest"
] | Helper to filter the closest station to a given location. | [
"Helper",
"to",
"filter",
"the",
"closest",
"station",
"to",
"a",
"given",
"location",
"."
] | cd808abeb70dca0e336afdf55bef3f73973eaa71 | https://github.com/dgomes/pyipma/blob/cd808abeb70dca0e336afdf55bef3f73973eaa71/pyipma/station.py#L20-L34 | train |
dgomes/pyipma | pyipma/station.py | Station.get | async def get(cls, websession, lat, lon):
"""Retrieve the nearest station."""
self = Station(websession)
stations = await self.api.stations()
self.station = self._filter_closest(lat, lon, stations)
logger.info("Using %s as weather station", self.station.local)
return self | python | async def get(cls, websession, lat, lon):
"""Retrieve the nearest station."""
self = Station(websession)
stations = await self.api.stations()
self.station = self._filter_closest(lat, lon, stations)
logger.info("Using %s as weather station", self.station.local)
return self | [
"async",
"def",
"get",
"(",
"cls",
",",
"websession",
",",
"lat",
",",
"lon",
")",
":",
"self",
"=",
"Station",
"(",
"websession",
")",
"stations",
"=",
"await",
"self",
".",
"api",
".",
"stations",
"(",
")",
"self",
".",
"station",
"=",
"self",
".",
"_filter_closest",
"(",
"lat",
",",
"lon",
",",
"stations",
")",
"logger",
".",
"info",
"(",
"\"Using %s as weather station\"",
",",
"self",
".",
"station",
".",
"local",
")",
"return",
"self"
] | Retrieve the nearest station. | [
"Retrieve",
"the",
"nearest",
"station",
"."
] | cd808abeb70dca0e336afdf55bef3f73973eaa71 | https://github.com/dgomes/pyipma/blob/cd808abeb70dca0e336afdf55bef3f73973eaa71/pyipma/station.py#L37-L48 | train |
IRC-SPHERE/HyperStream | hyperstream/plugin_manager.py | Plugin.load_channels | def load_channels(self):
"""
Loads the channels and tools given the plugin path specified
:return: The loaded channels, including a tool channel, for the tools found.
"""
channels = []
# Try to get channels
for channel_name in self.channel_names:
channel_path = os.path.join(self.path, "channels")
sys.path.append(self.path)
mod = imp.load_module(channel_name, *imp.find_module(channel_name, [channel_path]))
cls = getattr(mod, channel_name.title().replace("_", ""))
channel_id = channel_name.split("_")[0]
# TODO: what about up_to_timestamp?
try:
channels.append(cls(channel_id, up_to_timestamp=None))
except TypeError:
channels.append(cls(channel_id))
# Try to get tools
if self.has_tools:
tool_path = os.path.join(self.path, "tools")
# Create a tool channel using this path
channel_id = self.channel_id_prefix + "_" + "tools"
channel = ToolChannel(channel_id, tool_path, up_to_timestamp=utcnow())
channels.append(channel)
if self.has_assets:
assset_path = os.path.join(os.path.abspath(self.path), "assets")
channel_id = self.channel_id_prefix + "_" + "assets"
channel = AssetsFileChannel(channel_id, assset_path, up_to_timestamp=utcnow())
channels.append(channel)
#
# from . import TimeInterval
# channel.streams.values()[0].window(TimeInterval.up_to_now()).items()
return channels | python | def load_channels(self):
"""
Loads the channels and tools given the plugin path specified
:return: The loaded channels, including a tool channel, for the tools found.
"""
channels = []
# Try to get channels
for channel_name in self.channel_names:
channel_path = os.path.join(self.path, "channels")
sys.path.append(self.path)
mod = imp.load_module(channel_name, *imp.find_module(channel_name, [channel_path]))
cls = getattr(mod, channel_name.title().replace("_", ""))
channel_id = channel_name.split("_")[0]
# TODO: what about up_to_timestamp?
try:
channels.append(cls(channel_id, up_to_timestamp=None))
except TypeError:
channels.append(cls(channel_id))
# Try to get tools
if self.has_tools:
tool_path = os.path.join(self.path, "tools")
# Create a tool channel using this path
channel_id = self.channel_id_prefix + "_" + "tools"
channel = ToolChannel(channel_id, tool_path, up_to_timestamp=utcnow())
channels.append(channel)
if self.has_assets:
assset_path = os.path.join(os.path.abspath(self.path), "assets")
channel_id = self.channel_id_prefix + "_" + "assets"
channel = AssetsFileChannel(channel_id, assset_path, up_to_timestamp=utcnow())
channels.append(channel)
#
# from . import TimeInterval
# channel.streams.values()[0].window(TimeInterval.up_to_now()).items()
return channels | [
"def",
"load_channels",
"(",
"self",
")",
":",
"channels",
"=",
"[",
"]",
"# Try to get channels",
"for",
"channel_name",
"in",
"self",
".",
"channel_names",
":",
"channel_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"\"channels\"",
")",
"sys",
".",
"path",
".",
"append",
"(",
"self",
".",
"path",
")",
"mod",
"=",
"imp",
".",
"load_module",
"(",
"channel_name",
",",
"*",
"imp",
".",
"find_module",
"(",
"channel_name",
",",
"[",
"channel_path",
"]",
")",
")",
"cls",
"=",
"getattr",
"(",
"mod",
",",
"channel_name",
".",
"title",
"(",
")",
".",
"replace",
"(",
"\"_\"",
",",
"\"\"",
")",
")",
"channel_id",
"=",
"channel_name",
".",
"split",
"(",
"\"_\"",
")",
"[",
"0",
"]",
"# TODO: what about up_to_timestamp?",
"try",
":",
"channels",
".",
"append",
"(",
"cls",
"(",
"channel_id",
",",
"up_to_timestamp",
"=",
"None",
")",
")",
"except",
"TypeError",
":",
"channels",
".",
"append",
"(",
"cls",
"(",
"channel_id",
")",
")",
"# Try to get tools",
"if",
"self",
".",
"has_tools",
":",
"tool_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"\"tools\"",
")",
"# Create a tool channel using this path",
"channel_id",
"=",
"self",
".",
"channel_id_prefix",
"+",
"\"_\"",
"+",
"\"tools\"",
"channel",
"=",
"ToolChannel",
"(",
"channel_id",
",",
"tool_path",
",",
"up_to_timestamp",
"=",
"utcnow",
"(",
")",
")",
"channels",
".",
"append",
"(",
"channel",
")",
"if",
"self",
".",
"has_assets",
":",
"assset_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"self",
".",
"path",
")",
",",
"\"assets\"",
")",
"channel_id",
"=",
"self",
".",
"channel_id_prefix",
"+",
"\"_\"",
"+",
"\"assets\"",
"channel",
"=",
"AssetsFileChannel",
"(",
"channel_id",
",",
"assset_path",
",",
"up_to_timestamp",
"=",
"utcnow",
"(",
")",
")",
"channels",
".",
"append",
"(",
"channel",
")",
"#",
"# from . import TimeInterval",
"# channel.streams.values()[0].window(TimeInterval.up_to_now()).items()",
"return",
"channels"
] | Loads the channels and tools given the plugin path specified
:return: The loaded channels, including a tool channel, for the tools found. | [
"Loads",
"the",
"channels",
"and",
"tools",
"given",
"the",
"plugin",
"path",
"specified"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/plugin_manager.py#L38-L76 | train |
nsfmc/swatch | swatch/writer.py | chunk_count | def chunk_count(swatch):
"""return the number of byte-chunks in a swatch object
this recursively walks the swatch list, returning 1 for a single color &
returns 2 for each folder plus 1 for each color it contains
"""
if type(swatch) is dict:
if 'data' in swatch:
return 1
if 'swatches' in swatch:
return 2 + len(swatch['swatches'])
else:
return sum(map(chunk_count, swatch)) | python | def chunk_count(swatch):
"""return the number of byte-chunks in a swatch object
this recursively walks the swatch list, returning 1 for a single color &
returns 2 for each folder plus 1 for each color it contains
"""
if type(swatch) is dict:
if 'data' in swatch:
return 1
if 'swatches' in swatch:
return 2 + len(swatch['swatches'])
else:
return sum(map(chunk_count, swatch)) | [
"def",
"chunk_count",
"(",
"swatch",
")",
":",
"if",
"type",
"(",
"swatch",
")",
"is",
"dict",
":",
"if",
"'data'",
"in",
"swatch",
":",
"return",
"1",
"if",
"'swatches'",
"in",
"swatch",
":",
"return",
"2",
"+",
"len",
"(",
"swatch",
"[",
"'swatches'",
"]",
")",
"else",
":",
"return",
"sum",
"(",
"map",
"(",
"chunk_count",
",",
"swatch",
")",
")"
] | return the number of byte-chunks in a swatch object
this recursively walks the swatch list, returning 1 for a single color &
returns 2 for each folder plus 1 for each color it contains | [
"return",
"the",
"number",
"of",
"byte",
"-",
"chunks",
"in",
"a",
"swatch",
"object"
] | 8654edf4f1aeef37d42211ff3fe6a3e9e4325859 | https://github.com/nsfmc/swatch/blob/8654edf4f1aeef37d42211ff3fe6a3e9e4325859/swatch/writer.py#L18-L30 | train |
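A quick sanity check of the counting rule described above — one chunk per color, two per folder plus one per color the folder contains. The palette literal is illustrative only; `chunk_count` from the writer module above is assumed to be in scope, and only the 'data'/'swatches' keys matter to it.

    palette = [
        {"name": "red", "type": "Process",
         "data": {"mode": "RGB", "values": [1.0, 0.0, 0.0]}},
        {"name": "brand",
         "swatches": [{"name": "blue", "type": "Process",
                       "data": {"mode": "Gray", "values": [0.5]}}]},
    ]
    # 1 (single color) + 2 (folder) + 1 (color inside the folder) == 4
    assert chunk_count(palette) == 4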
nsfmc/swatch | swatch/writer.py | chunk_for_color | def chunk_for_color(obj):
"""builds up a byte-chunk for a color
the format for this is
b'\x00\x01' +
Big-Endian Unsigned Int == len(bytes that follow in this block)
• Big-Endian Unsigned Short == len(color_name)
in practice, because utf-16 takes up 2 bytes per letter
this will be 2 * (len(name) + 1)
so a color named 'foo' would be 8 bytes long
• UTF-16BE Encoded color_name terminated with '\0'
using 'foo', this yields '\x00f\x00o\x00o\x00\x00'
• A 4-byte char for Color mode ('RGB ', 'Gray', 'CMYK', 'LAB ')
note the trailing spaces
• a variable-length number of 4-byte length floats
this depends entirely on the color mode of the color.
• A Big-Endian short int for either a global, spot, or process color
global == 0, spot == 1, process == 2
the chunk has no terminating string although other sites have indicated
that the global/spot/process short is a terminator, it's actually used
to indicate how illustrator should deal with the color.
"""
title = obj['name'] + '\0'
title_length = len(title)
chunk = struct.pack('>H', title_length)
chunk += title.encode('utf-16be')
mode = obj['data']['mode'].encode()
values = obj['data']['values']
color_type = obj['type']
fmt = {b'RGB': '!fff', b'Gray': '!f', b'CMYK': '!ffff', b'LAB': '!fff'}
if mode in fmt:
padded_mode = mode.decode().ljust(4).encode()
chunk += struct.pack('!4s', padded_mode) # the color mode
chunk += struct.pack(fmt[mode], *values) # the color values
color_types = ['Global', 'Spot', 'Process']
if color_type in color_types:
color_int = color_types.index(color_type)
chunk += struct.pack('>h', color_int) # append swatch mode
chunk = struct.pack('>I', len(chunk)) + chunk # prepend the chunk size
return b'\x00\x01' + chunk | python | def chunk_for_color(obj):
"""builds up a byte-chunk for a color
the format for this is
b'\x00\x01' +
Big-Endian Unsigned Int == len(bytes that follow in this block)
• Big-Endian Unsigned Short == len(color_name)
in practice, because utf-16 takes up 2 bytes per letter
this will be 2 * (len(name) + 1)
so a color named 'foo' would be 8 bytes long
• UTF-16BE Encoded color_name terminated with '\0'
using 'foo', this yields '\x00f\x00o\x00o\x00\x00'
• A 4-byte char for Color mode ('RGB ', 'Gray', 'CMYK', 'LAB ')
note the trailing spaces
• a variable-length number of 4-byte length floats
this depends entirely on the color mode of the color.
• A Big-Endian short int for either a global, spot, or process color
global == 0, spot == 1, process == 2
the chunk has no terminating string although other sites have indicated
that the global/spot/process short is a terminator, it's actually used
to indicate how illustrator should deal with the color.
"""
title = obj['name'] + '\0'
title_length = len(title)
chunk = struct.pack('>H', title_length)
chunk += title.encode('utf-16be')
mode = obj['data']['mode'].encode()
values = obj['data']['values']
color_type = obj['type']
fmt = {b'RGB': '!fff', b'Gray': '!f', b'CMYK': '!ffff', b'LAB': '!fff'}
if mode in fmt:
padded_mode = mode.decode().ljust(4).encode()
chunk += struct.pack('!4s', padded_mode) # the color mode
chunk += struct.pack(fmt[mode], *values) # the color values
color_types = ['Global', 'Spot', 'Process']
if color_type in color_types:
color_int = color_types.index(color_type)
chunk += struct.pack('>h', color_int) # append swatch mode
chunk = struct.pack('>I', len(chunk)) + chunk # prepend the chunk size
return b'\x00\x01' + chunk | [
"def",
"chunk_for_color",
"(",
"obj",
")",
":",
"title",
"=",
"obj",
"[",
"'name'",
"]",
"+",
"'\\0'",
"title_length",
"=",
"len",
"(",
"title",
")",
"chunk",
"=",
"struct",
".",
"pack",
"(",
"'>H'",
",",
"title_length",
")",
"chunk",
"+=",
"title",
".",
"encode",
"(",
"'utf-16be'",
")",
"mode",
"=",
"obj",
"[",
"'data'",
"]",
"[",
"'mode'",
"]",
".",
"encode",
"(",
")",
"values",
"=",
"obj",
"[",
"'data'",
"]",
"[",
"'values'",
"]",
"color_type",
"=",
"obj",
"[",
"'type'",
"]",
"fmt",
"=",
"{",
"b'RGB'",
":",
"'!fff'",
",",
"b'Gray'",
":",
"'!f'",
",",
"b'CMYK'",
":",
"'!ffff'",
",",
"b'LAB'",
":",
"'!fff'",
"}",
"if",
"mode",
"in",
"fmt",
":",
"padded_mode",
"=",
"mode",
".",
"decode",
"(",
")",
".",
"ljust",
"(",
"4",
")",
".",
"encode",
"(",
")",
"chunk",
"+=",
"struct",
".",
"pack",
"(",
"'!4s'",
",",
"padded_mode",
")",
"# the color mode",
"chunk",
"+=",
"struct",
".",
"pack",
"(",
"fmt",
"[",
"mode",
"]",
",",
"*",
"values",
")",
"# the color values",
"color_types",
"=",
"[",
"'Global'",
",",
"'Spot'",
",",
"'Process'",
"]",
"if",
"color_type",
"in",
"color_types",
":",
"color_int",
"=",
"color_types",
".",
"index",
"(",
"color_type",
")",
"chunk",
"+=",
"struct",
".",
"pack",
"(",
"'>h'",
",",
"color_int",
")",
"# append swatch mode",
"chunk",
"=",
"struct",
".",
"pack",
"(",
"'>I'",
",",
"len",
"(",
"chunk",
")",
")",
"+",
"chunk",
"# prepend the chunk size",
"return",
"b'\\x00\\x01'",
"+",
"chunk"
] | builds up a byte-chunk for a color
the format for this is
b'\x00\x01' +
Big-Endian Unsigned Int == len(bytes that follow in this block)
• Big-Endian Unsigned Short == len(color_name)
in practice, because utf-16 takes up 2 bytes per letter
this will be 2 * (len(name) + 1)
so a color named 'foo' would be 8 bytes long
• UTF-16BE Encoded color_name terminated with '\0'
using 'foo', this yields '\x00f\x00o\x00o\x00\x00'
• A 4-byte char for Color mode ('RGB ', 'Gray', 'CMYK', 'LAB ')
note the trailing spaces
• a variable-length number of 4-byte length floats
this depends entirely on the color mode of the color.
• A Big-Endian short int for either a global, spot, or process color
global == 0, spot == 1, process == 2
the chunk has no terminating string although other sites have indicated
that the global/spot/process short is a terminator, it's actually used
to indicate how illustrator should deal with the color. | [
"builds",
"up",
"a",
"byte",
"-",
"chunk",
"for",
"a",
"color"
] | 8654edf4f1aeef37d42211ff3fe6a3e9e4325859 | https://github.com/nsfmc/swatch/blob/8654edf4f1aeef37d42211ff3fe6a3e9e4325859/swatch/writer.py#L39-L83 | train |
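To make the byte layout above concrete, here is a minimal sketch that feeds a hypothetical RGB swatch through `chunk_for_color` (assumed to be in scope from the writer module above) and checks the chunk marker, the declared body length, and the space-padded mode string.

    import struct

    red = {"name": "red", "type": "Process",
           "data": {"mode": "RGB", "values": [1.0, 0.0, 0.0]}}

    chunk = chunk_for_color(red)
    assert chunk[:2] == b"\x00\x01"                  # color-chunk marker
    (body_len,) = struct.unpack(">I", chunk[2:6])
    assert body_len == len(chunk) - 6                # size field covers everything after it
    assert b"RGB " in chunk                          # 4-byte mode char with trailing space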
nsfmc/swatch | swatch/writer.py | chunk_for_folder | def chunk_for_folder(obj):
"""produce a byte-chunk for a folder of colors
the structure is very similar to a color's data:
• Header
b'\xC0\x01' +
Big Endian Unsigned Int == len(Bytes in the Header Block)
note _only_ the header, this doesn't include the length of color data
• Big Endian Unsigned Short == len(Folder Name + '\0')
Note that Folder Name is assumed to be utf-16be so this
will always be an even number
• Folder Name + '\0', encoded UTF-16BE
• body
chunks for each color, see chunk_for_color
• folder terminator
b'\xC0\x02' +
b'\x00\x00\x00\x00'
Perhaps the four null bytes represent something, but i'm pretty sure
they're just a terminating string, but there's something nice about
how the b'\xC0\x02' matches with the folder's header
"""
title = obj['name'] + '\0'
title_length = len(title)
chunk_body = struct.pack('>H', title_length) # title length
chunk_body += title.encode('utf-16be') # title
chunk_head = b'\xC0\x01' # folder header
chunk_head += struct.pack('>I', len(chunk_body))
# precede entire chunk by folder header and size of folder
chunk = chunk_head + chunk_body
chunk += b''.join([chunk_for_color(c) for c in obj['swatches']])
chunk += b'\xC0\x02' # folder terminator chunk
chunk += b'\x00\x00\x00\x00' # folder terminator
return chunk | python | def chunk_for_folder(obj):
"""produce a byte-chunk for a folder of colors
the structure is very similar to a color's data:
• Header
b'\xC0\x01' +
Big Endian Unsigned Int == len(Bytes in the Header Block)
note _only_ the header, this doesn't include the length of color data
• Big Endian Unsigned Short == len(Folder Name + '\0')
Note that Folder Name is assumed to be utf-16be so this
will always be an even number
• Folder Name + '\0', encoded UTF-16BE
• body
chunks for each color, see chunk_for_color
• folder terminator
b'\xC0\x02' +
b'\x00\x00\x00\x00'
Perhaps the four null bytes represent something, but i'm pretty sure
they're just a terminating string, but there's something nice about
how the b'\xC0\x02' matches with the folder's header
"""
title = obj['name'] + '\0'
title_length = len(title)
chunk_body = struct.pack('>H', title_length) # title length
chunk_body += title.encode('utf-16be') # title
chunk_head = b'\xC0\x01' # folder header
chunk_head += struct.pack('>I', len(chunk_body))
# precede entire chunk by folder header and size of folder
chunk = chunk_head + chunk_body
chunk += b''.join([chunk_for_color(c) for c in obj['swatches']])
chunk += b'\xC0\x02' # folder terminator chunk
chunk += b'\x00\x00\x00\x00' # folder terminator
return chunk | [
"def",
"chunk_for_folder",
"(",
"obj",
")",
":",
"title",
"=",
"obj",
"[",
"'name'",
"]",
"+",
"'\\0'",
"title_length",
"=",
"len",
"(",
"title",
")",
"chunk_body",
"=",
"struct",
".",
"pack",
"(",
"'>H'",
",",
"title_length",
")",
"# title length",
"chunk_body",
"+=",
"title",
".",
"encode",
"(",
"'utf-16be'",
")",
"# title",
"chunk_head",
"=",
"b'\\xC0\\x01'",
"# folder header",
"chunk_head",
"+=",
"struct",
".",
"pack",
"(",
"'>I'",
",",
"len",
"(",
"chunk_body",
")",
")",
"# precede entire chunk by folder header and size of folder",
"chunk",
"=",
"chunk_head",
"+",
"chunk_body",
"chunk",
"+=",
"b''",
".",
"join",
"(",
"[",
"chunk_for_color",
"(",
"c",
")",
"for",
"c",
"in",
"obj",
"[",
"'swatches'",
"]",
"]",
")",
"chunk",
"+=",
"b'\\xC0\\x02'",
"# folder terminator chunk",
"chunk",
"+=",
"b'\\x00\\x00\\x00\\x00'",
"# folder terminator",
"return",
"chunk"
] | produce a byte-chunk for a folder of colors
the structure is very similar to a color's data:
• Header
b'\xC0\x01' +
Big Endian Unsigned Int == len(Bytes in the Header Block)
note _only_ the header, this doesn't include the length of color data
• Big Endian Unsigned Short == len(Folder Name + '\0')
Note that Folder Name is assumed to be utf-16be so this
will always be an even number
• Folder Name + '\0', encoded UTF-16BE
• body
chunks for each color, see chunk_for_color
• folder terminator
b'\xC0\x02' +
b'\x00\x00\x00\x00'
Perhaps the four null bytes represent something, but i'm pretty sure
they're just a terminating string, but there's something nice about
how the b'\xC0\x02' matches with the folder's header | [
"produce",
"a",
"byte",
"-",
"chunk",
"for",
"a",
"folder",
"of",
"colors"
] | 8654edf4f1aeef37d42211ff3fe6a3e9e4325859 | https://github.com/nsfmc/swatch/blob/8654edf4f1aeef37d42211ff3fe6a3e9e4325859/swatch/writer.py#L85-L121 | train |
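The folder counterpart, sketched the same way (again assuming the writer functions above are in scope): a folder chunk opens with b'\xC0\x01', embeds a color chunk per swatch, and closes with the b'\xC0\x02' terminator plus four null bytes.

    folder = {"name": "warm",
              "swatches": [{"name": "red", "type": "Process",
                            "data": {"mode": "RGB", "values": [1.0, 0.0, 0.0]}}]}

    fchunk = chunk_for_folder(folder)
    assert fchunk.startswith(b"\xC0\x01")                      # folder header
    assert fchunk.endswith(b"\xC0\x02" + b"\x00\x00\x00\x00")  # folder terminator
    assert fchunk.count(b"\x00\x01") >= 1                      # embedded color chunk marker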
tapilab/brandelion | brandelion/cli/collect.py | iter_lines | def iter_lines(filename):
""" Iterate over screen names in a file, one per line."""
with open(filename, 'rt') as idfile:
for line in idfile:
screen_name = line.strip()
if len(screen_name) > 0:
yield screen_name.split()[0] | python | def iter_lines(filename):
""" Iterate over screen names in a file, one per line."""
with open(filename, 'rt') as idfile:
for line in idfile:
screen_name = line.strip()
if len(screen_name) > 0:
yield screen_name.split()[0] | [
"def",
"iter_lines",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'rt'",
")",
"as",
"idfile",
":",
"for",
"line",
"in",
"idfile",
":",
"screen_name",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"screen_name",
")",
">",
"0",
":",
"yield",
"screen_name",
".",
"split",
"(",
")",
"[",
"0",
"]"
] | Iterate over screen names in a file, one per line. | [
"Iterate",
"over",
"screen",
"names",
"in",
"a",
"file",
"one",
"per",
"line",
"."
] | 40a5a5333cf704182c8666d1fbbbdadc7ff88546 | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/collect.py#L47-L53 | train |
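A small illustration of the column handling in `iter_lines`: only the first whitespace-separated token of each non-empty line is yielded, so a "handle<TAB>count" file degrades gracefully to handles. The file name is made up for the example, and the function is assumed to be in scope from the collect module above.

    with open("handles.txt", "w") as f:
        f.write("alice\t12\n\nbob\n")

    print(list(iter_lines("handles.txt")))   # -> ['alice', 'bob']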
tapilab/brandelion | brandelion/cli/collect.py | fetch_tweets | def fetch_tweets(account_file, outfile, limit):
""" Fetch up to limit tweets for each account in account_file and write to
outfile. """
print('fetching tweets for accounts in', account_file)
outf = io.open(outfile, 'wt')
for screen_name in iter_lines(account_file):
print('\nFetching tweets for %s' % screen_name)
for tweet in twutil.collect.tweets_for_user(screen_name, limit):
tweet['user']['screen_name'] = screen_name
outf.write('%s\n' % json.dumps(tweet, ensure_ascii=False))
outf.flush() | python | def fetch_tweets(account_file, outfile, limit):
""" Fetch up to limit tweets for each account in account_file and write to
outfile. """
print('fetching tweets for accounts in', account_file)
outf = io.open(outfile, 'wt')
for screen_name in iter_lines(account_file):
print('\nFetching tweets for %s' % screen_name)
for tweet in twutil.collect.tweets_for_user(screen_name, limit):
tweet['user']['screen_name'] = screen_name
outf.write('%s\n' % json.dumps(tweet, ensure_ascii=False))
outf.flush() | [
"def",
"fetch_tweets",
"(",
"account_file",
",",
"outfile",
",",
"limit",
")",
":",
"print",
"(",
"'fetching tweets for accounts in'",
",",
"account_file",
")",
"outf",
"=",
"io",
".",
"open",
"(",
"outfile",
",",
"'wt'",
")",
"for",
"screen_name",
"in",
"iter_lines",
"(",
"account_file",
")",
":",
"print",
"(",
"'\\nFetching tweets for %s'",
"%",
"screen_name",
")",
"for",
"tweet",
"in",
"twutil",
".",
"collect",
".",
"tweets_for_user",
"(",
"screen_name",
",",
"limit",
")",
":",
"tweet",
"[",
"'user'",
"]",
"[",
"'screen_name'",
"]",
"=",
"screen_name",
"outf",
".",
"write",
"(",
"'%s\\n'",
"%",
"json",
".",
"dumps",
"(",
"tweet",
",",
"ensure_ascii",
"=",
"False",
")",
")",
"outf",
".",
"flush",
"(",
")"
] | Fetch up to limit tweets for each account in account_file and write to
outfile. | [
"Fetch",
"up",
"to",
"limit",
"tweets",
"for",
"each",
"account",
"in",
"account_file",
"and",
"write",
"to",
"outfile",
"."
] | 40a5a5333cf704182c8666d1fbbbdadc7ff88546 | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/collect.py#L85-L95 | train |
tapilab/brandelion | brandelion/cli/collect.py | fetch_exemplars | def fetch_exemplars(keyword, outfile, n=50):
""" Fetch top lists matching this keyword, then return Twitter screen
names along with the number of different lists on which each appers.. """
list_urls = fetch_lists(keyword, n)
print('found %d lists for %s' % (len(list_urls), keyword))
counts = Counter()
for list_url in list_urls:
counts.update(fetch_list_members(list_url))
# Write to file.
outf = io.open(outfile, 'wt')
for handle in sorted(counts):
outf.write('%s\t%d\n' % (handle, counts[handle]))
outf.close()
print('saved exemplars to', outfile) | python | def fetch_exemplars(keyword, outfile, n=50):
""" Fetch top lists matching this keyword, then return Twitter screen
names along with the number of different lists on which each appers.. """
list_urls = fetch_lists(keyword, n)
print('found %d lists for %s' % (len(list_urls), keyword))
counts = Counter()
for list_url in list_urls:
counts.update(fetch_list_members(list_url))
# Write to file.
outf = io.open(outfile, 'wt')
for handle in sorted(counts):
outf.write('%s\t%d\n' % (handle, counts[handle]))
outf.close()
print('saved exemplars to', outfile) | [
"def",
"fetch_exemplars",
"(",
"keyword",
",",
"outfile",
",",
"n",
"=",
"50",
")",
":",
"list_urls",
"=",
"fetch_lists",
"(",
"keyword",
",",
"n",
")",
"print",
"(",
"'found %d lists for %s'",
"%",
"(",
"len",
"(",
"list_urls",
")",
",",
"keyword",
")",
")",
"counts",
"=",
"Counter",
"(",
")",
"for",
"list_url",
"in",
"list_urls",
":",
"counts",
".",
"update",
"(",
"fetch_list_members",
"(",
"list_url",
")",
")",
"# Write to file.",
"outf",
"=",
"io",
".",
"open",
"(",
"outfile",
",",
"'wt'",
")",
"for",
"handle",
"in",
"sorted",
"(",
"counts",
")",
":",
"outf",
".",
"write",
"(",
"'%s\\t%d\\n'",
"%",
"(",
"handle",
",",
"counts",
"[",
"handle",
"]",
")",
")",
"outf",
".",
"close",
"(",
")",
"print",
"(",
"'saved exemplars to'",
",",
"outfile",
")"
] | Fetch top lists matching this keyword, then return Twitter screen
names along with the number of different lists on which each appers.. | [
"Fetch",
"top",
"lists",
"matching",
"this",
"keyword",
"then",
"return",
"Twitter",
"screen",
"names",
"along",
"with",
"the",
"number",
"of",
"different",
"lists",
"on",
"which",
"each",
"appers",
".."
] | 40a5a5333cf704182c8666d1fbbbdadc7ff88546 | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/collect.py#L171-L184 | train |
tamasgal/km3pipe | km3pipe/io/ch.py | CHPump._init_controlhost | def _init_controlhost(self):
"""Set up the controlhost connection"""
log.debug("Connecting to JLigier")
self.client = Client(self.host, self.port)
self.client._connect()
log.debug("Subscribing to tags: {0}".format(self.tags))
for tag in self.tags.split(','):
self.client.subscribe(tag.strip(), mode=self.subscription_mode)
log.debug("Controlhost initialisation done.") | python | def _init_controlhost(self):
"""Set up the controlhost connection"""
log.debug("Connecting to JLigier")
self.client = Client(self.host, self.port)
self.client._connect()
log.debug("Subscribing to tags: {0}".format(self.tags))
for tag in self.tags.split(','):
self.client.subscribe(tag.strip(), mode=self.subscription_mode)
log.debug("Controlhost initialisation done.") | [
"def",
"_init_controlhost",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"Connecting to JLigier\"",
")",
"self",
".",
"client",
"=",
"Client",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
")",
"self",
".",
"client",
".",
"_connect",
"(",
")",
"log",
".",
"debug",
"(",
"\"Subscribing to tags: {0}\"",
".",
"format",
"(",
"self",
".",
"tags",
")",
")",
"for",
"tag",
"in",
"self",
".",
"tags",
".",
"split",
"(",
"','",
")",
":",
"self",
".",
"client",
".",
"subscribe",
"(",
"tag",
".",
"strip",
"(",
")",
",",
"mode",
"=",
"self",
".",
"subscription_mode",
")",
"log",
".",
"debug",
"(",
"\"Controlhost initialisation done.\"",
")"
] | Set up the controlhost connection | [
"Set",
"up",
"the",
"controlhost",
"connection"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/ch.py#L88-L96 | train |
tamasgal/km3pipe | km3pipe/io/ch.py | CHPump.process | def process(self, blob):
"""Wait for the next packet and put it in the blob"""
# self._add_process_dt()
try:
log.debug("Waiting for queue items.")
prefix, data = self.queue.get(timeout=self.timeout)
log.debug("Got {0} bytes from queue.".format(len(data)))
except Empty:
log.warning(
"ControlHost timeout ({0}s) reached".format(self.timeout)
)
raise StopIteration("ControlHost timeout reached.")
blob[self.key_for_prefix] = prefix
blob[self.key_for_data] = data
return blob | python | def process(self, blob):
"""Wait for the next packet and put it in the blob"""
# self._add_process_dt()
try:
log.debug("Waiting for queue items.")
prefix, data = self.queue.get(timeout=self.timeout)
log.debug("Got {0} bytes from queue.".format(len(data)))
except Empty:
log.warning(
"ControlHost timeout ({0}s) reached".format(self.timeout)
)
raise StopIteration("ControlHost timeout reached.")
blob[self.key_for_prefix] = prefix
blob[self.key_for_data] = data
return blob | [
"def",
"process",
"(",
"self",
",",
"blob",
")",
":",
"# self._add_process_dt()",
"try",
":",
"log",
".",
"debug",
"(",
"\"Waiting for queue items.\"",
")",
"prefix",
",",
"data",
"=",
"self",
".",
"queue",
".",
"get",
"(",
"timeout",
"=",
"self",
".",
"timeout",
")",
"log",
".",
"debug",
"(",
"\"Got {0} bytes from queue.\"",
".",
"format",
"(",
"len",
"(",
"data",
")",
")",
")",
"except",
"Empty",
":",
"log",
".",
"warning",
"(",
"\"ControlHost timeout ({0}s) reached\"",
".",
"format",
"(",
"self",
".",
"timeout",
")",
")",
"raise",
"StopIteration",
"(",
"\"ControlHost timeout reached.\"",
")",
"blob",
"[",
"self",
".",
"key_for_prefix",
"]",
"=",
"prefix",
"blob",
"[",
"self",
".",
"key_for_data",
"]",
"=",
"data",
"return",
"blob"
] | Wait for the next packet and put it in the blob | [
"Wait",
"for",
"the",
"next",
"packet",
"and",
"put",
"it",
"in",
"the",
"blob"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/ch.py#L140-L154 | train |
tamasgal/km3pipe | km3pipe/io/ch.py | CHPump.finish | def finish(self):
"""Clean up the JLigier controlhost connection"""
log.debug("Disconnecting from JLigier.")
self.client.socket.shutdown(socket.SHUT_RDWR)
self.client._disconnect() | python | def finish(self):
"""Clean up the JLigier controlhost connection"""
log.debug("Disconnecting from JLigier.")
self.client.socket.shutdown(socket.SHUT_RDWR)
self.client._disconnect() | [
"def",
"finish",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"Disconnecting from JLigier.\"",
")",
"self",
".",
"client",
".",
"socket",
".",
"shutdown",
"(",
"socket",
".",
"SHUT_RDWR",
")",
"self",
".",
"client",
".",
"_disconnect",
"(",
")"
] | Clean up the JLigier controlhost connection | [
"Clean",
"up",
"the",
"JLigier",
"controlhost",
"connection"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/ch.py#L176-L180 | train |
ioos/pyoos | pyoos/collectors/hads/hads.py | Hads.list_variables | def list_variables(self):
"""
List available variables and applies any filters.
"""
station_codes = self._get_station_codes()
station_codes = self._apply_features_filter(station_codes)
variables = self._list_variables(station_codes)
if hasattr(self, "_variables") and self.variables is not None:
variables.intersection_update(set(self.variables))
return list(variables) | python | def list_variables(self):
"""
List available variables and applies any filters.
"""
station_codes = self._get_station_codes()
station_codes = self._apply_features_filter(station_codes)
variables = self._list_variables(station_codes)
if hasattr(self, "_variables") and self.variables is not None:
variables.intersection_update(set(self.variables))
return list(variables) | [
"def",
"list_variables",
"(",
"self",
")",
":",
"station_codes",
"=",
"self",
".",
"_get_station_codes",
"(",
")",
"station_codes",
"=",
"self",
".",
"_apply_features_filter",
"(",
"station_codes",
")",
"variables",
"=",
"self",
".",
"_list_variables",
"(",
"station_codes",
")",
"if",
"hasattr",
"(",
"self",
",",
"\"_variables\"",
")",
"and",
"self",
".",
"variables",
"is",
"not",
"None",
":",
"variables",
".",
"intersection_update",
"(",
"set",
"(",
"self",
".",
"variables",
")",
")",
"return",
"list",
"(",
"variables",
")"
] | List available variables and applies any filters. | [
"List",
"available",
"variables",
"and",
"applies",
"any",
"filters",
"."
] | 908660385029ecd8eccda8ab3a6b20b47b915c77 | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/collectors/hads/hads.py#L50-L61 | train |
ioos/pyoos | pyoos/collectors/hads/hads.py | Hads._list_variables | def _list_variables(self, station_codes):
"""
Internal helper to list the variables for the given station codes.
"""
# sample output from obs retrieval:
#
# DD9452D0
# HP(SRBM5)
# 2013-07-22 19:30 45.97
# HT(SRBM5)
# 2013-07-22 19:30 44.29
# PC(SRBM5)
# 2013-07-22 19:30 36.19
#
rvar = re.compile(r"\n\s([A-Z]{2}[A-Z0-9]{0,1})\(\w+\)")
variables = set()
resp = requests.post(
self.obs_retrieval_url,
data={
"state": "nil",
"hsa": "nil",
"of": "3",
"extraids": " ".join(station_codes),
"sinceday": -1,
},
)
resp.raise_for_status()
list(map(variables.add, rvar.findall(resp.text)))
return variables | python | def _list_variables(self, station_codes):
"""
Internal helper to list the variables for the given station codes.
"""
# sample output from obs retrieval:
#
# DD9452D0
# HP(SRBM5)
# 2013-07-22 19:30 45.97
# HT(SRBM5)
# 2013-07-22 19:30 44.29
# PC(SRBM5)
# 2013-07-22 19:30 36.19
#
rvar = re.compile(r"\n\s([A-Z]{2}[A-Z0-9]{0,1})\(\w+\)")
variables = set()
resp = requests.post(
self.obs_retrieval_url,
data={
"state": "nil",
"hsa": "nil",
"of": "3",
"extraids": " ".join(station_codes),
"sinceday": -1,
},
)
resp.raise_for_status()
list(map(variables.add, rvar.findall(resp.text)))
return variables | [
"def",
"_list_variables",
"(",
"self",
",",
"station_codes",
")",
":",
"# sample output from obs retrieval:",
"#",
"# DD9452D0",
"# HP(SRBM5)",
"# 2013-07-22 19:30 45.97",
"# HT(SRBM5)",
"# 2013-07-22 19:30 44.29",
"# PC(SRBM5)",
"# 2013-07-22 19:30 36.19",
"#",
"rvar",
"=",
"re",
".",
"compile",
"(",
"r\"\\n\\s([A-Z]{2}[A-Z0-9]{0,1})\\(\\w+\\)\"",
")",
"variables",
"=",
"set",
"(",
")",
"resp",
"=",
"requests",
".",
"post",
"(",
"self",
".",
"obs_retrieval_url",
",",
"data",
"=",
"{",
"\"state\"",
":",
"\"nil\"",
",",
"\"hsa\"",
":",
"\"nil\"",
",",
"\"of\"",
":",
"\"3\"",
",",
"\"extraids\"",
":",
"\" \"",
".",
"join",
"(",
"station_codes",
")",
",",
"\"sinceday\"",
":",
"-",
"1",
",",
"}",
",",
")",
"resp",
".",
"raise_for_status",
"(",
")",
"list",
"(",
"map",
"(",
"variables",
".",
"add",
",",
"rvar",
".",
"findall",
"(",
"resp",
".",
"text",
")",
")",
")",
"return",
"variables"
] | Internal helper to list the variables for the given station codes. | [
"Internal",
"helper",
"to",
"list",
"the",
"variables",
"for",
"the",
"given",
"station",
"codes",
"."
] | 908660385029ecd8eccda8ab3a6b20b47b915c77 | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/collectors/hads/hads.py#L63-L93 | train |
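The regex above does the real work; run against text shaped like the sample response quoted in the comment, it pulls out just the short variable codes and ignores the station ID and timestamp lines. A standalone sketch with no HTTP involved:

    import re

    sample = ("\n DD9452D0"
              "\n HP(SRBM5)"
              "\n 2013-07-22 19:30   45.97"
              "\n HT(SRBM5)"
              "\n 2013-07-22 19:30   44.29"
              "\n PC(SRBM5)"
              "\n 2013-07-22 19:30   36.19\n")

    rvar = re.compile(r"\n\s([A-Z]{2}[A-Z0-9]{0,1})\(\w+\)")
    print(sorted(set(rvar.findall(sample))))   # -> ['HP', 'HT', 'PC']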
ioos/pyoos | pyoos/collectors/hads/hads.py | Hads._apply_features_filter | def _apply_features_filter(self, station_codes):
"""
If the features filter is set, this will return the intersection of
those filter items and the given station codes.
"""
# apply features filter
if hasattr(self, "features") and self.features is not None:
station_codes = set(station_codes)
station_codes = list(
station_codes.intersection(set(self.features))
)
return station_codes | python | def _apply_features_filter(self, station_codes):
"""
If the features filter is set, this will return the intersection of
those filter items and the given station codes.
"""
# apply features filter
if hasattr(self, "features") and self.features is not None:
station_codes = set(station_codes)
station_codes = list(
station_codes.intersection(set(self.features))
)
return station_codes | [
"def",
"_apply_features_filter",
"(",
"self",
",",
"station_codes",
")",
":",
"# apply features filter",
"if",
"hasattr",
"(",
"self",
",",
"\"features\"",
")",
"and",
"self",
".",
"features",
"is",
"not",
"None",
":",
"station_codes",
"=",
"set",
"(",
"station_codes",
")",
"station_codes",
"=",
"list",
"(",
"station_codes",
".",
"intersection",
"(",
"set",
"(",
"self",
".",
"features",
")",
")",
")",
"return",
"station_codes"
] | If the features filter is set, this will return the intersection of
those filter items and the given station codes. | [
"If",
"the",
"features",
"filter",
"is",
"set",
"this",
"will",
"return",
"the",
"intersection",
"of",
"those",
"filter",
"items",
"and",
"the",
"given",
"station",
"codes",
"."
] | 908660385029ecd8eccda8ab3a6b20b47b915c77 | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/collectors/hads/hads.py#L124-L136 | train |
ioos/pyoos | pyoos/collectors/hads/hads.py | Hads._get_station_codes | def _get_station_codes(self, force=False):
"""
Gets and caches a list of station codes optionally within a bbox.
Will return the cached version if it exists unless force is True.
"""
if not force and self.station_codes is not None:
return self.station_codes
state_urls = self._get_state_urls()
# filter by bounding box against a shapefile
state_matches = None
if self.bbox:
with collection(
os.path.join(
"resources",
"ne_50m_admin_1_states_provinces_lakes_shp.shp",
),
"r",
) as c:
geom_matches = [
x["properties"] for x in c.filter(bbox=self.bbox)
]
state_matches = [
x["postal"] if x["admin"] != "Canada" else "CN"
for x in geom_matches
]
self.station_codes = []
for state_url in state_urls:
if state_matches is not None:
state_abbr = state_url.split("/")[-1].split(".")[0]
if state_abbr not in state_matches:
continue
self.station_codes.extend(self._get_stations_for_state(state_url))
if self.bbox:
# retrieve metadata for all stations to properly filter them
metadata = self._get_metadata(self.station_codes)
parsed_metadata = self.parser._parse_metadata(metadata)
def in_bbox(code):
lat = parsed_metadata[code]["latitude"]
lon = parsed_metadata[code]["longitude"]
return (
lon >= self.bbox[0]
and lon <= self.bbox[2]
and lat >= self.bbox[1]
and lat <= self.bbox[3]
)
self.station_codes = list(filter(in_bbox, self.station_codes))
return self.station_codes | python | def _get_station_codes(self, force=False):
"""
Gets and caches a list of station codes optionally within a bbox.
Will return the cached version if it exists unless force is True.
"""
if not force and self.station_codes is not None:
return self.station_codes
state_urls = self._get_state_urls()
# filter by bounding box against a shapefile
state_matches = None
if self.bbox:
with collection(
os.path.join(
"resources",
"ne_50m_admin_1_states_provinces_lakes_shp.shp",
),
"r",
) as c:
geom_matches = [
x["properties"] for x in c.filter(bbox=self.bbox)
]
state_matches = [
x["postal"] if x["admin"] != "Canada" else "CN"
for x in geom_matches
]
self.station_codes = []
for state_url in state_urls:
if state_matches is not None:
state_abbr = state_url.split("/")[-1].split(".")[0]
if state_abbr not in state_matches:
continue
self.station_codes.extend(self._get_stations_for_state(state_url))
if self.bbox:
# retrieve metadata for all stations to properly filter them
metadata = self._get_metadata(self.station_codes)
parsed_metadata = self.parser._parse_metadata(metadata)
def in_bbox(code):
lat = parsed_metadata[code]["latitude"]
lon = parsed_metadata[code]["longitude"]
return (
lon >= self.bbox[0]
and lon <= self.bbox[2]
and lat >= self.bbox[1]
and lat <= self.bbox[3]
)
self.station_codes = list(filter(in_bbox, self.station_codes))
return self.station_codes | [
"def",
"_get_station_codes",
"(",
"self",
",",
"force",
"=",
"False",
")",
":",
"if",
"not",
"force",
"and",
"self",
".",
"station_codes",
"is",
"not",
"None",
":",
"return",
"self",
".",
"station_codes",
"state_urls",
"=",
"self",
".",
"_get_state_urls",
"(",
")",
"# filter by bounding box against a shapefile",
"state_matches",
"=",
"None",
"if",
"self",
".",
"bbox",
":",
"with",
"collection",
"(",
"os",
".",
"path",
".",
"join",
"(",
"\"resources\"",
",",
"\"ne_50m_admin_1_states_provinces_lakes_shp.shp\"",
",",
")",
",",
"\"r\"",
",",
")",
"as",
"c",
":",
"geom_matches",
"=",
"[",
"x",
"[",
"\"properties\"",
"]",
"for",
"x",
"in",
"c",
".",
"filter",
"(",
"bbox",
"=",
"self",
".",
"bbox",
")",
"]",
"state_matches",
"=",
"[",
"x",
"[",
"\"postal\"",
"]",
"if",
"x",
"[",
"\"admin\"",
"]",
"!=",
"\"Canada\"",
"else",
"\"CN\"",
"for",
"x",
"in",
"geom_matches",
"]",
"self",
".",
"station_codes",
"=",
"[",
"]",
"for",
"state_url",
"in",
"state_urls",
":",
"if",
"state_matches",
"is",
"not",
"None",
":",
"state_abbr",
"=",
"state_url",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
"if",
"state_abbr",
"not",
"in",
"state_matches",
":",
"continue",
"self",
".",
"station_codes",
".",
"extend",
"(",
"self",
".",
"_get_stations_for_state",
"(",
"state_url",
")",
")",
"if",
"self",
".",
"bbox",
":",
"# retrieve metadata for all stations to properly filter them",
"metadata",
"=",
"self",
".",
"_get_metadata",
"(",
"self",
".",
"station_codes",
")",
"parsed_metadata",
"=",
"self",
".",
"parser",
".",
"_parse_metadata",
"(",
"metadata",
")",
"def",
"in_bbox",
"(",
"code",
")",
":",
"lat",
"=",
"parsed_metadata",
"[",
"code",
"]",
"[",
"\"latitude\"",
"]",
"lon",
"=",
"parsed_metadata",
"[",
"code",
"]",
"[",
"\"longitude\"",
"]",
"return",
"(",
"lon",
">=",
"self",
".",
"bbox",
"[",
"0",
"]",
"and",
"lon",
"<=",
"self",
".",
"bbox",
"[",
"2",
"]",
"and",
"lat",
">=",
"self",
".",
"bbox",
"[",
"1",
"]",
"and",
"lat",
"<=",
"self",
".",
"bbox",
"[",
"3",
"]",
")",
"self",
".",
"station_codes",
"=",
"list",
"(",
"filter",
"(",
"in_bbox",
",",
"self",
".",
"station_codes",
")",
")",
"return",
"self",
".",
"station_codes"
] | Gets and caches a list of station codes optionally within a bbox.
Will return the cached version if it exists unless force is True. | [
"Gets",
"and",
"caches",
"a",
"list",
"of",
"station",
"codes",
"optionally",
"within",
"a",
"bbox",
"."
] | 908660385029ecd8eccda8ab3a6b20b47b915c77 | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/collectors/hads/hads.py#L158-L216 | train |
SentimensRG/txCelery | txcelery/defer.py | DeferredTask._monitor_task | def _monitor_task(self):
"""Wrapper that handles the actual asynchronous monitoring of the task
state.
"""
if self.task.state in states.UNREADY_STATES:
reactor.callLater(self.POLL_PERIOD, self._monitor_task)
return
if self.task.state == 'SUCCESS':
self.callback(self.task.result)
elif self.task.state == 'FAILURE':
self.errback(Failure(self.task.result))
elif self.task.state == 'REVOKED':
self.errback(
Failure(defer.CancelledError('Task {0}'.format(self.task.id))))
else:
self.errback(ValueError(
'Cannot respond to `{}` state'.format(self.task.state)
)) | python | def _monitor_task(self):
"""Wrapper that handles the actual asynchronous monitoring of the task
state.
"""
if self.task.state in states.UNREADY_STATES:
reactor.callLater(self.POLL_PERIOD, self._monitor_task)
return
if self.task.state == 'SUCCESS':
self.callback(self.task.result)
elif self.task.state == 'FAILURE':
self.errback(Failure(self.task.result))
elif self.task.state == 'REVOKED':
self.errback(
Failure(defer.CancelledError('Task {0}'.format(self.task.id))))
else:
self.errback(ValueError(
'Cannot respond to `{}` state'.format(self.task.state)
)) | [
"def",
"_monitor_task",
"(",
"self",
")",
":",
"if",
"self",
".",
"task",
".",
"state",
"in",
"states",
".",
"UNREADY_STATES",
":",
"reactor",
".",
"callLater",
"(",
"self",
".",
"POLL_PERIOD",
",",
"self",
".",
"_monitor_task",
")",
"return",
"if",
"self",
".",
"task",
".",
"state",
"==",
"'SUCCESS'",
":",
"self",
".",
"callback",
"(",
"self",
".",
"task",
".",
"result",
")",
"elif",
"self",
".",
"task",
".",
"state",
"==",
"'FAILURE'",
":",
"self",
".",
"errback",
"(",
"Failure",
"(",
"self",
".",
"task",
".",
"result",
")",
")",
"elif",
"self",
".",
"task",
".",
"state",
"==",
"'REVOKED'",
":",
"self",
".",
"errback",
"(",
"Failure",
"(",
"defer",
".",
"CancelledError",
"(",
"'Task {0}'",
".",
"format",
"(",
"self",
".",
"task",
".",
"id",
")",
")",
")",
")",
"else",
":",
"self",
".",
"errback",
"(",
"ValueError",
"(",
"'Cannot respond to `{}` state'",
".",
"format",
"(",
"self",
".",
"task",
".",
"state",
")",
")",
")"
] | Wrapper that handles the actual asynchronous monitoring of the task
state. | [
"Wrapper",
"that",
"handles",
"the",
"actual",
"asynchronous",
"monitoring",
"of",
"the",
"task",
"state",
"."
] | 15b9705198009f5ce6db1bfd0a8af9b8949d6277 | https://github.com/SentimensRG/txCelery/blob/15b9705198009f5ce6db1bfd0a8af9b8949d6277/txcelery/defer.py#L52-L71 | train |
materials-data-facility/toolbox | mdf_toolbox/search_helper.py | _clean_query_string | def _clean_query_string(q):
"""Clean up a query string for searching.
Removes unmatched parentheses and joining operators.
Arguments:
q (str): Query string to be cleaned
Returns:
str: The clean query string.
"""
q = q.replace("()", "").strip()
if q.endswith("("):
q = q[:-1].strip()
# Remove misplaced AND/OR/NOT at end
if q[-3:] == "AND" or q[-3:] == "NOT":
q = q[:-3]
elif q[-2:] == "OR":
q = q[:-2]
# Balance parentheses
while q.count("(") > q.count(")"):
q += ")"
while q.count(")") > q.count("("):
q = "(" + q
return q.strip() | python | def _clean_query_string(q):
"""Clean up a query string for searching.
Removes unmatched parentheses and joining operators.
Arguments:
q (str): Query string to be cleaned
Returns:
str: The clean query string.
"""
q = q.replace("()", "").strip()
if q.endswith("("):
q = q[:-1].strip()
# Remove misplaced AND/OR/NOT at end
if q[-3:] == "AND" or q[-3:] == "NOT":
q = q[:-3]
elif q[-2:] == "OR":
q = q[:-2]
# Balance parentheses
while q.count("(") > q.count(")"):
q += ")"
while q.count(")") > q.count("("):
q = "(" + q
return q.strip() | [
"def",
"_clean_query_string",
"(",
"q",
")",
":",
"q",
"=",
"q",
".",
"replace",
"(",
"\"()\"",
",",
"\"\"",
")",
".",
"strip",
"(",
")",
"if",
"q",
".",
"endswith",
"(",
"\"(\"",
")",
":",
"q",
"=",
"q",
"[",
":",
"-",
"1",
"]",
".",
"strip",
"(",
")",
"# Remove misplaced AND/OR/NOT at end",
"if",
"q",
"[",
"-",
"3",
":",
"]",
"==",
"\"AND\"",
"or",
"q",
"[",
"-",
"3",
":",
"]",
"==",
"\"NOT\"",
":",
"q",
"=",
"q",
"[",
":",
"-",
"3",
"]",
"elif",
"q",
"[",
"-",
"2",
":",
"]",
"==",
"\"OR\"",
":",
"q",
"=",
"q",
"[",
":",
"-",
"2",
"]",
"# Balance parentheses",
"while",
"q",
".",
"count",
"(",
"\"(\"",
")",
">",
"q",
".",
"count",
"(",
"\")\"",
")",
":",
"q",
"+=",
"\")\"",
"while",
"q",
".",
"count",
"(",
"\")\"",
")",
">",
"q",
".",
"count",
"(",
"\"(\"",
")",
":",
"q",
"=",
"\"(\"",
"+",
"q",
"return",
"q",
".",
"strip",
"(",
")"
] | Clean up a query string for searching.
Removes unmatched parentheses and joining operators.
Arguments:
q (str): Query string to be cleaned
Returns:
str: The clean query string. | [
"Clean",
"up",
"a",
"query",
"string",
"for",
"searching",
"."
] | 2a4ac2b6a892238263008efa6a5f3923d9a83505 | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L40-L66 | train |
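For instance (assuming `_clean_query_string` from above is in scope; the field names are only examples), a dangling operator and an unmatched parenthesis are both repaired:

    print(_clean_query_string("mdf.elements:Al AND ("))
    # -> 'mdf.elements:Al'   (trailing "(" dropped, then the dangling AND removed)

    print(_clean_query_string("(mdf.elements:Al AND mdf.source_name:oqmd"))
    # -> '(mdf.elements:Al AND mdf.source_name:oqmd)'   (parentheses balanced)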
materials-data-facility/toolbox | mdf_toolbox/search_helper.py | _validate_query | def _validate_query(query):
"""Validate and clean up a query to be sent to Search.
Cleans the query string, removes unneeded parameters, and validates for correctness.
Does not modify the original argument.
Raises an Exception on invalid input.
Arguments:
query (dict): The query to validate.
Returns:
dict: The validated query.
"""
query = deepcopy(query)
# q is always required
if query["q"] == BLANK_QUERY["q"]:
raise ValueError("No query specified.")
query["q"] = _clean_query_string(query["q"])
# limit should be set to appropriate default if not specified
if query["limit"] is None:
query["limit"] = SEARCH_LIMIT if query["advanced"] else NONADVANCED_LIMIT
# If specified, the limit should not be greater than the Search maximum
elif query["limit"] > SEARCH_LIMIT:
warnings.warn('Reduced result limit from {} to the Search maximum: {}'
.format(query["limit"], SEARCH_LIMIT), RuntimeWarning)
query["limit"] = SEARCH_LIMIT
# Remove all blank/default values
for key, val in BLANK_QUERY.items():
# Default for get is NaN so comparison is always False
if query.get(key, float('nan')) == val:
query.pop(key)
# Remove unsupported fields
to_remove = [field for field in query.keys() if field not in BLANK_QUERY.keys()]
[query.pop(field) for field in to_remove]
return query | python | def _validate_query(query):
"""Validate and clean up a query to be sent to Search.
Cleans the query string, removes unneeded parameters, and validates for correctness.
Does not modify the original argument.
Raises an Exception on invalid input.
Arguments:
query (dict): The query to validate.
Returns:
dict: The validated query.
"""
query = deepcopy(query)
# q is always required
if query["q"] == BLANK_QUERY["q"]:
raise ValueError("No query specified.")
query["q"] = _clean_query_string(query["q"])
# limit should be set to appropriate default if not specified
if query["limit"] is None:
query["limit"] = SEARCH_LIMIT if query["advanced"] else NONADVANCED_LIMIT
# If specified, the limit should not be greater than the Search maximum
elif query["limit"] > SEARCH_LIMIT:
warnings.warn('Reduced result limit from {} to the Search maximum: {}'
.format(query["limit"], SEARCH_LIMIT), RuntimeWarning)
query["limit"] = SEARCH_LIMIT
# Remove all blank/default values
for key, val in BLANK_QUERY.items():
# Default for get is NaN so comparison is always False
if query.get(key, float('nan')) == val:
query.pop(key)
# Remove unsupported fields
to_remove = [field for field in query.keys() if field not in BLANK_QUERY.keys()]
[query.pop(field) for field in to_remove]
return query | [
"def",
"_validate_query",
"(",
"query",
")",
":",
"query",
"=",
"deepcopy",
"(",
"query",
")",
"# q is always required",
"if",
"query",
"[",
"\"q\"",
"]",
"==",
"BLANK_QUERY",
"[",
"\"q\"",
"]",
":",
"raise",
"ValueError",
"(",
"\"No query specified.\"",
")",
"query",
"[",
"\"q\"",
"]",
"=",
"_clean_query_string",
"(",
"query",
"[",
"\"q\"",
"]",
")",
"# limit should be set to appropriate default if not specified",
"if",
"query",
"[",
"\"limit\"",
"]",
"is",
"None",
":",
"query",
"[",
"\"limit\"",
"]",
"=",
"SEARCH_LIMIT",
"if",
"query",
"[",
"\"advanced\"",
"]",
"else",
"NONADVANCED_LIMIT",
"# If specified, the limit should not be greater than the Search maximum",
"elif",
"query",
"[",
"\"limit\"",
"]",
">",
"SEARCH_LIMIT",
":",
"warnings",
".",
"warn",
"(",
"'Reduced result limit from {} to the Search maximum: {}'",
".",
"format",
"(",
"query",
"[",
"\"limit\"",
"]",
",",
"SEARCH_LIMIT",
")",
",",
"RuntimeWarning",
")",
"query",
"[",
"\"limit\"",
"]",
"=",
"SEARCH_LIMIT",
"# Remove all blank/default values",
"for",
"key",
",",
"val",
"in",
"BLANK_QUERY",
".",
"items",
"(",
")",
":",
"# Default for get is NaN so comparison is always False",
"if",
"query",
".",
"get",
"(",
"key",
",",
"float",
"(",
"'nan'",
")",
")",
"==",
"val",
":",
"query",
".",
"pop",
"(",
"key",
")",
"# Remove unsupported fields",
"to_remove",
"=",
"[",
"field",
"for",
"field",
"in",
"query",
".",
"keys",
"(",
")",
"if",
"field",
"not",
"in",
"BLANK_QUERY",
".",
"keys",
"(",
")",
"]",
"[",
"query",
".",
"pop",
"(",
"field",
")",
"for",
"field",
"in",
"to_remove",
"]",
"return",
"query"
] | Validate and clean up a query to be sent to Search.
Cleans the query string, removes unneeded parameters, and validates for correctness.
Does not modify the original argument.
Raises an Exception on invalid input.
Arguments:
query (dict): The query to validate.
Returns:
dict: The validated query. | [
"Validate",
"and",
"clean",
"up",
"a",
"query",
"to",
"be",
"sent",
"to",
"Search",
".",
"Cleans",
"the",
"query",
"string",
"removes",
"unneeded",
"parameters",
"and",
"validates",
"for",
"correctness",
".",
"Does",
"not",
"modify",
"the",
"original",
"argument",
".",
"Raises",
"an",
"Exception",
"on",
"invalid",
"input",
"."
] | 2a4ac2b6a892238263008efa6a5f3923d9a83505 | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L69-L107 | train |
materials-data-facility/toolbox | mdf_toolbox/search_helper.py | SearchHelper._term | def _term(self, term):
"""Add a term to the query.
Arguments:
term (str): The term to add.
Returns:
SearchHelper: Self
"""
# All terms must be strings for Elasticsearch
term = str(term)
if term:
self.__query["q"] += term
return self | python | def _term(self, term):
"""Add a term to the query.
Arguments:
term (str): The term to add.
Returns:
SearchHelper: Self
"""
# All terms must be strings for Elasticsearch
term = str(term)
if term:
self.__query["q"] += term
return self | [
"def",
"_term",
"(",
"self",
",",
"term",
")",
":",
"# All terms must be strings for Elasticsearch",
"term",
"=",
"str",
"(",
"term",
")",
"if",
"term",
":",
"self",
".",
"__query",
"[",
"\"q\"",
"]",
"+=",
"term",
"return",
"self"
] | Add a term to the query.
Arguments:
term (str): The term to add.
Returns:
SearchHelper: Self | [
"Add",
"a",
"term",
"to",
"the",
"query",
"."
] | 2a4ac2b6a892238263008efa6a5f3923d9a83505 | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L197-L210 | train |
materials-data-facility/toolbox | mdf_toolbox/search_helper.py | SearchHelper._operator | def _operator(self, op, close_group=False):
"""Add an operator between terms.
There must be a term added before using this method.
All operators have helpers, so this method is usually not necessary to directly invoke.
Arguments:
op (str): The operator to add. Must be in the OP_LIST.
close_group (bool): If ``True``, will end the current parenthetical
group and start a new one.
If ``False``, will continue current group.
Example::
"(foo AND bar)" is one group.
"(foo) AND (bar)" is two groups.
Returns:
SearchHelper: Self
"""
op = op.upper().strip()
if op not in OP_LIST:
raise ValueError("Error: '{}' is not a valid operator.".format(op))
else:
if close_group:
op = ") " + op + " ("
else:
op = " " + op + " "
self.__query["q"] += op
return self | python | def _operator(self, op, close_group=False):
"""Add an operator between terms.
There must be a term added before using this method.
All operators have helpers, so this method is usually not necessary to directly invoke.
Arguments:
op (str): The operator to add. Must be in the OP_LIST.
close_group (bool): If ``True``, will end the current parenthetical
group and start a new one.
If ``False``, will continue current group.
Example::
"(foo AND bar)" is one group.
"(foo) AND (bar)" is two groups.
Returns:
SearchHelper: Self
"""
op = op.upper().strip()
if op not in OP_LIST:
raise ValueError("Error: '{}' is not a valid operator.".format(op))
else:
if close_group:
op = ") " + op + " ("
else:
op = " " + op + " "
self.__query["q"] += op
return self | [
"def",
"_operator",
"(",
"self",
",",
"op",
",",
"close_group",
"=",
"False",
")",
":",
"op",
"=",
"op",
".",
"upper",
"(",
")",
".",
"strip",
"(",
")",
"if",
"op",
"not",
"in",
"OP_LIST",
":",
"raise",
"ValueError",
"(",
"\"Error: '{}' is not a valid operator.\"",
".",
"format",
"(",
"op",
")",
")",
"else",
":",
"if",
"close_group",
":",
"op",
"=",
"\") \"",
"+",
"op",
"+",
"\" (\"",
"else",
":",
"op",
"=",
"\" \"",
"+",
"op",
"+",
"\" \"",
"self",
".",
"__query",
"[",
"\"q\"",
"]",
"+=",
"op",
"return",
"self"
] | Add an operator between terms.
There must be a term added before using this method.
All operators have helpers, so this method is usually not necessary to directly invoke.
Arguments:
op (str): The operator to add. Must be in the OP_LIST.
close_group (bool): If ``True``, will end the current parenthetical
group and start a new one.
If ``False``, will continue current group.
Example::
"(foo AND bar)" is one group.
"(foo) AND (bar)" is two groups.
Returns:
SearchHelper: Self | [
"Add",
"an",
"operator",
"between",
"terms",
".",
"There",
"must",
"be",
"a",
"term",
"added",
"before",
"using",
"this",
"method",
".",
"All",
"operators",
"have",
"helpers",
"so",
"this",
"method",
"is",
"usually",
"not",
"necessary",
"to",
"directly",
"invoke",
"."
] | 2a4ac2b6a892238263008efa6a5f3923d9a83505 | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L244-L271 | train |
materials-data-facility/toolbox | mdf_toolbox/search_helper.py | SearchHelper._and_join | def _and_join(self, close_group=False):
"""Combine terms with AND.
There must be a term added before using this method.
Arguments:
close_group (bool): If ``True``, will end the current group and start a new one.
If ``False``, will continue current group.
Example::
If the current query is "(term1"
.and(close_group=True) => "(term1) AND ("
.and(close_group=False) => "(term1 AND "
Returns:
SearchHelper: Self
"""
if not self.initialized:
raise ValueError("You must add a search term before adding an operator.")
else:
self._operator("AND", close_group=close_group)
return self | python | def _and_join(self, close_group=False):
"""Combine terms with AND.
There must be a term added before using this method.
Arguments:
close_group (bool): If ``True``, will end the current group and start a new one.
If ``False``, will continue current group.
Example::
If the current query is "(term1"
.and(close_group=True) => "(term1) AND ("
.and(close_group=False) => "(term1 AND "
Returns:
SearchHelper: Self
"""
if not self.initialized:
raise ValueError("You must add a search term before adding an operator.")
else:
self._operator("AND", close_group=close_group)
return self | [
"def",
"_and_join",
"(",
"self",
",",
"close_group",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"initialized",
":",
"raise",
"ValueError",
"(",
"\"You must add a search term before adding an operator.\"",
")",
"else",
":",
"self",
".",
"_operator",
"(",
"\"AND\"",
",",
"close_group",
"=",
"close_group",
")",
"return",
"self"
] | Combine terms with AND.
There must be a term added before using this method.
Arguments:
close_group (bool): If ``True``, will end the current group and start a new one.
If ``False``, will continue current group.
Example::
If the current query is "(term1"
.and(close_group=True) => "(term1) AND ("
.and(close_group=False) => "(term1 AND "
Returns:
SearchHelper: Self | [
"Combine",
"terms",
"with",
"AND",
".",
"There",
"must",
"be",
"a",
"term",
"added",
"before",
"using",
"this",
"method",
"."
] | 2a4ac2b6a892238263008efa6a5f3923d9a83505 | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L273-L294 | train |
materials-data-facility/toolbox | mdf_toolbox/search_helper.py | SearchHelper._or_join | def _or_join(self, close_group=False):
"""Combine terms with OR.
There must be a term added before using this method.
Arguments:
close_group (bool): If ``True``, will end the current group and start a new one.
If ``False``, will continue current group.
Example:
If the current query is "(term1"
.or(close_group=True) => "(term1) OR("
.or(close_group=False) => "(term1 OR "
Returns:
SearchHelper: Self
"""
if not self.initialized:
raise ValueError("You must add a search term before adding an operator.")
else:
self._operator("OR", close_group=close_group)
return self | python | def _or_join(self, close_group=False):
"""Combine terms with OR.
There must be a term added before using this method.
Arguments:
close_group (bool): If ``True``, will end the current group and start a new one.
If ``False``, will continue current group.
Example:
If the current query is "(term1"
.or(close_group=True) => "(term1) OR("
.or(close_group=False) => "(term1 OR "
Returns:
SearchHelper: Self
"""
if not self.initialized:
raise ValueError("You must add a search term before adding an operator.")
else:
self._operator("OR", close_group=close_group)
return self | [
"def",
"_or_join",
"(",
"self",
",",
"close_group",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"initialized",
":",
"raise",
"ValueError",
"(",
"\"You must add a search term before adding an operator.\"",
")",
"else",
":",
"self",
".",
"_operator",
"(",
"\"OR\"",
",",
"close_group",
"=",
"close_group",
")",
"return",
"self"
] | Combine terms with OR.
There must be a term added before using this method.
Arguments:
close_group (bool): If ``True``, will end the current group and start a new one.
If ``False``, will continue current group.
Example:
If the current query is "(term1"
.or(close_group=True) => "(term1) OR("
.or(close_group=False) => "(term1 OR "
Returns:
SearchHelper: Self | [
"Combine",
"terms",
"with",
"OR",
".",
"There",
"must",
"be",
"a",
"term",
"added",
"before",
"using",
"this",
"method",
"."
] | 2a4ac2b6a892238263008efa6a5f3923d9a83505 | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L296-L317 | train |
materials-data-facility/toolbox | mdf_toolbox/search_helper.py | SearchHelper._mapping | def _mapping(self):
"""Fetch the entire mapping for the specified index.
Returns:
dict: The full mapping for the index.
"""
return (self.__search_client.get(
"/unstable/index/{}/mapping".format(mdf_toolbox.translate_index(self.index)))
["mappings"]) | python | def _mapping(self):
"""Fetch the entire mapping for the specified index.
Returns:
dict: The full mapping for the index.
"""
return (self.__search_client.get(
"/unstable/index/{}/mapping".format(mdf_toolbox.translate_index(self.index)))
["mappings"]) | [
"def",
"_mapping",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"__search_client",
".",
"get",
"(",
"\"/unstable/index/{}/mapping\"",
".",
"format",
"(",
"mdf_toolbox",
".",
"translate_index",
"(",
"self",
".",
"index",
")",
")",
")",
"[",
"\"mappings\"",
"]",
")"
] | Fetch the entire mapping for the specified index.
Returns:
dict: The full mapping for the index. | [
"Fetch",
"the",
"entire",
"mapping",
"for",
"the",
"specified",
"index",
"."
] | 2a4ac2b6a892238263008efa6a5f3923d9a83505 | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L418-L426 | train |
materials-data-facility/toolbox | mdf_toolbox/search_helper.py | SearchHelper.match_term | def match_term(self, value, required=True, new_group=False):
"""Add a fulltext search term to the query.
Warning:
Do not use this method with any other query-building helpers. This method
is only for building fulltext queries (in non-advanced mode). Using other
helpers, such as ``match_field()``, will cause the query to run in advanced mode.
If a fulltext term query is run in advanced mode, it will have unexpected
results.
Arguments:
value (str): The term to match.
required (bool): If ``True``, will add term with ``AND``.
If ``False``, will use ``OR``. **Default:** ``True``.
new_group (bool): If ``True``, will separate the term into a new parenthetical group.
If ``False``, will not.
**Default:** ``False``.
Returns:
SearchHelper: Self
"""
# If not the start of the query string, add an AND or OR
if self.initialized:
if required:
self._and_join(new_group)
else:
self._or_join(new_group)
self._term(value)
return self | python | def match_term(self, value, required=True, new_group=False):
"""Add a fulltext search term to the query.
Warning:
Do not use this method with any other query-building helpers. This method
is only for building fulltext queries (in non-advanced mode). Using other
helpers, such as ``match_field()``, will cause the query to run in advanced mode.
If a fulltext term query is run in advanced mode, it will have unexpected
results.
Arguments:
value (str): The term to match.
required (bool): If ``True``, will add term with ``AND``.
If ``False``, will use ``OR``. **Default:** ``True``.
new_group (bool): If ``True``, will separate the term into a new parenthetical group.
If ``False``, will not.
**Default:** ``False``.
Returns:
SearchHelper: Self
"""
# If not the start of the query string, add an AND or OR
if self.initialized:
if required:
self._and_join(new_group)
else:
self._or_join(new_group)
self._term(value)
return self | [
"def",
"match_term",
"(",
"self",
",",
"value",
",",
"required",
"=",
"True",
",",
"new_group",
"=",
"False",
")",
":",
"# If not the start of the query string, add an AND or OR",
"if",
"self",
".",
"initialized",
":",
"if",
"required",
":",
"self",
".",
"_and_join",
"(",
"new_group",
")",
"else",
":",
"self",
".",
"_or_join",
"(",
"new_group",
")",
"self",
".",
"_term",
"(",
"value",
")",
"return",
"self"
] | Add a fulltext search term to the query.
Warning:
Do not use this method with any other query-building helpers. This method
is only for building fulltext queries (in non-advanced mode). Using other
helpers, such as ``match_field()``, will cause the query to run in advanced mode.
If a fulltext term query is run in advanced mode, it will have unexpected
results.
Arguments:
value (str): The term to match.
required (bool): If ``True``, will add term with ``AND``.
If ``False``, will use ``OR``. **Default:** ``True``.
new_group (bool): If ``True``, will separate the term into a new parenthetical group.
If ``False``, will not.
**Default:** ``False``.
Returns:
SearchHelper: Self | [
"Add",
"a",
"fulltext",
"search",
"term",
"to",
"the",
"query",
"."
] | 2a4ac2b6a892238263008efa6a5f3923d9a83505 | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L435-L463 | train |
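A minimal fulltext-query sketch built only from the match_term signature shown in this record; the SearchHelper constructor arguments are assumptions, not taken from the source:
from mdf_toolbox.search_helper import SearchHelper

sh = SearchHelper(index="mdf")                 # constructor arguments assumed
sh.match_term("aluminum")                      # first term needs no operator
sh.match_term("oxide", required=True)          # joined with AND
sh.match_term("thin film", required=False,     # joined with OR, in a new ( ) group
              new_group=True)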
materials-data-facility/toolbox | mdf_toolbox/search_helper.py | SearchHelper.match_exists | def match_exists(self, field, required=True, new_group=False):
"""Require a field to exist in the results.
Matches will have some value in ``field``.
Arguments:
field (str): The field to check.
The field must be namespaced according to Elasticsearch rules
using the dot syntax.
For example, ``"mdf.source_name"`` is the ``source_name`` field
of the ``mdf`` dictionary.
required (bool): If ``True``, will add term with ``AND``.
If ``False``, will use ``OR``. **Default:** ``True``.
new_group (bool): If ``True``, will separate the term into a new parenthetical group.
If ``False``, will not.
**Default:** ``False``.
Returns:
SearchHelper: Self
"""
return self.match_field(field, "*", required=required, new_group=new_group) | python | def match_exists(self, field, required=True, new_group=False):
"""Require a field to exist in the results.
Matches will have some value in ``field``.
Arguments:
field (str): The field to check.
The field must be namespaced according to Elasticsearch rules
using the dot syntax.
For example, ``"mdf.source_name"`` is the ``source_name`` field
of the ``mdf`` dictionary.
required (bool): If ``True``, will add term with ``AND``.
If ``False``, will use ``OR``. **Default:** ``True``.
new_group (bool): If ``True``, will separate the term into a new parenthetical group.
If ``False``, will not.
**Default:** ``False``.
Returns:
SearchHelper: Self
"""
return self.match_field(field, "*", required=required, new_group=new_group) | [
"def",
"match_exists",
"(",
"self",
",",
"field",
",",
"required",
"=",
"True",
",",
"new_group",
"=",
"False",
")",
":",
"return",
"self",
".",
"match_field",
"(",
"field",
",",
"\"*\"",
",",
"required",
"=",
"required",
",",
"new_group",
"=",
"new_group",
")"
] | Require a field to exist in the results.
Matches will have some value in ``field``.
Arguments:
field (str): The field to check.
The field must be namespaced according to Elasticsearch rules
using the dot syntax.
For example, ``"mdf.source_name"`` is the ``source_name`` field
of the ``mdf`` dictionary.
required (bool): If ``True``, will add term with ``AND``.
If ``False``, will use ``OR``. **Default:** ``True``.
new_group (bool): If ``True``, will separate the term into a new parenthetical group.
If ``False``, will not.
**Default:** ``False``.
Returns:
SearchHelper: Self | [
"Require",
"a",
"field",
"to",
"exist",
"in",
"the",
"results",
".",
"Matches",
"will",
"have",
"some",
"value",
"in",
"field",
"."
] | 2a4ac2b6a892238263008efa6a5f3923d9a83505 | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L522-L541 | train |
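A short sketch of match_exists; as the body above shows, it simply delegates to match_field(field, "*"). The constructor arguments are again assumed:
from mdf_toolbox.search_helper import SearchHelper

sh = SearchHelper(index="mdf")        # constructor arguments assumed
sh.match_exists("mdf.source_name")    # results must have some value in the field
# equivalent, per the body above: sh.match_field("mdf.source_name", "*")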
materials-data-facility/toolbox | mdf_toolbox/search_helper.py | SearchHelper.match_not_exists | def match_not_exists(self, field, new_group=False):
"""Require a field to not exist in the results.
Matches will not have ``field`` present.
Arguments:
field (str): The field to check.
The field must be namespaced according to Elasticsearch rules
using the dot syntax.
For example, ``"mdf.source_name"`` is the ``source_name`` field
of the ``mdf`` dictionary.
new_group (bool): If ``True``, will separate the term into a new parenthetical group.
If ``False``, will not.
**Default:** ``False``.
Returns:
SearchHelper: Self
"""
return self.exclude_field(field, "*", new_group=new_group) | python | def match_not_exists(self, field, new_group=False):
"""Require a field to not exist in the results.
Matches will not have ``field`` present.
Arguments:
field (str): The field to check.
The field must be namespaced according to Elasticsearch rules
using the dot syntax.
For example, ``"mdf.source_name"`` is the ``source_name`` field
of the ``mdf`` dictionary.
new_group (bool): If ``True``, will separate the term into a new parenthetical group.
If ``False``, will not.
**Default:** ``False``.
Returns:
SearchHelper: Self
"""
return self.exclude_field(field, "*", new_group=new_group) | [
"def",
"match_not_exists",
"(",
"self",
",",
"field",
",",
"new_group",
"=",
"False",
")",
":",
"return",
"self",
".",
"exclude_field",
"(",
"field",
",",
"\"*\"",
",",
"new_group",
"=",
"new_group",
")"
] | Require a field to not exist in the results.
Matches will not have ``field`` present.
Arguments:
field (str): The field to check.
The field must be namespaced according to Elasticsearch rules
using the dot syntax.
For example, ``"mdf.source_name"`` is the ``source_name`` field
of the ``mdf`` dictionary.
new_group (bool): If ``True``, will separate the term into a new parenthetical group.
If ``False``, will not.
**Default:** ``False``.
Returns:
SearchHelper: Self | [
"Require",
"a",
"field",
"to",
"not",
"exist",
"in",
"the",
"results",
".",
"Matches",
"will",
"not",
"have",
"field",
"present",
"."
] | 2a4ac2b6a892238263008efa6a5f3923d9a83505 | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L543-L560 | train |
materials-data-facility/toolbox | mdf_toolbox/search_helper.py | SearchHelper.show_fields | def show_fields(self, block=None):
"""Retrieve and return the mapping for the given metadata block.
Arguments:
block (str): The top-level field to fetch the mapping for (for example, ``"mdf"``),
or the special values ``None`` for everything or ``"top"`` for just the
top-level fields.
**Default:** ``None``.
index (str): The Search index to map. **Default:** The current index.
Returns:
dict: ``field:datatype`` pairs.
"""
mapping = self._mapping()
if block is None:
return mapping
elif block == "top":
blocks = set()
for key in mapping.keys():
blocks.add(key.split(".")[0])
block_map = {}
for b in blocks:
block_map[b] = "object"
else:
block_map = {}
for key, value in mapping.items():
if key.startswith(block):
block_map[key] = value
return block_map | python | def show_fields(self, block=None):
"""Retrieve and return the mapping for the given metadata block.
Arguments:
block (str): The top-level field to fetch the mapping for (for example, ``"mdf"``),
or the special values ``None`` for everything or ``"top"`` for just the
top-level fields.
**Default:** ``None``.
index (str): The Search index to map. **Default:** The current index.
Returns:
dict: ``field:datatype`` pairs.
"""
mapping = self._mapping()
if block is None:
return mapping
elif block == "top":
blocks = set()
for key in mapping.keys():
blocks.add(key.split(".")[0])
block_map = {}
for b in blocks:
block_map[b] = "object"
else:
block_map = {}
for key, value in mapping.items():
if key.startswith(block):
block_map[key] = value
return block_map | [
"def",
"show_fields",
"(",
"self",
",",
"block",
"=",
"None",
")",
":",
"mapping",
"=",
"self",
".",
"_mapping",
"(",
")",
"if",
"block",
"is",
"None",
":",
"return",
"mapping",
"elif",
"block",
"==",
"\"top\"",
":",
"blocks",
"=",
"set",
"(",
")",
"for",
"key",
"in",
"mapping",
".",
"keys",
"(",
")",
":",
"blocks",
".",
"add",
"(",
"key",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
")",
"block_map",
"=",
"{",
"}",
"for",
"b",
"in",
"blocks",
":",
"block_map",
"[",
"b",
"]",
"=",
"\"object\"",
"else",
":",
"block_map",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"mapping",
".",
"items",
"(",
")",
":",
"if",
"key",
".",
"startswith",
"(",
"block",
")",
":",
"block_map",
"[",
"key",
"]",
"=",
"value",
"return",
"block_map"
] | Retrieve and return the mapping for the given metadata block.
Arguments:
block (str): The top-level field to fetch the mapping for (for example, ``"mdf"``),
or the special values ``None`` for everything or ``"top"`` for just the
top-level fields.
**Default:** ``None``.
index (str): The Search index to map. **Default:** The current index.
Returns:
dict: ``field:datatype`` pairs. | [
"Retrieve",
"and",
"return",
"the",
"mapping",
"for",
"the",
"given",
"metadata",
"block",
"."
] | 2a4ac2b6a892238263008efa6a5f3923d9a83505 | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L764-L792 | train |
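A usage sketch for show_fields, following the three cases handled in the body (index name assumed, and the call needs a live Globus Search client behind the helper):
from mdf_toolbox.search_helper import SearchHelper

sh = SearchHelper(index="mdf")          # constructor arguments assumed
everything = sh.show_fields()           # full field -> datatype mapping
top_level = sh.show_fields("top")       # just the top-level blocks, typed "object"
mdf_block = sh.show_fields("mdf")       # only fields whose name starts with "mdf"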
tamasgal/km3pipe | km3pipe/dataclasses.py | inflate_dtype | def inflate_dtype(arr, names):
"""Create structured dtype from a 2d ndarray with unstructured dtype."""
arr = np.asanyarray(arr)
if has_structured_dt(arr):
return arr.dtype
s_dt = arr.dtype
dt = [(n, s_dt) for n in names]
dt = np.dtype(dt)
return dt | python | def inflate_dtype(arr, names):
"""Create structured dtype from a 2d ndarray with unstructured dtype."""
arr = np.asanyarray(arr)
if has_structured_dt(arr):
return arr.dtype
s_dt = arr.dtype
dt = [(n, s_dt) for n in names]
dt = np.dtype(dt)
return dt | [
"def",
"inflate_dtype",
"(",
"arr",
",",
"names",
")",
":",
"arr",
"=",
"np",
".",
"asanyarray",
"(",
"arr",
")",
"if",
"has_structured_dt",
"(",
"arr",
")",
":",
"return",
"arr",
".",
"dtype",
"s_dt",
"=",
"arr",
".",
"dtype",
"dt",
"=",
"[",
"(",
"n",
",",
"s_dt",
")",
"for",
"n",
"in",
"names",
"]",
"dt",
"=",
"np",
".",
"dtype",
"(",
"dt",
")",
"return",
"dt"
] | Create structured dtype from a 2d ndarray with unstructured dtype. | [
"Create",
"structured",
"dtype",
"from",
"a",
"2d",
"ndarray",
"with",
"unstructured",
"dtype",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/dataclasses.py#L48-L56 | train |
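A worked example of inflate_dtype: a plain 2-column float array plus a list of names yields a structured dtype with one float64 field per name.
import numpy as np
from km3pipe.dataclasses import inflate_dtype

arr = np.array([[1.0, 2.0],
                [3.0, 4.0]])                  # unstructured, dtype float64
dt = inflate_dtype(arr, names=["x", "y"])
# dt == np.dtype([("x", "<f8"), ("y", "<f8")])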
tamasgal/km3pipe | km3pipe/dataclasses.py | Table.from_dict | def from_dict(cls, arr_dict, dtype=None, fillna=False, **kwargs):
"""Generate a table from a dictionary of arrays.
"""
# i hope order of keys == order of values
if dtype is None:
names = sorted(list(arr_dict.keys()))
else:
dtype = np.dtype(dtype)
dt_names = [f for f in dtype.names]
dict_names = [k for k in arr_dict.keys()]
missing_names = set(dt_names) - set(dict_names)
if missing_names:
if fillna:
dict_names = dt_names
for missing_name in missing_names:
arr_dict[missing_name] = np.nan
else:
raise KeyError(
'Dictionary keys and dtype fields do not match!'
)
names = list(dtype.names)
arr_dict = cls._expand_scalars(arr_dict)
data = [arr_dict[key] for key in names]
return cls(np.rec.fromarrays(data, names=names, dtype=dtype), **kwargs) | python | def from_dict(cls, arr_dict, dtype=None, fillna=False, **kwargs):
"""Generate a table from a dictionary of arrays.
"""
# i hope order of keys == order of values
if dtype is None:
names = sorted(list(arr_dict.keys()))
else:
dtype = np.dtype(dtype)
dt_names = [f for f in dtype.names]
dict_names = [k for k in arr_dict.keys()]
missing_names = set(dt_names) - set(dict_names)
if missing_names:
if fillna:
dict_names = dt_names
for missing_name in missing_names:
arr_dict[missing_name] = np.nan
else:
raise KeyError(
'Dictionary keys and dtype fields do not match!'
)
names = list(dtype.names)
arr_dict = cls._expand_scalars(arr_dict)
data = [arr_dict[key] for key in names]
return cls(np.rec.fromarrays(data, names=names, dtype=dtype), **kwargs) | [
"def",
"from_dict",
"(",
"cls",
",",
"arr_dict",
",",
"dtype",
"=",
"None",
",",
"fillna",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# i hope order of keys == order or values",
"if",
"dtype",
"is",
"None",
":",
"names",
"=",
"sorted",
"(",
"list",
"(",
"arr_dict",
".",
"keys",
"(",
")",
")",
")",
"else",
":",
"dtype",
"=",
"np",
".",
"dtype",
"(",
"dtype",
")",
"dt_names",
"=",
"[",
"f",
"for",
"f",
"in",
"dtype",
".",
"names",
"]",
"dict_names",
"=",
"[",
"k",
"for",
"k",
"in",
"arr_dict",
".",
"keys",
"(",
")",
"]",
"missing_names",
"=",
"set",
"(",
"dt_names",
")",
"-",
"set",
"(",
"dict_names",
")",
"if",
"missing_names",
":",
"if",
"fillna",
":",
"dict_names",
"=",
"dt_names",
"for",
"missing_name",
"in",
"missing_names",
":",
"arr_dict",
"[",
"missing_name",
"]",
"=",
"np",
".",
"nan",
"else",
":",
"raise",
"KeyError",
"(",
"'Dictionary keys and dtype fields do not match!'",
")",
"names",
"=",
"list",
"(",
"dtype",
".",
"names",
")",
"arr_dict",
"=",
"cls",
".",
"_expand_scalars",
"(",
"arr_dict",
")",
"data",
"=",
"[",
"arr_dict",
"[",
"key",
"]",
"for",
"key",
"in",
"names",
"]",
"return",
"cls",
"(",
"np",
".",
"rec",
".",
"fromarrays",
"(",
"data",
",",
"names",
"=",
"names",
",",
"dtype",
"=",
"dtype",
")",
",",
"*",
"*",
"kwargs",
")"
] | Generate a table from a dictionary of arrays. | [
"Generate",
"a",
"table",
"from",
"a",
"dictionary",
"of",
"arrays",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/dataclasses.py#L246-L270 | train |
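A small sketch of Table.from_dict; the hit-like column names and the h5loc value are illustrative only.
import numpy as np
from km3pipe.dataclasses import Table

hits = Table.from_dict(
    {"time": np.array([10.0, 20.0]), "tot": np.array([25, 31])},
    h5loc="/hits",                            # illustrative location
)

# With an explicit dtype, a missing float column can be NaN-filled:
dt = np.dtype([("time", "f8"), ("tot", "i4"), ("weight", "f8")])
hits2 = Table.from_dict(
    {"time": np.array([1.0]), "tot": np.array([12])},
    dtype=dt,
    fillna=True,                              # "weight" becomes NaN
)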
tamasgal/km3pipe | km3pipe/dataclasses.py | Table.from_template | def from_template(cls, data, template):
"""Create a table from a predefined datatype.
See the ``templates_avail`` property for available names.
Parameters
----------
data
Data in a format that the ``__init__`` understands.
template: str or dict
Name of the dtype template to use from ``kp.dataclasses_templates``
or a ``dict`` containing the required attributes (see the other
templates for reference).
"""
name = DEFAULT_NAME
if isinstance(template, str):
name = template
table_info = TEMPLATES[name]
else:
table_info = template
if 'name' in table_info:
name = table_info['name']
dt = table_info['dtype']
loc = table_info['h5loc']
split = table_info['split_h5']
h5singleton = table_info['h5singleton']
return cls(
data,
h5loc=loc,
dtype=dt,
split_h5=split,
name=name,
h5singleton=h5singleton
) | python | def from_template(cls, data, template):
"""Create a table from a predefined datatype.
See the ``templates_avail`` property for available names.
Parameters
----------
data
Data in a format that the ``__init__`` understands.
template: str or dict
Name of the dtype template to use from ``kp.dataclasses_templates``
or a ``dict`` containing the required attributes (see the other
templates for reference).
"""
name = DEFAULT_NAME
if isinstance(template, str):
name = template
table_info = TEMPLATES[name]
else:
table_info = template
if 'name' in table_info:
name = table_info['name']
dt = table_info['dtype']
loc = table_info['h5loc']
split = table_info['split_h5']
h5singleton = table_info['h5singleton']
return cls(
data,
h5loc=loc,
dtype=dt,
split_h5=split,
name=name,
h5singleton=h5singleton
) | [
"def",
"from_template",
"(",
"cls",
",",
"data",
",",
"template",
")",
":",
"name",
"=",
"DEFAULT_NAME",
"if",
"isinstance",
"(",
"template",
",",
"str",
")",
":",
"name",
"=",
"template",
"table_info",
"=",
"TEMPLATES",
"[",
"name",
"]",
"else",
":",
"table_info",
"=",
"template",
"if",
"'name'",
"in",
"table_info",
":",
"name",
"=",
"table_info",
"[",
"'name'",
"]",
"dt",
"=",
"table_info",
"[",
"'dtype'",
"]",
"loc",
"=",
"table_info",
"[",
"'h5loc'",
"]",
"split",
"=",
"table_info",
"[",
"'split_h5'",
"]",
"h5singleton",
"=",
"table_info",
"[",
"'h5singleton'",
"]",
"return",
"cls",
"(",
"data",
",",
"h5loc",
"=",
"loc",
",",
"dtype",
"=",
"dt",
",",
"split_h5",
"=",
"split",
",",
"name",
"=",
"name",
",",
"h5singleton",
"=",
"h5singleton",
")"
] | Create a table from a predefined datatype.
See the ``templates_avail`` property for available names.
Parameters
----------
data
Data in a format that the ``__init__`` understands.
template: str or dict
Name of the dtype template to use from ``kp.dataclasses_templates``
or a ``dict`` containing the required attributes (see the other
templates for reference). | [
"Create",
"a",
"table",
"from",
"a",
"predefined",
"datatype",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/dataclasses.py#L314-L348 | train |
tamasgal/km3pipe | km3pipe/dataclasses.py | Table.append_columns | def append_columns(self, colnames, values, **kwargs):
"""Append new columns to the table.
When appending a single column, ``values`` can be a scalar or an
array of either length 1 or the same length as this array (the one
it's appended to). In case of multiple columns, values must have
the shape ``list(arrays)``, and the dimension of each array
has to match the length of this array.
See the docs for ``numpy.lib.recfunctions.append_fields`` for an
explanation of the remaining options.
"""
n = len(self)
if np.isscalar(values):
values = np.full(n, values)
values = np.atleast_1d(values)
if not isinstance(colnames, str) and len(colnames) > 1:
values = np.atleast_2d(values)
self._check_column_length(values, n)
if values.ndim == 1:
if len(values) > n:
raise ValueError("New Column is longer than existing table!")
elif len(values) > 1 and len(values) < n:
raise ValueError(
"New Column is shorter than existing table, "
"but not just one element!"
)
elif len(values) == 1:
values = np.full(n, values[0])
new_arr = rfn.append_fields(
self, colnames, values, usemask=False, asrecarray=True, **kwargs
)
return self.__class__(
new_arr,
h5loc=self.h5loc,
split_h5=self.split_h5,
name=self.name,
h5singleton=self.h5singleton
) | python | def append_columns(self, colnames, values, **kwargs):
"""Append new columns to the table.
When appending a single column, ``values`` can be a scalar or an
array of either length 1 or the same length as this array (the one
it's appended to). In case of multiple columns, values must have
the shape ``list(arrays)``, and the dimension of each array
has to match the length of this array.
See the docs for ``numpy.lib.recfunctions.append_fields`` for an
explanation of the remaining options.
"""
n = len(self)
if np.isscalar(values):
values = np.full(n, values)
values = np.atleast_1d(values)
if not isinstance(colnames, str) and len(colnames) > 1:
values = np.atleast_2d(values)
self._check_column_length(values, n)
if values.ndim == 1:
if len(values) > n:
raise ValueError("New Column is longer than existing table!")
elif len(values) > 1 and len(values) < n:
raise ValueError(
"New Column is shorter than existing table, "
"but not just one element!"
)
elif len(values) == 1:
values = np.full(n, values[0])
new_arr = rfn.append_fields(
self, colnames, values, usemask=False, asrecarray=True, **kwargs
)
return self.__class__(
new_arr,
h5loc=self.h5loc,
split_h5=self.split_h5,
name=self.name,
h5singleton=self.h5singleton
) | [
"def",
"append_columns",
"(",
"self",
",",
"colnames",
",",
"values",
",",
"*",
"*",
"kwargs",
")",
":",
"n",
"=",
"len",
"(",
"self",
")",
"if",
"np",
".",
"isscalar",
"(",
"values",
")",
":",
"values",
"=",
"np",
".",
"full",
"(",
"n",
",",
"values",
")",
"values",
"=",
"np",
".",
"atleast_1d",
"(",
"values",
")",
"if",
"not",
"isinstance",
"(",
"colnames",
",",
"str",
")",
"and",
"len",
"(",
"colnames",
")",
">",
"1",
":",
"values",
"=",
"np",
".",
"atleast_2d",
"(",
"values",
")",
"self",
".",
"_check_column_length",
"(",
"values",
",",
"n",
")",
"if",
"values",
".",
"ndim",
"==",
"1",
":",
"if",
"len",
"(",
"values",
")",
">",
"n",
":",
"raise",
"ValueError",
"(",
"\"New Column is longer than existing table!\"",
")",
"elif",
"len",
"(",
"values",
")",
">",
"1",
"and",
"len",
"(",
"values",
")",
"<",
"n",
":",
"raise",
"ValueError",
"(",
"\"New Column is shorter than existing table, \"",
"\"but not just one element!\"",
")",
"elif",
"len",
"(",
"values",
")",
"==",
"1",
":",
"values",
"=",
"np",
".",
"full",
"(",
"n",
",",
"values",
"[",
"0",
"]",
")",
"new_arr",
"=",
"rfn",
".",
"append_fields",
"(",
"self",
",",
"colnames",
",",
"values",
",",
"usemask",
"=",
"False",
",",
"asrecarray",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"__class__",
"(",
"new_arr",
",",
"h5loc",
"=",
"self",
".",
"h5loc",
",",
"split_h5",
"=",
"self",
".",
"split_h5",
",",
"name",
"=",
"self",
".",
"name",
",",
"h5singleton",
"=",
"self",
".",
"h5singleton",
")"
] | Append new columns to the table.
When appending a single column, ``values`` can be a scalar or an
array of either length 1 or the same length as this array (the one
it's appended to). In case of multiple columns, values must have
the shape ``list(arrays)``, and the dimension of each array
has to match the length of this array.
See the docs for ``numpy.lib.recfunctions.append_fields`` for an
explanation of the remaining options. | [
"Append",
"new",
"columns",
"to",
"the",
"table",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/dataclasses.py#L362-L402 | train |
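A sketch of append_columns covering the two cases spelled out in the docstring (a broadcast scalar and several columns at once); the column names are made up for the example.
import numpy as np
from km3pipe.dataclasses import Table

tab = Table.from_dict({"time": np.array([1.0, 2.0, 3.0])}, h5loc="/hits")
tab = tab.append_columns("tot", 24)                    # scalar broadcast to every row
tab = tab.append_columns(["dom_id", "channel"],        # one array per new column
                         [np.arange(3), np.zeros(3)])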
tamasgal/km3pipe | km3pipe/dataclasses.py | Table.drop_columns | def drop_columns(self, colnames, **kwargs):
"""Drop columns from the table.
See the docs for ``numpy.lib.recfunctions.drop_fields`` for an
explanation of the remaining options.
"""
new_arr = rfn.drop_fields(
self, colnames, usemask=False, asrecarray=True, **kwargs
)
return self.__class__(
new_arr,
h5loc=self.h5loc,
split_h5=self.split_h5,
name=self.name,
h5singleton=self.h5singleton
) | python | def drop_columns(self, colnames, **kwargs):
"""Drop columns from the table.
See the docs for ``numpy.lib.recfunctions.drop_fields`` for an
explanation of the remaining options.
"""
new_arr = rfn.drop_fields(
self, colnames, usemask=False, asrecarray=True, **kwargs
)
return self.__class__(
new_arr,
h5loc=self.h5loc,
split_h5=self.split_h5,
name=self.name,
h5singleton=self.h5singleton
) | [
"def",
"drop_columns",
"(",
"self",
",",
"colnames",
",",
"*",
"*",
"kwargs",
")",
":",
"new_arr",
"=",
"rfn",
".",
"drop_fields",
"(",
"self",
",",
"colnames",
",",
"usemask",
"=",
"False",
",",
"asrecarray",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"__class__",
"(",
"new_arr",
",",
"h5loc",
"=",
"self",
".",
"h5loc",
",",
"split_h5",
"=",
"self",
".",
"split_h5",
",",
"name",
"=",
"self",
".",
"name",
",",
"h5singleton",
"=",
"self",
".",
"h5singleton",
")"
] | Drop columns from the table.
See the docs for ``numpy.lib.recfunctions.drop_fields`` for an
explanation of the remaining options. | [
"Drop",
"columns",
"from",
"the",
"table",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/dataclasses.py#L404-L419 | train |
tamasgal/km3pipe | km3pipe/dataclasses.py | Table.sorted | def sorted(self, by, **kwargs):
"""Sort array by a column.
Parameters
==========
by: str
Name of the column to sort by (e.g. 'time').
"""
sort_idc = np.argsort(self[by], **kwargs)
return self.__class__(
self[sort_idc],
h5loc=self.h5loc,
split_h5=self.split_h5,
name=self.name
) | python | def sorted(self, by, **kwargs):
"""Sort array by a column.
Parameters
==========
by: str
Name of the column to sort by (e.g. 'time').
"""
sort_idc = np.argsort(self[by], **kwargs)
return self.__class__(
self[sort_idc],
h5loc=self.h5loc,
split_h5=self.split_h5,
name=self.name
) | [
"def",
"sorted",
"(",
"self",
",",
"by",
",",
"*",
"*",
"kwargs",
")",
":",
"sort_idc",
"=",
"np",
".",
"argsort",
"(",
"self",
"[",
"by",
"]",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"__class__",
"(",
"self",
"[",
"sort_idc",
"]",
",",
"h5loc",
"=",
"self",
".",
"h5loc",
",",
"split_h5",
"=",
"self",
".",
"split_h5",
",",
"name",
"=",
"self",
".",
"name",
")"
] | Sort array by a column.
Parameters
==========
by: str
Name of the column to sort by (e.g. 'time'). | [
"Sort",
"array",
"by",
"a",
"column",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/dataclasses.py#L421-L435 | train |
tamasgal/km3pipe | km3pipe/dataclasses.py | Table.merge | def merge(cls, tables, fillna=False):
"""Merge a list of tables"""
cols = set(itertools.chain(*[table.dtype.descr for table in tables]))
tables_to_merge = []
for table in tables:
missing_cols = cols - set(table.dtype.descr)
if missing_cols:
if fillna:
n = len(table)
n_cols = len(missing_cols)
col_names = []
for col_name, col_dtype in missing_cols:
if 'f' not in col_dtype:
raise ValueError(
"Cannot create NaNs for non-float"
" type column '{}'".format(col_name)
)
col_names.append(col_name)
table = table.append_columns(
col_names, np.full((n_cols, n), np.nan)
)
else:
raise ValueError(
"Table columns do not match. Use fill_na=True"
" if you want to append missing values with NaNs"
)
tables_to_merge.append(table)
first_table = tables_to_merge[0]
merged_table = sum(tables_to_merge[1:], first_table)
merged_table.h5loc = first_table.h5loc
merged_table.h5singleton = first_table.h5singleton
merged_table.split_h5 = first_table.split_h5
merged_table.name = first_table.name
return merged_table | python | def merge(cls, tables, fillna=False):
"""Merge a list of tables"""
cols = set(itertools.chain(*[table.dtype.descr for table in tables]))
tables_to_merge = []
for table in tables:
missing_cols = cols - set(table.dtype.descr)
if missing_cols:
if fillna:
n = len(table)
n_cols = len(missing_cols)
col_names = []
for col_name, col_dtype in missing_cols:
if 'f' not in col_dtype:
raise ValueError(
"Cannot create NaNs for non-float"
" type column '{}'".format(col_name)
)
col_names.append(col_name)
table = table.append_columns(
col_names, np.full((n_cols, n), np.nan)
)
else:
raise ValueError(
"Table columns do not match. Use fill_na=True"
" if you want to append missing values with NaNs"
)
tables_to_merge.append(table)
first_table = tables_to_merge[0]
merged_table = sum(tables_to_merge[1:], first_table)
merged_table.h5loc = first_table.h5loc
merged_table.h5singleton = first_table.h5singleton
merged_table.split_h5 = first_table.split_h5
merged_table.name = first_table.name
return merged_table | [
"def",
"merge",
"(",
"cls",
",",
"tables",
",",
"fillna",
"=",
"False",
")",
":",
"cols",
"=",
"set",
"(",
"itertools",
".",
"chain",
"(",
"*",
"[",
"table",
".",
"dtype",
".",
"descr",
"for",
"table",
"in",
"tables",
"]",
")",
")",
"tables_to_merge",
"=",
"[",
"]",
"for",
"table",
"in",
"tables",
":",
"missing_cols",
"=",
"cols",
"-",
"set",
"(",
"table",
".",
"dtype",
".",
"descr",
")",
"if",
"missing_cols",
":",
"if",
"fillna",
":",
"n",
"=",
"len",
"(",
"table",
")",
"n_cols",
"=",
"len",
"(",
"missing_cols",
")",
"col_names",
"=",
"[",
"]",
"for",
"col_name",
",",
"col_dtype",
"in",
"missing_cols",
":",
"if",
"'f'",
"not",
"in",
"col_dtype",
":",
"raise",
"ValueError",
"(",
"\"Cannot create NaNs for non-float\"",
"\" type column '{}'\"",
".",
"format",
"(",
"col_name",
")",
")",
"col_names",
".",
"append",
"(",
"col_name",
")",
"table",
"=",
"table",
".",
"append_columns",
"(",
"col_names",
",",
"np",
".",
"full",
"(",
"(",
"n_cols",
",",
"n",
")",
",",
"np",
".",
"nan",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Table columns do not match. Use fill_na=True\"",
"\" if you want to append missing values with NaNs\"",
")",
"tables_to_merge",
".",
"append",
"(",
"table",
")",
"first_table",
"=",
"tables_to_merge",
"[",
"0",
"]",
"merged_table",
"=",
"sum",
"(",
"tables_to_merge",
"[",
"1",
":",
"]",
",",
"first_table",
")",
"merged_table",
".",
"h5loc",
"=",
"first_table",
".",
"h5loc",
"merged_table",
".",
"h5singleton",
"=",
"first_table",
".",
"h5singleton",
"merged_table",
".",
"split_h5",
"=",
"first_table",
".",
"split_h5",
"merged_table",
".",
"name",
"=",
"first_table",
".",
"name",
"return",
"merged_table"
] | Merge a list of tables | [
"Merge",
"a",
"list",
"of",
"tables"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/dataclasses.py#L447-L487 | train |
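A sketch of Table.merge with fillna=True: the second table lacks the float column tot, so the merged rows coming from it get NaN there (names and locations are illustrative).
import numpy as np
from km3pipe.dataclasses import Table

t1 = Table.from_dict({"time": np.array([1.0, 2.0]),
                      "tot": np.array([20.0, 30.0])}, h5loc="/hits")
t2 = Table.from_dict({"time": np.array([3.0])}, h5loc="/hits")

merged = Table.merge([t1, t2], fillna=True)   # t2 rows get tot == NaN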
tamasgal/km3pipe | km3pipe/io/hdf5.py | create_index_tuple | def create_index_tuple(group_ids):
"""An helper function to create index tuples for fast lookup in HDF5Pump"""
max_group_id = np.max(group_ids)
start_idx_arr = np.full(max_group_id + 1, 0)
n_items_arr = np.full(max_group_id + 1, 0)
current_group_id = group_ids[0]
current_idx = 0
item_count = 0
for group_id in group_ids:
if group_id != current_group_id:
start_idx_arr[current_group_id] = current_idx
n_items_arr[current_group_id] = item_count
current_idx += item_count
item_count = 0
current_group_id = group_id
item_count += 1
else:
start_idx_arr[current_group_id] = current_idx
n_items_arr[current_group_id] = item_count
return (start_idx_arr, n_items_arr) | python | def create_index_tuple(group_ids):
"""An helper function to create index tuples for fast lookup in HDF5Pump"""
max_group_id = np.max(group_ids)
start_idx_arr = np.full(max_group_id + 1, 0)
n_items_arr = np.full(max_group_id + 1, 0)
current_group_id = group_ids[0]
current_idx = 0
item_count = 0
for group_id in group_ids:
if group_id != current_group_id:
start_idx_arr[current_group_id] = current_idx
n_items_arr[current_group_id] = item_count
current_idx += item_count
item_count = 0
current_group_id = group_id
item_count += 1
else:
start_idx_arr[current_group_id] = current_idx
n_items_arr[current_group_id] = item_count
return (start_idx_arr, n_items_arr) | [
"def",
"create_index_tuple",
"(",
"group_ids",
")",
":",
"max_group_id",
"=",
"np",
".",
"max",
"(",
"group_ids",
")",
"start_idx_arr",
"=",
"np",
".",
"full",
"(",
"max_group_id",
"+",
"1",
",",
"0",
")",
"n_items_arr",
"=",
"np",
".",
"full",
"(",
"max_group_id",
"+",
"1",
",",
"0",
")",
"current_group_id",
"=",
"group_ids",
"[",
"0",
"]",
"current_idx",
"=",
"0",
"item_count",
"=",
"0",
"for",
"group_id",
"in",
"group_ids",
":",
"if",
"group_id",
"!=",
"current_group_id",
":",
"start_idx_arr",
"[",
"current_group_id",
"]",
"=",
"current_idx",
"n_items_arr",
"[",
"current_group_id",
"]",
"=",
"item_count",
"current_idx",
"+=",
"item_count",
"item_count",
"=",
"0",
"current_group_id",
"=",
"group_id",
"item_count",
"+=",
"1",
"else",
":",
"start_idx_arr",
"[",
"current_group_id",
"]",
"=",
"current_idx",
"n_items_arr",
"[",
"current_group_id",
"]",
"=",
"item_count",
"return",
"(",
"start_idx_arr",
",",
"n_items_arr",
")"
] | A helper function to create index tuples for fast lookup in HDF5Pump | [
"A",
"helper",
"function",
"to",
"create",
"index",
"tuples",
"for",
"fast",
"lookup",
"in",
"HDF5Pump"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/hdf5.py#L892-L915 | train |
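A worked example of create_index_tuple; the group ids are assumed to arrive grouped consecutively, as in the HDF5 tables this indexes, and ids that never occur keep a zero entry in both output arrays.
import numpy as np
from km3pipe.io.hdf5 import create_index_tuple

group_ids = np.array([0, 0, 0, 1, 1, 3])         # assumed grouped consecutively
starts, n_items = create_index_tuple(group_ids)
# starts  -> [0, 3, 0, 5]   first row index per group_id
# n_items -> [3, 2, 0, 1]   rows per group_id (absent ids, like 2, stay 0)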
tamasgal/km3pipe | km3pipe/io/hdf5.py | HDF5Header._set_attributes | def _set_attributes(self):
"""Traverse the internal dictionary and set the getters"""
for parameter, data in self._data.items():
if isinstance(data, dict) or isinstance(data, OrderedDict):
field_names, field_values = zip(*data.items())
sorted_indices = np.argsort(field_names)
attr = namedtuple(
parameter, [field_names[i] for i in sorted_indices]
)
setattr(
self, parameter,
attr(*[field_values[i] for i in sorted_indices])
)
else:
setattr(self, parameter, data) | python | def _set_attributes(self):
"""Traverse the internal dictionary and set the getters"""
for parameter, data in self._data.items():
if isinstance(data, dict) or isinstance(data, OrderedDict):
field_names, field_values = zip(*data.items())
sorted_indices = np.argsort(field_names)
attr = namedtuple(
parameter, [field_names[i] for i in sorted_indices]
)
setattr(
self, parameter,
attr(*[field_values[i] for i in sorted_indices])
)
else:
setattr(self, parameter, data) | [
"def",
"_set_attributes",
"(",
"self",
")",
":",
"for",
"parameter",
",",
"data",
"in",
"self",
".",
"_data",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
"or",
"isinstance",
"(",
"data",
",",
"OrderedDict",
")",
":",
"field_names",
",",
"field_values",
"=",
"zip",
"(",
"*",
"data",
".",
"items",
"(",
")",
")",
"sorted_indices",
"=",
"np",
".",
"argsort",
"(",
"field_names",
")",
"attr",
"=",
"namedtuple",
"(",
"parameter",
",",
"[",
"field_names",
"[",
"i",
"]",
"for",
"i",
"in",
"sorted_indices",
"]",
")",
"setattr",
"(",
"self",
",",
"parameter",
",",
"attr",
"(",
"*",
"[",
"field_values",
"[",
"i",
"]",
"for",
"i",
"in",
"sorted_indices",
"]",
")",
")",
"else",
":",
"setattr",
"(",
"self",
",",
"parameter",
",",
"data",
")"
] | Traverse the internal dictionary and set the getters | [
"Traverse",
"the",
"internal",
"dictionary",
"and",
"set",
"the",
"getters"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/hdf5.py#L74-L88 | train |
tamasgal/km3pipe | km3pipe/io/hdf5.py | HDF5Sink._write_ndarrays_cache_to_disk | def _write_ndarrays_cache_to_disk(self):
"""Writes all the cached NDArrays to disk and empties the cache"""
for h5loc, arrs in self._ndarrays_cache.items():
title = arrs[0].title
chunkshape = (self.chunksize,) + arrs[0].shape[1:] if self.chunksize is not\
None else None
arr = NDArray(np.concatenate(arrs), h5loc=h5loc, title=title)
if h5loc not in self._ndarrays:
loc, tabname = os.path.split(h5loc)
ndarr = self.h5file.create_earray(
loc,
tabname,
tb.Atom.from_dtype(arr.dtype),
(0, ) + arr.shape[1:],
chunkshape=chunkshape,
title=title,
filters=self.filters,
createparents=True,
)
self._ndarrays[h5loc] = ndarr
else:
ndarr = self._ndarrays[h5loc]
idx_table_h5loc = h5loc + '_indices'
if idx_table_h5loc not in self.indices:
self.indices[idx_table_h5loc] = HDF5IndexTable(idx_table_h5loc)
idx_tab = self.indices[idx_table_h5loc]
for arr_length in (len(a) for a in arrs):
idx_tab.append(arr_length)
ndarr.append(arr)
self._ndarrays_cache = defaultdict(list) | python | def _write_ndarrays_cache_to_disk(self):
"""Writes all the cached NDArrays to disk and empties the cache"""
for h5loc, arrs in self._ndarrays_cache.items():
title = arrs[0].title
chunkshape = (self.chunksize,) + arrs[0].shape[1:] if self.chunksize is not\
None else None
arr = NDArray(np.concatenate(arrs), h5loc=h5loc, title=title)
if h5loc not in self._ndarrays:
loc, tabname = os.path.split(h5loc)
ndarr = self.h5file.create_earray(
loc,
tabname,
tb.Atom.from_dtype(arr.dtype),
(0, ) + arr.shape[1:],
chunkshape=chunkshape,
title=title,
filters=self.filters,
createparents=True,
)
self._ndarrays[h5loc] = ndarr
else:
ndarr = self._ndarrays[h5loc]
idx_table_h5loc = h5loc + '_indices'
if idx_table_h5loc not in self.indices:
self.indices[idx_table_h5loc] = HDF5IndexTable(idx_table_h5loc)
idx_tab = self.indices[idx_table_h5loc]
for arr_length in (len(a) for a in arrs):
idx_tab.append(arr_length)
ndarr.append(arr)
self._ndarrays_cache = defaultdict(list) | [
"def",
"_write_ndarrays_cache_to_disk",
"(",
"self",
")",
":",
"for",
"h5loc",
",",
"arrs",
"in",
"self",
".",
"_ndarrays_cache",
".",
"items",
"(",
")",
":",
"title",
"=",
"arrs",
"[",
"0",
"]",
".",
"title",
"chunkshape",
"=",
"(",
"self",
".",
"chunksize",
",",
")",
"+",
"arrs",
"[",
"0",
"]",
".",
"shape",
"[",
"1",
":",
"]",
"if",
"self",
".",
"chunksize",
"is",
"not",
"None",
"else",
"None",
"arr",
"=",
"NDArray",
"(",
"np",
".",
"concatenate",
"(",
"arrs",
")",
",",
"h5loc",
"=",
"h5loc",
",",
"title",
"=",
"title",
")",
"if",
"h5loc",
"not",
"in",
"self",
".",
"_ndarrays",
":",
"loc",
",",
"tabname",
"=",
"os",
".",
"path",
".",
"split",
"(",
"h5loc",
")",
"ndarr",
"=",
"self",
".",
"h5file",
".",
"create_earray",
"(",
"loc",
",",
"tabname",
",",
"tb",
".",
"Atom",
".",
"from_dtype",
"(",
"arr",
".",
"dtype",
")",
",",
"(",
"0",
",",
")",
"+",
"arr",
".",
"shape",
"[",
"1",
":",
"]",
",",
"chunkshape",
"=",
"chunkshape",
",",
"title",
"=",
"title",
",",
"filters",
"=",
"self",
".",
"filters",
",",
"createparents",
"=",
"True",
",",
")",
"self",
".",
"_ndarrays",
"[",
"h5loc",
"]",
"=",
"ndarr",
"else",
":",
"ndarr",
"=",
"self",
".",
"_ndarrays",
"[",
"h5loc",
"]",
"idx_table_h5loc",
"=",
"h5loc",
"+",
"'_indices'",
"if",
"idx_table_h5loc",
"not",
"in",
"self",
".",
"indices",
":",
"self",
".",
"indices",
"[",
"idx_table_h5loc",
"]",
"=",
"HDF5IndexTable",
"(",
"idx_table_h5loc",
")",
"idx_tab",
"=",
"self",
".",
"indices",
"[",
"idx_table_h5loc",
"]",
"for",
"arr_length",
"in",
"(",
"len",
"(",
"a",
")",
"for",
"a",
"in",
"arrs",
")",
":",
"idx_tab",
".",
"append",
"(",
"arr_length",
")",
"ndarr",
".",
"append",
"(",
"arr",
")",
"self",
".",
"_ndarrays_cache",
"=",
"defaultdict",
"(",
"list",
")"
] | Writes all the cached NDArrays to disk and empties the cache | [
"Writes",
"all",
"the",
"cached",
"NDArrays",
"to",
"disk",
"and",
"empties",
"the",
"cache"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/hdf5.py#L254-L289 | train |
tamasgal/km3pipe | km3pipe/io/hdf5.py | HDF5Sink.flush | def flush(self):
"""Flush tables and arrays to disk"""
self.log.info('Flushing tables and arrays to disk...')
for tab in self._tables.values():
tab.flush()
self._write_ndarrays_cache_to_disk() | python | def flush(self):
"""Flush tables and arrays to disk"""
self.log.info('Flushing tables and arrays to disk...')
for tab in self._tables.values():
tab.flush()
self._write_ndarrays_cache_to_disk() | [
"def",
"flush",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Flushing tables and arrays to disk...'",
")",
"for",
"tab",
"in",
"self",
".",
"_tables",
".",
"values",
"(",
")",
":",
"tab",
".",
"flush",
"(",
")",
"self",
".",
"_write_ndarrays_cache_to_disk",
"(",
")"
] | Flush tables and arrays to disk | [
"Flush",
"tables",
"and",
"arrays",
"to",
"disk"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/hdf5.py#L450-L455 | train |
cprogrammer1994/GLWindow | GLWindow/__main__.py | main | def main():
'''
Sample program to test GLWindow.
'''
print('GLWindow:', GLWindow.__version__)
print('Python:', sys.version)
print('Platform:', sys.platform)
wnd = GLWindow.create_window((480, 480), title='GLWindow Sample')
wnd.vsync = False
ctx = ModernGL.create_context()
prog = ctx.program([
ctx.vertex_shader('''
#version 330
in vec2 vert;
in vec4 vert_color;
out vec4 frag_color;
uniform vec2 scale;
uniform float rotation;
void main() {
frag_color = vert_color;
float r = rotation * (0.5 + gl_InstanceID * 0.05);
mat2 rot = mat2(cos(r), sin(r), -sin(r), cos(r));
gl_Position = vec4((rot * vert) * scale, 0.0, 1.0);
}
'''),
ctx.fragment_shader('''
#version 330
in vec4 frag_color;
out vec4 color;
void main() {
color = vec4(frag_color);
}
'''),
])
scale = prog.uniforms['scale']
rotation = prog.uniforms['rotation']
vbo = ctx.buffer(struct.pack(
'18f',
1.0, 0.0, 1.0, 0.0, 0.0, 0.5,
-0.5, 0.86, 0.0, 1.0, 0.0, 0.5,
-0.5, -0.86, 0.0, 0.0, 1.0, 0.5,
))
vao = ctx.simple_vertex_array(prog, vbo, ['vert', 'vert_color'])
while wnd.update():
wnd.clear(0.95, 0.95, 0.95)
width, height = wnd.size
scale.value = (height / width * 0.75, 0.75)
ctx.viewport = wnd.viewport
ctx.enable(ModernGL.BLEND)
rotation.value = wnd.time
vao.render(instances=10) | python | def main():
'''
Sample program to test GLWindow.
'''
print('GLWindow:', GLWindow.__version__)
print('Python:', sys.version)
print('Platform:', sys.platform)
wnd = GLWindow.create_window((480, 480), title='GLWindow Sample')
wnd.vsync = False
ctx = ModernGL.create_context()
prog = ctx.program([
ctx.vertex_shader('''
#version 330
in vec2 vert;
in vec4 vert_color;
out vec4 frag_color;
uniform vec2 scale;
uniform float rotation;
void main() {
frag_color = vert_color;
float r = rotation * (0.5 + gl_InstanceID * 0.05);
mat2 rot = mat2(cos(r), sin(r), -sin(r), cos(r));
gl_Position = vec4((rot * vert) * scale, 0.0, 1.0);
}
'''),
ctx.fragment_shader('''
#version 330
in vec4 frag_color;
out vec4 color;
void main() {
color = vec4(frag_color);
}
'''),
])
scale = prog.uniforms['scale']
rotation = prog.uniforms['rotation']
vbo = ctx.buffer(struct.pack(
'18f',
1.0, 0.0, 1.0, 0.0, 0.0, 0.5,
-0.5, 0.86, 0.0, 1.0, 0.0, 0.5,
-0.5, -0.86, 0.0, 0.0, 1.0, 0.5,
))
vao = ctx.simple_vertex_array(prog, vbo, ['vert', 'vert_color'])
while wnd.update():
wnd.clear(0.95, 0.95, 0.95)
width, height = wnd.size
scale.value = (height / width * 0.75, 0.75)
ctx.viewport = wnd.viewport
ctx.enable(ModernGL.BLEND)
rotation.value = wnd.time
vao.render(instances=10) | [
"def",
"main",
"(",
")",
":",
"print",
"(",
"'GLWindow:'",
",",
"GLWindow",
".",
"__version__",
")",
"print",
"(",
"'Python:'",
",",
"sys",
".",
"version",
")",
"print",
"(",
"'Platform:'",
",",
"sys",
".",
"platform",
")",
"wnd",
"=",
"GLWindow",
".",
"create_window",
"(",
"(",
"480",
",",
"480",
")",
",",
"title",
"=",
"'GLWindow Sample'",
")",
"wnd",
".",
"vsync",
"=",
"False",
"ctx",
"=",
"ModernGL",
".",
"create_context",
"(",
")",
"prog",
"=",
"ctx",
".",
"program",
"(",
"[",
"ctx",
".",
"vertex_shader",
"(",
"'''\n #version 330\n in vec2 vert;\n in vec4 vert_color;\n out vec4 frag_color;\n uniform vec2 scale;\n uniform float rotation;\n void main() {\n frag_color = vert_color;\n float r = rotation * (0.5 + gl_InstanceID * 0.05);\n mat2 rot = mat2(cos(r), sin(r), -sin(r), cos(r));\n gl_Position = vec4((rot * vert) * scale, 0.0, 1.0);\n }\n '''",
")",
",",
"ctx",
".",
"fragment_shader",
"(",
"'''\n #version 330\n in vec4 frag_color;\n out vec4 color;\n void main() {\n color = vec4(frag_color);\n }\n '''",
")",
",",
"]",
")",
"scale",
"=",
"prog",
".",
"uniforms",
"[",
"'scale'",
"]",
"rotation",
"=",
"prog",
".",
"uniforms",
"[",
"'rotation'",
"]",
"vbo",
"=",
"ctx",
".",
"buffer",
"(",
"struct",
".",
"pack",
"(",
"'18f'",
",",
"1.0",
",",
"0.0",
",",
"1.0",
",",
"0.0",
",",
"0.0",
",",
"0.5",
",",
"-",
"0.5",
",",
"0.86",
",",
"0.0",
",",
"1.0",
",",
"0.0",
",",
"0.5",
",",
"-",
"0.5",
",",
"-",
"0.86",
",",
"0.0",
",",
"0.0",
",",
"1.0",
",",
"0.5",
",",
")",
")",
"vao",
"=",
"ctx",
".",
"simple_vertex_array",
"(",
"prog",
",",
"vbo",
",",
"[",
"'vert'",
",",
"'vert_color'",
"]",
")",
"while",
"wnd",
".",
"update",
"(",
")",
":",
"wnd",
".",
"clear",
"(",
"0.95",
",",
"0.95",
",",
"0.95",
")",
"width",
",",
"height",
"=",
"wnd",
".",
"size",
"scale",
".",
"value",
"=",
"(",
"height",
"/",
"width",
"*",
"0.75",
",",
"0.75",
")",
"ctx",
".",
"viewport",
"=",
"wnd",
".",
"viewport",
"ctx",
".",
"enable",
"(",
"ModernGL",
".",
"BLEND",
")",
"rotation",
".",
"value",
"=",
"wnd",
".",
"time",
"vao",
".",
"render",
"(",
"instances",
"=",
"10",
")"
] | Sample program to test GLWindow. | [
"Sample",
"program",
"to",
"test",
"GLWindow",
"."
] | 521e18fcbc15e88d3c1f3547aa313c3a07386ee5 | https://github.com/cprogrammer1994/GLWindow/blob/521e18fcbc15e88d3c1f3547aa313c3a07386ee5/GLWindow/__main__.py#L12-L71 | train |
tamasgal/km3pipe | examples/offline_analysis/k40summary.py | write_header | def write_header(fobj):
"""Add the header to the CSV file"""
fobj.write("# K40 calibration results\n")
fobj.write("det_id\trun_id\tdom_id")
for param in ['t0', 'qe']:
for i in range(31):
fobj.write("\t{}_ch{}".format(param, i)) | python | def write_header(fobj):
"""Add the header to the CSV file"""
fobj.write("# K40 calibration results\n")
fobj.write("det_id\trun_id\tdom_id")
for param in ['t0', 'qe']:
for i in range(31):
fobj.write("\t{}_ch{}".format(param, i)) | [
"def",
"write_header",
"(",
"fobj",
")",
":",
"fobj",
".",
"write",
"(",
"\"# K40 calibration results\\n\"",
")",
"fobj",
".",
"write",
"(",
"\"det_id\\trun_id\\tdom_id\"",
")",
"for",
"param",
"in",
"[",
"'t0'",
",",
"'qe'",
"]",
":",
"for",
"i",
"in",
"range",
"(",
"31",
")",
":",
"fobj",
".",
"write",
"(",
"\"\\t{}_ch{}\"",
".",
"format",
"(",
"param",
",",
"i",
")",
")"
] | Add the header to the CSV file | [
"Add",
"the",
"header",
"to",
"the",
"CSV",
"file"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/examples/offline_analysis/k40summary.py#L30-L36 | train |
tamasgal/km3pipe | km3pipe/math.py | azimuth | def azimuth(v):
"""Return the azimuth angle in radians.
``phi``, ``theta`` is the opposite of ``zenith``, ``azimuth``.
This is the 'normal' azimuth definition -- beware of how you
define your coordinates. KM3NeT defines azimuth
differently than e.g. SLALIB, astropy, the AAS.org
"""
v = np.atleast_2d(v)
azi = phi(v) - np.pi
azi[azi < 0] += 2 * np.pi
if len(azi) == 1:
return azi[0]
return azi | python | def azimuth(v):
"""Return the azimuth angle in radians.
``phi``, ``theta`` is the opposite of ``zenith``, ``azimuth``.
This is the 'normal' azimuth definition -- beware of how you
define your coordinates. KM3NeT defines azimuth
differently than e.g. SLALIB, astropy, the AAS.org
"""
v = np.atleast_2d(v)
azi = phi(v) - np.pi
azi[azi < 0] += 2 * np.pi
if len(azi) == 1:
return azi[0]
return azi | [
"def",
"azimuth",
"(",
"v",
")",
":",
"v",
"=",
"np",
".",
"atleast_2d",
"(",
"v",
")",
"azi",
"=",
"phi",
"(",
"v",
")",
"-",
"np",
".",
"pi",
"azi",
"[",
"azi",
"<",
"0",
"]",
"+=",
"2",
"*",
"np",
".",
"pi",
"if",
"len",
"(",
"azi",
")",
"==",
"1",
":",
"return",
"azi",
"[",
"0",
"]",
"return",
"azi"
] | Return the azimuth angle in radians.
``phi``, ``theta`` is the opposite of ``zenith``, ``azimuth``.
This is the 'normal' azimuth definition -- beware of how you
define your coordinates. KM3NeT defines azimuth
differently than e.g. SLALIB, astropy, the AAS.org | [
"Return",
"the",
"azimuth",
"angle",
"in",
"radians",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L119-L133 | train |
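A quick numeric check of azimuth, assuming phi is the usual arctan2(y, x) angle: a direction along +x has phi = 0, so in this convention its azimuth is pi.
import numpy as np
from km3pipe.math import azimuth

azimuth(np.array([1.0, 0.0, 0.0]))     # -> pi (single vector in, scalar out)
azimuth(np.array([[1.0, 0.0, 0.0],
                  [-1.0, 0.0, 0.0]]))  # -> array([pi, 0.]) for a 2D stack of vectors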
tamasgal/km3pipe | km3pipe/math.py | unit_vector | def unit_vector(vector, **kwargs):
"""Returns the unit vector of the vector."""
# This also works for a dataframe with columns ['x', 'y', 'z']
# However, the division operation is picky about the shapes
# So, remember input vector shape, cast all up to 2d,
# do the (ugly) conversion, then return unit in same shape as input
# of course, the numpy-ized version of the input...
vector = np.array(vector)
out_shape = vector.shape
vector = np.atleast_2d(vector)
unit = vector / np.linalg.norm(vector, axis=1, **kwargs)[:, None]
return unit.reshape(out_shape) | python | def unit_vector(vector, **kwargs):
"""Returns the unit vector of the vector."""
# This also works for a dataframe with columns ['x', 'y', 'z']
# However, the division operation is picky about the shapes
# So, remember input vector shape, cast all up to 2d,
# do the (ugly) conversion, then return unit in same shape as input
# of course, the numpy-ized version of the input...
vector = np.array(vector)
out_shape = vector.shape
vector = np.atleast_2d(vector)
unit = vector / np.linalg.norm(vector, axis=1, **kwargs)[:, None]
return unit.reshape(out_shape) | [
"def",
"unit_vector",
"(",
"vector",
",",
"*",
"*",
"kwargs",
")",
":",
"# This also works for a dataframe with columns ['x', 'y', 'z']",
"# However, the division operation is picky about the shapes",
"# So, remember input vector shape, cast all up to 2d,",
"# do the (ugly) conversion, then return unit in same shape as input",
"# of course, the numpy-ized version of the input...",
"vector",
"=",
"np",
".",
"array",
"(",
"vector",
")",
"out_shape",
"=",
"vector",
".",
"shape",
"vector",
"=",
"np",
".",
"atleast_2d",
"(",
"vector",
")",
"unit",
"=",
"vector",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"vector",
",",
"axis",
"=",
"1",
",",
"*",
"*",
"kwargs",
")",
"[",
":",
",",
"None",
"]",
"return",
"unit",
".",
"reshape",
"(",
"out_shape",
")"
] | Returns the unit vector of the vector. | [
"Returns",
"the",
"unit",
"vector",
"of",
"the",
"vector",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L175-L186 | train |
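A small check of unit_vector, which normalises row-wise and returns the same shape it was given:
import numpy as np
from km3pipe.math import unit_vector

unit_vector([3.0, 4.0])                    # -> array([0.6, 0.8])
unit_vector(np.array([[1.0, 0.0, 0.0],
                      [0.0, 2.0, 0.0]]))   # each row scaled to unit length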
tamasgal/km3pipe | km3pipe/math.py | pld3 | def pld3(pos, line_vertex, line_dir):
"""Calculate the point-line-distance for given point and line."""
pos = np.atleast_2d(pos)
line_vertex = np.atleast_1d(line_vertex)
line_dir = np.atleast_1d(line_dir)
c = np.cross(line_dir, line_vertex - pos)
n1 = np.linalg.norm(c, axis=1)
n2 = np.linalg.norm(line_dir)
out = n1 / n2
if out.ndim == 1 and len(out) == 1:
return out[0]
return out | python | def pld3(pos, line_vertex, line_dir):
"""Calculate the point-line-distance for given point and line."""
pos = np.atleast_2d(pos)
line_vertex = np.atleast_1d(line_vertex)
line_dir = np.atleast_1d(line_dir)
c = np.cross(line_dir, line_vertex - pos)
n1 = np.linalg.norm(c, axis=1)
n2 = np.linalg.norm(line_dir)
out = n1 / n2
if out.ndim == 1 and len(out) == 1:
return out[0]
return out | [
"def",
"pld3",
"(",
"pos",
",",
"line_vertex",
",",
"line_dir",
")",
":",
"pos",
"=",
"np",
".",
"atleast_2d",
"(",
"pos",
")",
"line_vertex",
"=",
"np",
".",
"atleast_1d",
"(",
"line_vertex",
")",
"line_dir",
"=",
"np",
".",
"atleast_1d",
"(",
"line_dir",
")",
"c",
"=",
"np",
".",
"cross",
"(",
"line_dir",
",",
"line_vertex",
"-",
"pos",
")",
"n1",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"c",
",",
"axis",
"=",
"1",
")",
"n2",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"line_dir",
")",
"out",
"=",
"n1",
"/",
"n2",
"if",
"out",
".",
"ndim",
"==",
"1",
"and",
"len",
"(",
"out",
")",
"==",
"1",
":",
"return",
"out",
"[",
"0",
"]",
"return",
"out"
] | Calculate the point-line-distance for given point and line. | [
"Calculate",
"the",
"point",
"-",
"line",
"-",
"distance",
"for",
"given",
"point",
"and",
"line",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L189-L200 | train |
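A worked example of pld3: the point (0, 1, 0) lies at distance 1 from the x-axis.
import numpy as np
from km3pipe.math import pld3

pos = np.array([0.0, 1.0, 0.0])
line_vertex = np.array([0.0, 0.0, 0.0])
line_dir = np.array([1.0, 0.0, 0.0])
pld3(pos, line_vertex, line_dir)           # -> 1.0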
tamasgal/km3pipe | km3pipe/math.py | dist | def dist(x1, x2, axis=0):
"""Return the distance between two points.
Set axis=1 if x1 is a vector and x2 a matrix to get a vector of distances.
"""
return np.linalg.norm(x2 - x1, axis=axis) | python | def dist(x1, x2, axis=0):
"""Return the distance between two points.
Set axis=1 if x1 is a vector and x2 a matrix to get a vector of distances.
"""
return np.linalg.norm(x2 - x1, axis=axis) | [
"def",
"dist",
"(",
"x1",
",",
"x2",
",",
"axis",
"=",
"0",
")",
":",
"return",
"np",
".",
"linalg",
".",
"norm",
"(",
"x2",
"-",
"x1",
",",
"axis",
"=",
"axis",
")"
] | Return the distance between two points.
Set axis=1 if x1 is a vector and x2 a matrix to get a vector of distances. | [
"Return",
"the",
"distance",
"between",
"two",
"points",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L207-L212 | train |
tamasgal/km3pipe | km3pipe/math.py | com | def com(points, masses=None):
"""Calculate center of mass for given points.
If masses is not set, assume equal masses."""
if masses is None:
return np.average(points, axis=0)
else:
return np.average(points, axis=0, weights=masses) | python | def com(points, masses=None):
"""Calculate center of mass for given points.
If masses is not set, assume equal masses."""
if masses is None:
return np.average(points, axis=0)
else:
return np.average(points, axis=0, weights=masses) | [
"def",
"com",
"(",
"points",
",",
"masses",
"=",
"None",
")",
":",
"if",
"masses",
"is",
"None",
":",
"return",
"np",
".",
"average",
"(",
"points",
",",
"axis",
"=",
"0",
")",
"else",
":",
"return",
"np",
".",
"average",
"(",
"points",
",",
"axis",
"=",
"0",
",",
"weights",
"=",
"masses",
")"
] | Calculate center of mass for given points.
If masses is not set, assume equal masses. | [
"Calculate",
"center",
"of",
"mass",
"for",
"given",
"points",
".",
"If",
"masses",
"is",
"not",
"set",
"assume",
"equal",
"masses",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L215-L221 | train |
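A worked example of com with and without masses:
import numpy as np
from km3pipe.math import com

points = np.array([[0.0, 0.0, 0.0],
                   [2.0, 0.0, 0.0]])
com(points)                        # -> array([1., 0., 0.]), equal masses
com(points, masses=[1.0, 3.0])     # -> array([1.5, 0., 0.]), weighted towards the heavier point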
tamasgal/km3pipe | km3pipe/math.py | circ_permutation | def circ_permutation(items):
"""Calculate the circular permutation for a given list of items."""
permutations = []
for i in range(len(items)):
permutations.append(items[i:] + items[:i])
return permutations | python | def circ_permutation(items):
"""Calculate the circular permutation for a given list of items."""
permutations = []
for i in range(len(items)):
permutations.append(items[i:] + items[:i])
return permutations | [
"def",
"circ_permutation",
"(",
"items",
")",
":",
"permutations",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"items",
")",
")",
":",
"permutations",
".",
"append",
"(",
"items",
"[",
"i",
":",
"]",
"+",
"items",
"[",
":",
"i",
"]",
")",
"return",
"permutations"
] | Calculate the circular permutation for a given list of items. | [
"Calculate",
"the",
"circular",
"permutation",
"for",
"a",
"given",
"list",
"of",
"items",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L224-L229 | train |
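A quick example of circ_permutation:
from km3pipe.math import circ_permutation

circ_permutation([1, 2, 3])
# -> [[1, 2, 3], [2, 3, 1], [3, 1, 2]]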
tamasgal/km3pipe | km3pipe/math.py | inertia | def inertia(x, y, z, weight=None):
"""Inertia tensor, stolen of thomas"""
if weight is None:
weight = 1
tensor_of_inertia = np.zeros((3, 3), dtype=float)
tensor_of_inertia[0][0] = (y * y + z * z) * weight
tensor_of_inertia[0][1] = (-1) * x * y * weight
tensor_of_inertia[0][2] = (-1) * x * z * weight
tensor_of_inertia[1][0] = (-1) * x * y * weight
tensor_of_inertia[1][1] = (x * x + z * z) * weight
tensor_of_inertia[1][2] = (-1) * y * z * weight
tensor_of_inertia[2][0] = (-1) * x * z * weight
tensor_of_inertia[2][1] = (-1) * z * y * weight
tensor_of_inertia[2][2] = (x * x + y * y) * weight
eigen_values = np.linalg.eigvals(tensor_of_inertia)
small_inertia = eigen_values[2][2]
middle_inertia = eigen_values[1][1]
big_inertia = eigen_values[0][0]
return small_inertia, middle_inertia, big_inertia | python | def inertia(x, y, z, weight=None):
"""Inertia tensor, stolen of thomas"""
if weight is None:
weight = 1
tensor_of_inertia = np.zeros((3, 3), dtype=float)
tensor_of_inertia[0][0] = (y * y + z * z) * weight
tensor_of_inertia[0][1] = (-1) * x * y * weight
tensor_of_inertia[0][2] = (-1) * x * z * weight
tensor_of_inertia[1][0] = (-1) * x * y * weight
tensor_of_inertia[1][1] = (x * x + z * z) * weight
tensor_of_inertia[1][2] = (-1) * y * z * weight
tensor_of_inertia[2][0] = (-1) * x * z * weight
tensor_of_inertia[2][1] = (-1) * z * y * weight
tensor_of_inertia[2][2] = (x * x + y * y) * weight
eigen_values = np.linalg.eigvals(tensor_of_inertia)
small_inertia = eigen_values[2][2]
middle_inertia = eigen_values[1][1]
big_inertia = eigen_values[0][0]
return small_inertia, middle_inertia, big_inertia | [
"def",
"inertia",
"(",
"x",
",",
"y",
",",
"z",
",",
"weight",
"=",
"None",
")",
":",
"if",
"weight",
"is",
"None",
":",
"weight",
"=",
"1",
"tensor_of_inertia",
"=",
"np",
".",
"zeros",
"(",
"(",
"3",
",",
"3",
")",
",",
"dtype",
"=",
"float",
")",
"tensor_of_inertia",
"[",
"0",
"]",
"[",
"0",
"]",
"=",
"(",
"y",
"*",
"y",
"+",
"z",
"*",
"z",
")",
"*",
"weight",
"tensor_of_inertia",
"[",
"0",
"]",
"[",
"1",
"]",
"=",
"(",
"-",
"1",
")",
"*",
"x",
"*",
"y",
"*",
"weight",
"tensor_of_inertia",
"[",
"0",
"]",
"[",
"2",
"]",
"=",
"(",
"-",
"1",
")",
"*",
"x",
"*",
"z",
"*",
"weight",
"tensor_of_inertia",
"[",
"1",
"]",
"[",
"0",
"]",
"=",
"(",
"-",
"1",
")",
"*",
"x",
"*",
"y",
"*",
"weight",
"tensor_of_inertia",
"[",
"1",
"]",
"[",
"1",
"]",
"=",
"(",
"x",
"*",
"x",
"+",
"z",
"*",
"z",
")",
"*",
"weight",
"tensor_of_inertia",
"[",
"1",
"]",
"[",
"2",
"]",
"=",
"(",
"-",
"1",
")",
"*",
"y",
"*",
"z",
"*",
"weight",
"tensor_of_inertia",
"[",
"2",
"]",
"[",
"0",
"]",
"=",
"(",
"-",
"1",
")",
"*",
"x",
"*",
"z",
"*",
"weight",
"tensor_of_inertia",
"[",
"2",
"]",
"[",
"1",
"]",
"=",
"(",
"-",
"1",
")",
"*",
"z",
"*",
"y",
"*",
"weight",
"tensor_of_inertia",
"[",
"2",
"]",
"[",
"2",
"]",
"=",
"(",
"x",
"*",
"x",
"+",
"y",
"*",
"y",
")",
"*",
"weight",
"eigen_values",
"=",
"np",
".",
"linalg",
".",
"eigvals",
"(",
"tensor_of_inertia",
")",
"small_inertia",
"=",
"eigen_values",
"[",
"2",
"]",
"[",
"2",
"]",
"middle_inertia",
"=",
"eigen_values",
"[",
"1",
"]",
"[",
"1",
"]",
"big_inertia",
"=",
"eigen_values",
"[",
"0",
"]",
"[",
"0",
"]",
"return",
"small_inertia",
",",
"middle_inertia",
",",
"big_inertia"
] | Inertia tensor, stolen from thomas | [
"Inertia",
"tensor",
"stolen",
"of",
"thomas"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L381-L400 | train |
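A caveat on the inertia record above: numpy.linalg.eigvals returns a one-dimensional array of eigenvalues, so indexing it as eigen_values[2][2] would raise an IndexError at runtime. A hedged sketch of the same idea (function name is illustrative, not part of the original module) that extracts sorted eigenvalues from the symmetric tensor instead:

import numpy as np

def inertia_eigenvalues(x, y, z, weight=1):
    # Build the same symmetric tensor of inertia as in the record above.
    t = np.array([
        [(y * y + z * z) * weight, -x * y * weight, -x * z * weight],
        [-x * y * weight, (x * x + z * z) * weight, -y * z * weight],
        [-x * z * weight, -y * z * weight, (x * x + y * y) * weight],
    ])
    # eigvalsh handles symmetric matrices and returns eigenvalues in ascending order.
    small, middle, big = np.linalg.eigvalsh(t)
    return small, middle, big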
tamasgal/km3pipe | km3pipe/math.py | qrot | def qrot(vector, quaternion):
"""Rotate a 3D vector using quaternion algebra.
Implemented by Vladimir Kulikovskiy.
Parameters
----------
vector: np.array
quaternion: np.array
Returns
-------
np.array
"""
t = 2 * np.cross(quaternion[1:], vector)
v_rot = vector + quaternion[0] * t + np.cross(quaternion[1:], t)
return v_rot | python | def qrot(vector, quaternion):
"""Rotate a 3D vector using quaternion algebra.
Implemented by Vladimir Kulikovskiy.
Parameters
----------
vector: np.array
quaternion: np.array
Returns
-------
np.array
"""
t = 2 * np.cross(quaternion[1:], vector)
v_rot = vector + quaternion[0] * t + np.cross(quaternion[1:], t)
return v_rot | [
"def",
"qrot",
"(",
"vector",
",",
"quaternion",
")",
":",
"t",
"=",
"2",
"*",
"np",
".",
"cross",
"(",
"quaternion",
"[",
"1",
":",
"]",
",",
"vector",
")",
"v_rot",
"=",
"vector",
"+",
"quaternion",
"[",
"0",
"]",
"*",
"t",
"+",
"np",
".",
"cross",
"(",
"quaternion",
"[",
"1",
":",
"]",
",",
"t",
")",
"return",
"v_rot"
] | Rotate a 3D vector using quaternion algebra.
Implemented by Vladimir Kulikovskiy.
Parameters
----------
vector: np.array
quaternion: np.array
Returns
-------
np.array | [
"Rotate",
"a",
"3D",
"vector",
"using",
"quaternion",
"algebra",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L425-L442 | train |
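A small numeric check for qrot above; the code implies a (w, x, y, z) ordering and a unit quaternion, so rotating the x unit vector by 90 degrees about z should give roughly the y unit vector:

import numpy as np
from km3pipe.math import qrot

angle = np.pi / 2
q = np.array([np.cos(angle / 2), 0.0, 0.0, np.sin(angle / 2)])  # 90 deg about z
print(qrot(np.array([1.0, 0.0, 0.0]), q))  # -> approximately [0, 1, 0]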
tamasgal/km3pipe | km3pipe/math.py | qeuler | def qeuler(yaw, pitch, roll):
"""Convert Euler angle to quaternion.
Parameters
----------
yaw: number
pitch: number
roll: number
Returns
-------
np.array
"""
yaw = np.radians(yaw)
pitch = np.radians(pitch)
roll = np.radians(roll)
cy = np.cos(yaw * 0.5)
sy = np.sin(yaw * 0.5)
cr = np.cos(roll * 0.5)
sr = np.sin(roll * 0.5)
cp = np.cos(pitch * 0.5)
sp = np.sin(pitch * 0.5)
q = np.array((
cy * cr * cp + sy * sr * sp, cy * sr * cp - sy * cr * sp,
cy * cr * sp + sy * sr * cp, sy * cr * cp - cy * sr * sp
))
return q | python | def qeuler(yaw, pitch, roll):
"""Convert Euler angle to quaternion.
Parameters
----------
yaw: number
pitch: number
roll: number
Returns
-------
np.array
"""
yaw = np.radians(yaw)
pitch = np.radians(pitch)
roll = np.radians(roll)
cy = np.cos(yaw * 0.5)
sy = np.sin(yaw * 0.5)
cr = np.cos(roll * 0.5)
sr = np.sin(roll * 0.5)
cp = np.cos(pitch * 0.5)
sp = np.sin(pitch * 0.5)
q = np.array((
cy * cr * cp + sy * sr * sp, cy * sr * cp - sy * cr * sp,
cy * cr * sp + sy * sr * cp, sy * cr * cp - cy * sr * sp
))
return q | [
"def",
"qeuler",
"(",
"yaw",
",",
"pitch",
",",
"roll",
")",
":",
"yaw",
"=",
"np",
".",
"radians",
"(",
"yaw",
")",
"pitch",
"=",
"np",
".",
"radians",
"(",
"pitch",
")",
"roll",
"=",
"np",
".",
"radians",
"(",
"roll",
")",
"cy",
"=",
"np",
".",
"cos",
"(",
"yaw",
"*",
"0.5",
")",
"sy",
"=",
"np",
".",
"sin",
"(",
"yaw",
"*",
"0.5",
")",
"cr",
"=",
"np",
".",
"cos",
"(",
"roll",
"*",
"0.5",
")",
"sr",
"=",
"np",
".",
"sin",
"(",
"roll",
"*",
"0.5",
")",
"cp",
"=",
"np",
".",
"cos",
"(",
"pitch",
"*",
"0.5",
")",
"sp",
"=",
"np",
".",
"sin",
"(",
"pitch",
"*",
"0.5",
")",
"q",
"=",
"np",
".",
"array",
"(",
"(",
"cy",
"*",
"cr",
"*",
"cp",
"+",
"sy",
"*",
"sr",
"*",
"sp",
",",
"cy",
"*",
"sr",
"*",
"cp",
"-",
"sy",
"*",
"cr",
"*",
"sp",
",",
"cy",
"*",
"cr",
"*",
"sp",
"+",
"sy",
"*",
"sr",
"*",
"cp",
",",
"sy",
"*",
"cr",
"*",
"cp",
"-",
"cy",
"*",
"sr",
"*",
"sp",
")",
")",
"return",
"q"
] | Convert Euler angle to quaternion.
Parameters
----------
yaw: number
pitch: number
roll: number
Returns
-------
np.array | [
"Convert",
"Euler",
"angle",
"to",
"quaternion",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L445-L474 | train |
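qeuler above composes naturally with qrot: with pitch and roll at zero the returned quaternion reduces to (cos(yaw/2), 0, 0, sin(yaw/2)), i.e. a rotation about the z axis. A hedged round-trip sketch:

import numpy as np
from km3pipe.math import qeuler, qrot

q = qeuler(90, 0, 0)                       # yaw only -> rotation about z
print(qrot(np.array([1.0, 0.0, 0.0]), q))  # -> approximately [0, 1, 0]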
tamasgal/km3pipe | km3pipe/math.py | intersect_3d | def intersect_3d(p1, p2):
"""Find the closes point for a given set of lines in 3D.
Parameters
----------
p1 : (M, N) array_like
Starting points
p2 : (M, N) array_like
End points.
Returns
-------
x : (N,) ndarray
Least-squares solution - the closest point of the intersections.
Raises
------
numpy.linalg.LinAlgError
If computation does not converge.
"""
v = p2 - p1
normed_v = unit_vector(v)
nx = normed_v[:, 0]
ny = normed_v[:, 1]
nz = normed_v[:, 2]
xx = np.sum(nx**2 - 1)
yy = np.sum(ny**2 - 1)
zz = np.sum(nz**2 - 1)
xy = np.sum(nx * ny)
xz = np.sum(nx * nz)
yz = np.sum(ny * nz)
M = np.array([(xx, xy, xz), (xy, yy, yz), (xz, yz, zz)])
x = np.sum(
p1[:, 0] * (nx**2 - 1) + p1[:, 1] * (nx * ny) + p1[:, 2] * (nx * nz)
)
y = np.sum(
p1[:, 0] * (nx * ny) + p1[:, 1] * (ny * ny - 1) + p1[:, 2] * (ny * nz)
)
z = np.sum(
p1[:, 0] * (nx * nz) + p1[:, 1] * (ny * nz) + p1[:, 2] * (nz**2 - 1)
)
return np.linalg.lstsq(M, np.array((x, y, z)), rcond=None)[0] | python | def intersect_3d(p1, p2):
"""Find the closes point for a given set of lines in 3D.
Parameters
----------
p1 : (M, N) array_like
Starting points
p2 : (M, N) array_like
End points.
Returns
-------
x : (N,) ndarray
Least-squares solution - the closest point of the intersections.
Raises
------
numpy.linalg.LinAlgError
If computation does not converge.
"""
v = p2 - p1
normed_v = unit_vector(v)
nx = normed_v[:, 0]
ny = normed_v[:, 1]
nz = normed_v[:, 2]
xx = np.sum(nx**2 - 1)
yy = np.sum(ny**2 - 1)
zz = np.sum(nz**2 - 1)
xy = np.sum(nx * ny)
xz = np.sum(nx * nz)
yz = np.sum(ny * nz)
M = np.array([(xx, xy, xz), (xy, yy, yz), (xz, yz, zz)])
x = np.sum(
p1[:, 0] * (nx**2 - 1) + p1[:, 1] * (nx * ny) + p1[:, 2] * (nx * nz)
)
y = np.sum(
p1[:, 0] * (nx * ny) + p1[:, 1] * (ny * ny - 1) + p1[:, 2] * (ny * nz)
)
z = np.sum(
p1[:, 0] * (nx * nz) + p1[:, 1] * (ny * nz) + p1[:, 2] * (nz**2 - 1)
)
return np.linalg.lstsq(M, np.array((x, y, z)), rcond=None)[0] | [
"def",
"intersect_3d",
"(",
"p1",
",",
"p2",
")",
":",
"v",
"=",
"p2",
"-",
"p1",
"normed_v",
"=",
"unit_vector",
"(",
"v",
")",
"nx",
"=",
"normed_v",
"[",
":",
",",
"0",
"]",
"ny",
"=",
"normed_v",
"[",
":",
",",
"1",
"]",
"nz",
"=",
"normed_v",
"[",
":",
",",
"2",
"]",
"xx",
"=",
"np",
".",
"sum",
"(",
"nx",
"**",
"2",
"-",
"1",
")",
"yy",
"=",
"np",
".",
"sum",
"(",
"ny",
"**",
"2",
"-",
"1",
")",
"zz",
"=",
"np",
".",
"sum",
"(",
"nz",
"**",
"2",
"-",
"1",
")",
"xy",
"=",
"np",
".",
"sum",
"(",
"nx",
"*",
"ny",
")",
"xz",
"=",
"np",
".",
"sum",
"(",
"nx",
"*",
"nz",
")",
"yz",
"=",
"np",
".",
"sum",
"(",
"ny",
"*",
"nz",
")",
"M",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"xx",
",",
"xy",
",",
"xz",
")",
",",
"(",
"xy",
",",
"yy",
",",
"yz",
")",
",",
"(",
"xz",
",",
"yz",
",",
"zz",
")",
"]",
")",
"x",
"=",
"np",
".",
"sum",
"(",
"p1",
"[",
":",
",",
"0",
"]",
"*",
"(",
"nx",
"**",
"2",
"-",
"1",
")",
"+",
"p1",
"[",
":",
",",
"1",
"]",
"*",
"(",
"nx",
"*",
"ny",
")",
"+",
"p1",
"[",
":",
",",
"2",
"]",
"*",
"(",
"nx",
"*",
"nz",
")",
")",
"y",
"=",
"np",
".",
"sum",
"(",
"p1",
"[",
":",
",",
"0",
"]",
"*",
"(",
"nx",
"*",
"ny",
")",
"+",
"p1",
"[",
":",
",",
"1",
"]",
"*",
"(",
"ny",
"*",
"ny",
"-",
"1",
")",
"+",
"p1",
"[",
":",
",",
"2",
"]",
"*",
"(",
"ny",
"*",
"nz",
")",
")",
"z",
"=",
"np",
".",
"sum",
"(",
"p1",
"[",
":",
",",
"0",
"]",
"*",
"(",
"nx",
"*",
"nz",
")",
"+",
"p1",
"[",
":",
",",
"1",
"]",
"*",
"(",
"ny",
"*",
"nz",
")",
"+",
"p1",
"[",
":",
",",
"2",
"]",
"*",
"(",
"nz",
"**",
"2",
"-",
"1",
")",
")",
"return",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"M",
",",
"np",
".",
"array",
"(",
"(",
"x",
",",
"y",
",",
"z",
")",
")",
",",
"rcond",
"=",
"None",
")",
"[",
"0",
"]"
] | Find the closest point for a given set of lines in 3D.
Parameters
----------
p1 : (M, N) array_like
Starting points
p2 : (M, N) array_like
End points.
Returns
-------
x : (N,) ndarray
Least-squares solution - the closest point of the intersections.
Raises
------
numpy.linalg.LinAlgError
If computation does not converge. | [
"Find",
"the",
"closes",
"point",
"for",
"a",
"given",
"set",
"of",
"lines",
"in",
"3D",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L494-L536 | train |
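A hedged sketch for intersect_3d above using two lines that cross at the origin (one along x, one along y); the least-squares solution should come out as approximately the origin (assumes km3pipe is installed, since the function relies on the module's own unit_vector helper):

import numpy as np
from km3pipe.math import intersect_3d

p1 = np.array([[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]])  # line start points
p2 = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])    # line end points
print(intersect_3d(p1, p2))  # expected: approximately [0, 0, 0]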
astooke/gtimer | gtimer/util.py | compat_py2_py3 | def compat_py2_py3():
""" For Python 2, 3 compatibility. """
if (sys.version_info > (3, 0)):
def iteritems(dictionary):
return dictionary.items()
def itervalues(dictionary):
return dictionary.values()
else:
def iteritems(dictionary):
return dictionary.iteritems()
def itervalues(dictionary):
return dictionary.itervalues()
return iteritems, itervalues | python | def compat_py2_py3():
""" For Python 2, 3 compatibility. """
if (sys.version_info > (3, 0)):
def iteritems(dictionary):
return dictionary.items()
def itervalues(dictionary):
return dictionary.values()
else:
def iteritems(dictionary):
return dictionary.iteritems()
def itervalues(dictionary):
return dictionary.itervalues()
return iteritems, itervalues | [
"def",
"compat_py2_py3",
"(",
")",
":",
"if",
"(",
"sys",
".",
"version_info",
">",
"(",
"3",
",",
"0",
")",
")",
":",
"def",
"iteritems",
"(",
"dictionary",
")",
":",
"return",
"dictionary",
".",
"items",
"(",
")",
"def",
"itervalues",
"(",
"dictionary",
")",
":",
"return",
"dictionary",
".",
"values",
"(",
")",
"else",
":",
"def",
"iteritems",
"(",
"dictionary",
")",
":",
"return",
"dictionary",
".",
"iteritems",
"(",
")",
"def",
"itervalues",
"(",
"dictionary",
")",
":",
"return",
"dictionary",
".",
"itervalues",
"(",
")",
"return",
"iteritems",
",",
"itervalues"
] | For Python 2, 3 compatibility. | [
"For",
"Python",
"2",
"3",
"compatibility",
"."
] | 2146dab459e5d959feb291821733d3d3ba7c523c | https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/util.py#L20-L36 | train |
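The compat helper above simply hands back the right pair of dictionary iterators for the running interpreter; a minimal usage sketch, assuming the module is importable as gtimer.util:

from gtimer.util import compat_py2_py3

iteritems, itervalues = compat_py2_py3()
d = {"a": 1, "b": 2}
for key, value in iteritems(d):
    print(key, value)
print(sum(itervalues(d)))  # -> 3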
tamasgal/km3pipe | km3pipe/io/jpp.py | TimeslicePump.timeslice_generator | def timeslice_generator(self):
"""Uses slice ID as iterator"""
slice_id = 0
while slice_id < self.n_timeslices:
blob = self.get_blob(slice_id)
yield blob
slice_id += 1 | python | def timeslice_generator(self):
"""Uses slice ID as iterator"""
slice_id = 0
while slice_id < self.n_timeslices:
blob = self.get_blob(slice_id)
yield blob
slice_id += 1 | [
"def",
"timeslice_generator",
"(",
"self",
")",
":",
"slice_id",
"=",
"0",
"while",
"slice_id",
"<",
"self",
".",
"n_timeslices",
":",
"blob",
"=",
"self",
".",
"get_blob",
"(",
"slice_id",
")",
"yield",
"blob",
"slice_id",
"+=",
"1"
] | Uses slice ID as iterator | [
"Uses",
"slice",
"ID",
"as",
"iterator"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/jpp.py#L185-L191 | train |
tamasgal/km3pipe | km3pipe/io/jpp.py | TimeslicePump.get_blob | def get_blob(self, index):
"""Index is slice ID"""
blob = self._current_blob
self.r.retrieve_timeslice(index)
timeslice_info = Table.from_template({
'frame_index': self.r.frame_index,
'slice_id': index,
'timestamp': self.r.utc_seconds,
'nanoseconds': self.r.utc_nanoseconds,
'n_frames': self.r.n_frames,
}, 'TimesliceInfo')
hits = self._extract_hits()
hits.group_id = index
blob['TimesliceInfo'] = timeslice_info
blob[self._hits_blob_key] = hits
return blob | python | def get_blob(self, index):
"""Index is slice ID"""
blob = self._current_blob
self.r.retrieve_timeslice(index)
timeslice_info = Table.from_template({
'frame_index': self.r.frame_index,
'slice_id': index,
'timestamp': self.r.utc_seconds,
'nanoseconds': self.r.utc_nanoseconds,
'n_frames': self.r.n_frames,
}, 'TimesliceInfo')
hits = self._extract_hits()
hits.group_id = index
blob['TimesliceInfo'] = timeslice_info
blob[self._hits_blob_key] = hits
return blob | [
"def",
"get_blob",
"(",
"self",
",",
"index",
")",
":",
"blob",
"=",
"self",
".",
"_current_blob",
"self",
".",
"r",
".",
"retrieve_timeslice",
"(",
"index",
")",
"timeslice_info",
"=",
"Table",
".",
"from_template",
"(",
"{",
"'frame_index'",
":",
"self",
".",
"r",
".",
"frame_index",
",",
"'slice_id'",
":",
"index",
",",
"'timestamp'",
":",
"self",
".",
"r",
".",
"utc_seconds",
",",
"'nanoseconds'",
":",
"self",
".",
"r",
".",
"utc_nanoseconds",
",",
"'n_frames'",
":",
"self",
".",
"r",
".",
"n_frames",
",",
"}",
",",
"'TimesliceInfo'",
")",
"hits",
"=",
"self",
".",
"_extract_hits",
"(",
")",
"hits",
".",
"group_id",
"=",
"index",
"blob",
"[",
"'TimesliceInfo'",
"]",
"=",
"timeslice_info",
"blob",
"[",
"self",
".",
"_hits_blob_key",
"]",
"=",
"hits",
"return",
"blob"
] | Index is slice ID | [
"Index",
"is",
"slice",
"ID"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/jpp.py#L193-L208 | train |
tamasgal/km3pipe | km3pipe/io/jpp.py | TimeslicePump._slice_generator | def _slice_generator(self, index):
"""A simple slice generator for iterations"""
start, stop, step = index.indices(len(self))
for i in range(start, stop, step):
yield self.get_blob(i) | python | def _slice_generator(self, index):
"""A simple slice generator for iterations"""
start, stop, step = index.indices(len(self))
for i in range(start, stop, step):
yield self.get_blob(i) | [
"def",
"_slice_generator",
"(",
"self",
",",
"index",
")",
":",
"start",
",",
"stop",
",",
"step",
"=",
"index",
".",
"indices",
"(",
"len",
"(",
"self",
")",
")",
"for",
"i",
"in",
"range",
"(",
"start",
",",
"stop",
",",
"step",
")",
":",
"yield",
"self",
".",
"get_blob",
"(",
"i",
")"
] | A simple slice generator for iterations | [
"A",
"simple",
"slice",
"generator",
"for",
"iterations"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/jpp.py#L270-L274 | train |
tapilab/brandelion | brandelion/cli/diagnose.py | correlation_by_exemplar | def correlation_by_exemplar(brands, exemplars, validation_scores, analyze_fn_str, outf):
""" Report the overall correlation with the validation scores using each exemplar in isolation. """
analyze_fn = getattr(analyze, analyze_fn_str)
keys = sorted(k for k in validation_scores.keys() if k in set(x[0] for x in brands))
truth = [validation_scores[k] for k in keys]
result = {}
outf.write('exemplar\tcorr\tn_followers\n')
outf.flush()
for exemplar in exemplars:
single_exemplar = {exemplar: exemplars[exemplar]}
social_scores = analyze_fn(brands, single_exemplar)
predicted = [social_scores[k] for k in keys]
outf.write('%s\t%g\t%d\n' % (exemplar, scistat.pearsonr(predicted, truth)[0], len(exemplars[exemplar])))
outf.flush()
result[exemplar] = scistat.pearsonr(predicted, truth)[0]
outf.close()
return result | python | def correlation_by_exemplar(brands, exemplars, validation_scores, analyze_fn_str, outf):
""" Report the overall correlation with the validation scores using each exemplar in isolation. """
analyze_fn = getattr(analyze, analyze_fn_str)
keys = sorted(k for k in validation_scores.keys() if k in set(x[0] for x in brands))
truth = [validation_scores[k] for k in keys]
result = {}
outf.write('exemplar\tcorr\tn_followers\n')
outf.flush()
for exemplar in exemplars:
single_exemplar = {exemplar: exemplars[exemplar]}
social_scores = analyze_fn(brands, single_exemplar)
predicted = [social_scores[k] for k in keys]
outf.write('%s\t%g\t%d\n' % (exemplar, scistat.pearsonr(predicted, truth)[0], len(exemplars[exemplar])))
outf.flush()
result[exemplar] = scistat.pearsonr(predicted, truth)[0]
outf.close()
return result | [
"def",
"correlation_by_exemplar",
"(",
"brands",
",",
"exemplars",
",",
"validation_scores",
",",
"analyze_fn_str",
",",
"outf",
")",
":",
"analyze_fn",
"=",
"getattr",
"(",
"analyze",
",",
"analyze_fn_str",
")",
"keys",
"=",
"sorted",
"(",
"k",
"for",
"k",
"in",
"validation_scores",
".",
"keys",
"(",
")",
"if",
"k",
"in",
"set",
"(",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"brands",
")",
")",
"truth",
"=",
"[",
"validation_scores",
"[",
"k",
"]",
"for",
"k",
"in",
"keys",
"]",
"result",
"=",
"{",
"}",
"outf",
".",
"write",
"(",
"'exemplar\\tcorr\\tn_followers\\n'",
")",
"outf",
".",
"flush",
"(",
")",
"for",
"exemplar",
"in",
"exemplars",
":",
"single_exemplar",
"=",
"{",
"exemplar",
":",
"exemplars",
"[",
"exemplar",
"]",
"}",
"social_scores",
"=",
"analyze_fn",
"(",
"brands",
",",
"single_exemplar",
")",
"predicted",
"=",
"[",
"social_scores",
"[",
"k",
"]",
"for",
"k",
"in",
"keys",
"]",
"outf",
".",
"write",
"(",
"'%s\\t%g\\t%d\\n'",
"%",
"(",
"exemplar",
",",
"scistat",
".",
"pearsonr",
"(",
"predicted",
",",
"truth",
")",
"[",
"0",
"]",
",",
"len",
"(",
"exemplars",
"[",
"exemplar",
"]",
")",
")",
")",
"outf",
".",
"flush",
"(",
")",
"result",
"[",
"exemplar",
"]",
"=",
"scistat",
".",
"pearsonr",
"(",
"predicted",
",",
"truth",
")",
"[",
"0",
"]",
"outf",
".",
"close",
"(",
")",
"return",
"result"
] | Report the overall correlation with the validation scores using each exemplar in isolation. | [
"Report",
"the",
"overall",
"correlation",
"with",
"the",
"validation",
"scores",
"using",
"each",
"exemplar",
"in",
"isolation",
"."
] | 40a5a5333cf704182c8666d1fbbbdadc7ff88546 | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/diagnose.py#L50-L66 | train |
IRC-SPHERE/HyperStream | hyperstream/node/node.py | Node.difference | def difference(self, other):
"""
Summarise the differences between this node and the other node.
:param other: The other node
:return: A tuple containing the diff, the counts of the diff, and whether this plate is a sub-plate of the other
:type other: Node
"""
diff = (tuple(set(self.plates) - set(other.plates)), tuple(set(other.plates) - set(self.plates)))
counts = map(len, diff)
# is_sub_plate = counts == [1, 1] and diff[1][0].is_sub_plate(diff[0][0])
is_sub_plate = counts == [1, 1] and diff[0][0].is_sub_plate(diff[1][0]) # MK fixed
if len(other.plates) == 1 and counts == [1, 0] and diff[0][0].parent == other.plates[0].parent:
is_sub_plate = True
return diff, counts, is_sub_plate | python | def difference(self, other):
"""
Summarise the differences between this node and the other node.
:param other: The other node
:return: A tuple containing the diff, the counts of the diff, and whether this plate is a sub-plate of the other
:type other: Node
"""
diff = (tuple(set(self.plates) - set(other.plates)), tuple(set(other.plates) - set(self.plates)))
counts = map(len, diff)
# is_sub_plate = counts == [1, 1] and diff[1][0].is_sub_plate(diff[0][0])
is_sub_plate = counts == [1, 1] and diff[0][0].is_sub_plate(diff[1][0]) # MK fixed
if len(other.plates) == 1 and counts == [1, 0] and diff[0][0].parent == other.plates[0].parent:
is_sub_plate = True
return diff, counts, is_sub_plate | [
"def",
"difference",
"(",
"self",
",",
"other",
")",
":",
"diff",
"=",
"(",
"tuple",
"(",
"set",
"(",
"self",
".",
"plates",
")",
"-",
"set",
"(",
"other",
".",
"plates",
")",
")",
",",
"tuple",
"(",
"set",
"(",
"other",
".",
"plates",
")",
"-",
"set",
"(",
"self",
".",
"plates",
")",
")",
")",
"counts",
"=",
"map",
"(",
"len",
",",
"diff",
")",
"# is_sub_plate = counts == [1, 1] and diff[1][0].is_sub_plate(diff[0][0])",
"is_sub_plate",
"=",
"counts",
"==",
"[",
"1",
",",
"1",
"]",
"and",
"diff",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"is_sub_plate",
"(",
"diff",
"[",
"1",
"]",
"[",
"0",
"]",
")",
"# MK fixed",
"if",
"len",
"(",
"other",
".",
"plates",
")",
"==",
"1",
"and",
"counts",
"==",
"[",
"1",
",",
"0",
"]",
"and",
"diff",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"parent",
"==",
"other",
".",
"plates",
"[",
"0",
"]",
".",
"parent",
":",
"is_sub_plate",
"=",
"True",
"return",
"diff",
",",
"counts",
",",
"is_sub_plate"
] | Summarise the differences between this node and the other node.
:param other: The other node
:return: A tuple containing the diff, the counts of the diff, and whether this plate is a sub-plate of the other
:type other: Node | [
"Summarise",
"the",
"differences",
"between",
"this",
"node",
"and",
"the",
"other",
"node",
"."
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/node/node.py#L113-L127 | train |
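One portability note on the difference method above: under Python 3, map(len, diff) returns a lazy iterator, so the comparison counts == [1, 1] is always False. A self-contained illustration and the obvious hedged fix (materialise the counts first):

diff = ((1,), (2,))
print(map(len, diff) == [1, 1])        # False on Python 3: map() yields an iterator
print(list(map(len, diff)) == [1, 1])  # True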
astooke/gtimer | gtimer/public/report.py | report | def report(times=None,
include_itrs=True,
include_stats=True,
delim_mode=False,
format_options=None):
"""
Produce a formatted report of the current timing data.
Notes:
When reporting a collection of parallel subdivisions, only the one with
the greatest total time is reported on, and the rest are ignored (no
branching). To compare parallel subdivisions use compare().
Args:
times (Times, optional): Times object to report on. If not provided,
uses current root timer.
        include_itrs (bool, optional): Display individual iteration times.
include_stats (bool, optional): Display iteration statistics.
delim_mode (bool, optional): If True, format for spreadsheet.
format_options (dict, optional): Formatting options, see below.
Formatting Keywords & Defaults:
Human-Readable Mode
- 'stamp_name_width': 20
- 'itr_tab_width': 2
- 'itr_num_width': 6
- 'itr_name_width': 12
- 'indent_symbol': ' ' (two spaces)
- 'parallel_symbol': '(par)'
Delimited Mode
- 'delimiter': '\t' (tab)
- 'ident_symbol': '+'
- 'parallel_symbol': '(par)'
Returns:
str: Timing data report as formatted string.
Raises:
TypeError: If 'times' param is used and value is not a Times object.
"""
if times is None:
if f.root.stopped:
return report_loc.report(f.root.times,
include_itrs,
include_stats,
delim_mode,
format_options)
else:
t = timer()
rep = report_loc.report(collapse.collapse_times(),
include_itrs,
include_stats,
delim_mode,
format_options,
timer_state='running')
f.root.self_cut += timer() - t
return rep
else:
if not isinstance(times, Times):
raise TypeError("Expected Times instance for param 'times' (default is root).")
return report_loc.report(times,
include_itrs,
include_stats,
delim_mode,
format_options) | python | def report(times=None,
include_itrs=True,
include_stats=True,
delim_mode=False,
format_options=None):
"""
Produce a formatted report of the current timing data.
Notes:
When reporting a collection of parallel subdivisions, only the one with
the greatest total time is reported on, and the rest are ignored (no
branching). To compare parallel subdivisions use compare().
Args:
times (Times, optional): Times object to report on. If not provided,
uses current root timer.
        include_itrs (bool, optional): Display individual iteration times.
include_stats (bool, optional): Display iteration statistics.
delim_mode (bool, optional): If True, format for spreadsheet.
format_options (dict, optional): Formatting options, see below.
Formatting Keywords & Defaults:
Human-Readable Mode
- 'stamp_name_width': 20
- 'itr_tab_width': 2
- 'itr_num_width': 6
- 'itr_name_width': 12
- 'indent_symbol': ' ' (two spaces)
- 'parallel_symbol': '(par)'
Delimited Mode
- 'delimiter': '\t' (tab)
- 'ident_symbol': '+'
- 'parallel_symbol': '(par)'
Returns:
str: Timing data report as formatted string.
Raises:
TypeError: If 'times' param is used and value is not a Times object.
"""
if times is None:
if f.root.stopped:
return report_loc.report(f.root.times,
include_itrs,
include_stats,
delim_mode,
format_options)
else:
t = timer()
rep = report_loc.report(collapse.collapse_times(),
include_itrs,
include_stats,
delim_mode,
format_options,
timer_state='running')
f.root.self_cut += timer() - t
return rep
else:
if not isinstance(times, Times):
raise TypeError("Expected Times instance for param 'times' (default is root).")
return report_loc.report(times,
include_itrs,
include_stats,
delim_mode,
format_options) | [
"def",
"report",
"(",
"times",
"=",
"None",
",",
"include_itrs",
"=",
"True",
",",
"include_stats",
"=",
"True",
",",
"delim_mode",
"=",
"False",
",",
"format_options",
"=",
"None",
")",
":",
"if",
"times",
"is",
"None",
":",
"if",
"f",
".",
"root",
".",
"stopped",
":",
"return",
"report_loc",
".",
"report",
"(",
"f",
".",
"root",
".",
"times",
",",
"include_itrs",
",",
"include_stats",
",",
"delim_mode",
",",
"format_options",
")",
"else",
":",
"t",
"=",
"timer",
"(",
")",
"rep",
"=",
"report_loc",
".",
"report",
"(",
"collapse",
".",
"collapse_times",
"(",
")",
",",
"include_itrs",
",",
"include_stats",
",",
"delim_mode",
",",
"format_options",
",",
"timer_state",
"=",
"'running'",
")",
"f",
".",
"root",
".",
"self_cut",
"+=",
"timer",
"(",
")",
"-",
"t",
"return",
"rep",
"else",
":",
"if",
"not",
"isinstance",
"(",
"times",
",",
"Times",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected Times instance for param 'times' (default is root).\"",
")",
"return",
"report_loc",
".",
"report",
"(",
"times",
",",
"include_itrs",
",",
"include_stats",
",",
"delim_mode",
",",
"format_options",
")"
] | Produce a formatted report of the current timing data.
Notes:
When reporting a collection of parallel subdivisions, only the one with
the greatest total time is reported on, and the rest are ignored (no
branching). To compare parallel subdivisions use compare().
Args:
times (Times, optional): Times object to report on. If not provided,
uses current root timer.
        include_itrs (bool, optional): Display individual iteration times.
include_stats (bool, optional): Display iteration statistics.
delim_mode (bool, optional): If True, format for spreadsheet.
format_options (dict, optional): Formatting options, see below.
Formatting Keywords & Defaults:
Human-Readable Mode
- 'stamp_name_width': 20
- 'itr_tab_width': 2
- 'itr_num_width': 6
- 'itr_name_width': 12
- 'indent_symbol': ' ' (two spaces)
- 'parallel_symbol': '(par)'
Delimited Mode
- 'delimiter': '\t' (tab)
- 'ident_symbol': '+'
- 'parallel_symbol': '(par)'
Returns:
str: Timing data report as formatted string.
Raises:
TypeError: If 'times' param is used and value is not a Times object. | [
"Produce",
"a",
"formatted",
"report",
"of",
"the",
"current",
"timing",
"data",
"."
] | 2146dab459e5d959feb291821733d3d3ba7c523c | https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/public/report.py#L22-L86 | train |
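A hedged usage sketch for report() above, assuming the package exposes stamp() and report() at the top level as gtimer (the exact entry points are an assumption, not taken from this record); the format_options key is one of those listed in the docstring:

import gtimer as gt

gt.stamp('setup')
# ... timed work ...
gt.stamp('work')
print(gt.report(format_options={'stamp_name_width': 30}))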
astooke/gtimer | gtimer/public/report.py | compare | def compare(times_list=None,
name=None,
include_list=True,
include_stats=True,
delim_mode=False,
format_options=None):
"""
    Produce a formatted comparison of timing data.
Notes:
If no times_list is provided, produces comparison reports on all parallel
subdivisions present at the root level of the current timer. To compare
parallel subdivisions at a lower level, get the times data, navigate
within it to the parallel list of interest, and provide that as input
here. As with report(), any further parallel subdivisions encountered
have only their member with the greatest total time reported on (no
branching).
Args:
times_list (Times, optional): list or tuple of Times objects. If not
provided, uses current root timer.
name (any, optional): Identifier, passed through str().
include_list (bool, optional): Display stamps hierarchy.
include_stats (bool, optional): Display stamp comparison statistics.
delim_mode (bool, optional): If True, format for spreadsheet.
format_options (None, optional): Formatting options, see below.
Formatting Keywords & Defaults:
Human-readable Mode
- 'stamp_name_width': 18
- 'list_column_width': 12
- 'list_tab_width': 2
- 'stat_column_width': 8
- 'stat_tab_width': 2
        - 'indent_symbol': ' ' (one space)
Delimited Mode
- 'delimiter': '\t' (tab)
- 'ident_symbol': '+'
Returns:
str: Times data comparison as formatted string.
Raises:
TypeError: If any element of provided collection is not a Times object.
"""
if times_list is None:
rep = ''
for par_dict in itervalues(f.root.times.par_subdvsn):
for par_name, par_list in iteritems(par_dict):
rep += report_loc.compare(par_list,
par_name,
include_list,
include_stats,
delim_mode,
format_options)
else:
if not isinstance(times_list, (list, tuple)):
raise TypeError("Expected a list/tuple of times instances for param 'times_list'.")
if not all([isinstance(times, Times) for times in times_list]):
raise TypeError("At least one member of param 'times_list' is not a Times object.")
rep = report_loc.compare(times_list,
name,
include_list,
include_stats,
delim_mode,
format_options)
return rep | python | def compare(times_list=None,
name=None,
include_list=True,
include_stats=True,
delim_mode=False,
format_options=None):
"""
    Produce a formatted comparison of timing data.
Notes:
If no times_list is provided, produces comparison reports on all parallel
subdivisions present at the root level of the current timer. To compare
parallel subdivisions at a lower level, get the times data, navigate
within it to the parallel list of interest, and provide that as input
here. As with report(), any further parallel subdivisions encountered
have only their member with the greatest total time reported on (no
branching).
Args:
times_list (Times, optional): list or tuple of Times objects. If not
provided, uses current root timer.
name (any, optional): Identifier, passed through str().
include_list (bool, optional): Display stamps hierarchy.
include_stats (bool, optional): Display stamp comparison statistics.
delim_mode (bool, optional): If True, format for spreadsheet.
format_options (None, optional): Formatting options, see below.
Formatting Keywords & Defaults:
Human-readable Mode
- 'stamp_name_width': 18
- 'list_column_width': 12
- 'list_tab_width': 2
- 'stat_column_width': 8
- 'stat_tab_width': 2
        - 'indent_symbol': ' ' (one space)
Delimited Mode
- 'delimiter': '\t' (tab)
- 'ident_symbol': '+'
Returns:
str: Times data comparison as formatted string.
Raises:
TypeError: If any element of provided collection is not a Times object.
"""
if times_list is None:
rep = ''
for par_dict in itervalues(f.root.times.par_subdvsn):
for par_name, par_list in iteritems(par_dict):
rep += report_loc.compare(par_list,
par_name,
include_list,
include_stats,
delim_mode,
format_options)
else:
if not isinstance(times_list, (list, tuple)):
raise TypeError("Expected a list/tuple of times instances for param 'times_list'.")
if not all([isinstance(times, Times) for times in times_list]):
raise TypeError("At least one member of param 'times_list' is not a Times object.")
rep = report_loc.compare(times_list,
name,
include_list,
include_stats,
delim_mode,
format_options)
return rep | [
"def",
"compare",
"(",
"times_list",
"=",
"None",
",",
"name",
"=",
"None",
",",
"include_list",
"=",
"True",
",",
"include_stats",
"=",
"True",
",",
"delim_mode",
"=",
"False",
",",
"format_options",
"=",
"None",
")",
":",
"if",
"times_list",
"is",
"None",
":",
"rep",
"=",
"''",
"for",
"par_dict",
"in",
"itervalues",
"(",
"f",
".",
"root",
".",
"times",
".",
"par_subdvsn",
")",
":",
"for",
"par_name",
",",
"par_list",
"in",
"iteritems",
"(",
"par_dict",
")",
":",
"rep",
"+=",
"report_loc",
".",
"compare",
"(",
"par_list",
",",
"par_name",
",",
"include_list",
",",
"include_stats",
",",
"delim_mode",
",",
"format_options",
")",
"else",
":",
"if",
"not",
"isinstance",
"(",
"times_list",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected a list/tuple of times instances for param 'times_list'.\"",
")",
"if",
"not",
"all",
"(",
"[",
"isinstance",
"(",
"times",
",",
"Times",
")",
"for",
"times",
"in",
"times_list",
"]",
")",
":",
"raise",
"TypeError",
"(",
"\"At least one member of param 'times_list' is not a Times object.\"",
")",
"rep",
"=",
"report_loc",
".",
"compare",
"(",
"times_list",
",",
"name",
",",
"include_list",
",",
"include_stats",
",",
"delim_mode",
",",
"format_options",
")",
"return",
"rep"
] | Produce a formatted comparison of timing data.
Notes:
If no times_list is provided, produces comparison reports on all parallel
subdivisions present at the root level of the current timer. To compare
parallel subdivisions at a lower level, get the times data, navigate
within it to the parallel list of interest, and provide that as input
here. As with report(), any further parallel subdivisions encountered
have only their member with the greatest total time reported on (no
branching).
Args:
times_list (Times, optional): list or tuple of Times objects. If not
provided, uses current root timer.
name (any, optional): Identifier, passed through str().
include_list (bool, optional): Display stamps hierarchy.
include_stats (bool, optional): Display stamp comparison statistics.
delim_mode (bool, optional): If True, format for spreadsheet.
format_options (None, optional): Formatting options, see below.
Formatting Keywords & Defaults:
Human-readable Mode
- 'stamp_name_width': 18
- 'list_column_width': 12
- 'list_tab_width': 2
- 'stat_column_width': 8
- 'stat_tab_width': 2
        - 'indent_symbol': ' ' (one space)
Delimited Mode
- 'delimiter': '\t' (tab)
- 'ident_symbol': '+'
Returns:
str: Times data comparison as formatted string.
Raises:
TypeError: If any element of provided collection is not a Times object. | [
"Produce",
"a",
"formatted",
"comparison",
"of",
"timing",
"datas",
"."
] | 2146dab459e5d959feb291821733d3d3ba7c523c | https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/public/report.py#L89-L155 | train |
astooke/gtimer | gtimer/public/report.py | write_structure | def write_structure(times=None):
"""
Produce a formatted record of a times data structure.
Args:
times (Times, optional): If not provided, uses the current root timer.
Returns:
str: Timer tree hierarchy in a formatted string.
Raises:
TypeError: If provided argument is not a Times object.
"""
if times is None:
return report_loc.write_structure(f.root.times)
else:
if not isinstance(times, Times):
raise TypeError("Expected Times instance for param 'times' (default is root).")
return report_loc.write_structure(times) | python | def write_structure(times=None):
"""
Produce a formatted record of a times data structure.
Args:
times (Times, optional): If not provided, uses the current root timer.
Returns:
str: Timer tree hierarchy in a formatted string.
Raises:
TypeError: If provided argument is not a Times object.
"""
if times is None:
return report_loc.write_structure(f.root.times)
else:
if not isinstance(times, Times):
raise TypeError("Expected Times instance for param 'times' (default is root).")
return report_loc.write_structure(times) | [
"def",
"write_structure",
"(",
"times",
"=",
"None",
")",
":",
"if",
"times",
"is",
"None",
":",
"return",
"report_loc",
".",
"write_structure",
"(",
"f",
".",
"root",
".",
"times",
")",
"else",
":",
"if",
"not",
"isinstance",
"(",
"times",
",",
"Times",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected Times instance for param 'times' (default is root).\"",
")",
"return",
"report_loc",
".",
"write_structure",
"(",
"times",
")"
] | Produce a formatted record of a times data structure.
Args:
times (Times, optional): If not provided, uses the current root timer.
Returns:
str: Timer tree hierarchy in a formatted string.
Raises:
TypeError: If provided argument is not a Times object. | [
"Produce",
"a",
"formatted",
"record",
"of",
"a",
"times",
"data",
"structure",
"."
] | 2146dab459e5d959feb291821733d3d3ba7c523c | https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/public/report.py#L158-L176 | train |
tamasgal/km3pipe | examples/plot_dom_hits.py | filter_muons | def filter_muons(blob):
"""Write all muons from McTracks to Muons."""
tracks = blob['McTracks']
muons = tracks[tracks.type == -13] # PDG particle code
blob["Muons"] = Table(muons)
return blob | python | def filter_muons(blob):
"""Write all muons from McTracks to Muons."""
tracks = blob['McTracks']
muons = tracks[tracks.type == -13] # PDG particle code
blob["Muons"] = Table(muons)
return blob | [
"def",
"filter_muons",
"(",
"blob",
")",
":",
"tracks",
"=",
"blob",
"[",
"'McTracks'",
"]",
"muons",
"=",
"tracks",
"[",
"tracks",
".",
"type",
"==",
"-",
"13",
"]",
"# PDG particle code",
"blob",
"[",
"\"Muons\"",
"]",
"=",
"Table",
"(",
"muons",
")",
"return",
"blob"
] | Write all muons from McTracks to Muons. | [
"Write",
"all",
"muons",
"from",
"McTracks",
"to",
"Muons",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/examples/plot_dom_hits.py#L33-L38 | train |
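A note on the filter above: in the PDG numbering scheme 13 is the mu- and -13 the mu+, so type == -13 keeps only one charge. If both charges are wanted, a hedged variant (the import path for Table is an assumption) filters on the absolute value:

import numpy as np
from km3pipe.dataclasses import Table

def filter_all_muons(blob):
    """Keep mu- and mu+ (PDG codes 13 and -13) from McTracks."""
    tracks = blob['McTracks']
    muons = tracks[np.abs(tracks.type) == 13]
    blob["Muons"] = Table(muons)
    return blob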
aouyar/healthgraph-api | samples/bottle/runkeeper_demo.py | parse_conf_files | def parse_conf_files(conf_paths):
"""Parse the configuration file and return dictionary of configuration
options.
@param conf_paths: List of configuration file paths to parse.
@return: Dictionary of configuration options.
"""
conf_file = ConfigParser.RawConfigParser()
conf_read = conf_file.read(conf_paths)
conf = {}
try:
if conf_read:
conf['client_id'] = conf_file.get('runkeeper', 'client_id')
conf['client_secret'] = conf_file.get('runkeeper', 'client_secret')
if conf_file.has_option('runkeeper', 'bindport'):
conf['bindport'] = conf_file.getint('runkeeper', 'bindport')
if conf_file.has_option('runkeeper', 'bindaddr'):
conf['bindaddr'] = conf_file.get('runkeeper', 'bindaddr')
if conf_file.has_option('runkeeper', 'baseurl'):
conf['baseurl'] = conf_file.get('runkeeper', 'baseurl')
return conf
except ConfigParser.Error:
raise ConfigurationError("Error parsing configuration file(s): %s\n"
% sys.exc_info()[1])
else:
raise ConfigurationError("No valid configuration file (%s) found."
% defaultConfFilename) | python | def parse_conf_files(conf_paths):
"""Parse the configuration file and return dictionary of configuration
options.
@param conf_paths: List of configuration file paths to parse.
@return: Dictionary of configuration options.
"""
conf_file = ConfigParser.RawConfigParser()
conf_read = conf_file.read(conf_paths)
conf = {}
try:
if conf_read:
conf['client_id'] = conf_file.get('runkeeper', 'client_id')
conf['client_secret'] = conf_file.get('runkeeper', 'client_secret')
if conf_file.has_option('runkeeper', 'bindport'):
conf['bindport'] = conf_file.getint('runkeeper', 'bindport')
if conf_file.has_option('runkeeper', 'bindaddr'):
conf['bindaddr'] = conf_file.get('runkeeper', 'bindaddr')
if conf_file.has_option('runkeeper', 'baseurl'):
conf['baseurl'] = conf_file.get('runkeeper', 'baseurl')
return conf
except ConfigParser.Error:
raise ConfigurationError("Error parsing configuration file(s): %s\n"
% sys.exc_info()[1])
else:
raise ConfigurationError("No valid configuration file (%s) found."
% defaultConfFilename) | [
"def",
"parse_conf_files",
"(",
"conf_paths",
")",
":",
"conf_file",
"=",
"ConfigParser",
".",
"RawConfigParser",
"(",
")",
"conf_read",
"=",
"conf_file",
".",
"read",
"(",
"conf_paths",
")",
"conf",
"=",
"{",
"}",
"try",
":",
"if",
"conf_read",
":",
"conf",
"[",
"'client_id'",
"]",
"=",
"conf_file",
".",
"get",
"(",
"'runkeeper'",
",",
"'client_id'",
")",
"conf",
"[",
"'client_secret'",
"]",
"=",
"conf_file",
".",
"get",
"(",
"'runkeeper'",
",",
"'client_secret'",
")",
"if",
"conf_file",
".",
"has_option",
"(",
"'runkeeper'",
",",
"'bindport'",
")",
":",
"conf",
"[",
"'bindport'",
"]",
"=",
"conf_file",
".",
"getint",
"(",
"'runkeeper'",
",",
"'bindport'",
")",
"if",
"conf_file",
".",
"has_option",
"(",
"'runkeeper'",
",",
"'bindaddr'",
")",
":",
"conf",
"[",
"'bindaddr'",
"]",
"=",
"conf_file",
".",
"get",
"(",
"'runkeeper'",
",",
"'bindaddr'",
")",
"if",
"conf_file",
".",
"has_option",
"(",
"'runkeeper'",
",",
"'baseurl'",
")",
":",
"conf",
"[",
"'baseurl'",
"]",
"=",
"conf_file",
".",
"get",
"(",
"'runkeeper'",
",",
"'baseurl'",
")",
"return",
"conf",
"except",
"ConfigParser",
".",
"Error",
":",
"raise",
"ConfigurationError",
"(",
"\"Error parsing configuration file(s): %s\\n\"",
"%",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
")",
"else",
":",
"raise",
"ConfigurationError",
"(",
"\"No valid configuration file (%s) found.\"",
"%",
"defaultConfFilename",
")"
] | Parse the configuration file and return dictionary of configuration
options.
@param conf_paths: List of configuration file paths to parse.
@return: Dictionary of configuration options. | [
"Parse",
"the",
"configuration",
"file",
"and",
"return",
"dictionary",
"of",
"configuration",
"options",
"."
] | fc5135ab353ca1f05e8a70ec784ff921e686c072 | https://github.com/aouyar/healthgraph-api/blob/fc5135ab353ca1f05e8a70ec784ff921e686c072/samples/bottle/runkeeper_demo.py#L148-L175 | train |
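For parse_conf_files above, the expected INI layout follows directly from the section and option names in the code; a minimal configuration sketch (all values are placeholders, not application defaults):

[runkeeper]
client_id = your_client_id
client_secret = your_client_secret
bindport = 8000
bindaddr = 127.0.0.1
baseurl = http://localhost:8000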
aouyar/healthgraph-api | samples/bottle/runkeeper_demo.py | main | def main(argv=None):
"""Main Block - Configure and run the Bottle Web Server."""
cmd_opts = parse_cmdline(argv)[0]
if cmd_opts.confpath is not None:
if os.path.exists(cmd_opts.confpath):
conf_paths = [cmd_opts.confpath,]
else:
return "Configuration file not found: %s" % cmd_opts.confpath
else:
conf_paths = [os.path.join(path, defaultConfFilename)
for path in ('/etc', '.',)]
try:
conf.update(parse_conf_files(conf_paths))
except ConfigurationError:
return(sys.exc_info()[1])
if cmd_opts.bindport is not None:
conf['bindport'] = cmd_opts.bindport
if cmd_opts.bindaddr is not None:
conf['bindaddr'] = cmd_opts.bindaddr
if cmd_opts.baseurl is not None:
conf['baseurl'] = cmd_opts.baseurl
if cmd_opts.devel:
from bottle import debug
debug(True)
app = SessionMiddleware(bottle.app(), sessionOpts)
bottle.run(app=app, host=conf['bindaddr'], port=conf['bindport'],
reloader=cmd_opts.devel) | python | def main(argv=None):
"""Main Block - Configure and run the Bottle Web Server."""
cmd_opts = parse_cmdline(argv)[0]
if cmd_opts.confpath is not None:
if os.path.exists(cmd_opts.confpath):
conf_paths = [cmd_opts.confpath,]
else:
return "Configuration file not found: %s" % cmd_opts.confpath
else:
conf_paths = [os.path.join(path, defaultConfFilename)
for path in ('/etc', '.',)]
try:
conf.update(parse_conf_files(conf_paths))
except ConfigurationError:
return(sys.exc_info()[1])
if cmd_opts.bindport is not None:
conf['bindport'] = cmd_opts.bindport
if cmd_opts.bindaddr is not None:
conf['bindaddr'] = cmd_opts.bindaddr
if cmd_opts.baseurl is not None:
conf['baseurl'] = cmd_opts.baseurl
if cmd_opts.devel:
from bottle import debug
debug(True)
app = SessionMiddleware(bottle.app(), sessionOpts)
bottle.run(app=app, host=conf['bindaddr'], port=conf['bindport'],
reloader=cmd_opts.devel) | [
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"cmd_opts",
"=",
"parse_cmdline",
"(",
"argv",
")",
"[",
"0",
"]",
"if",
"cmd_opts",
".",
"confpath",
"is",
"not",
"None",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"cmd_opts",
".",
"confpath",
")",
":",
"conf_paths",
"=",
"[",
"cmd_opts",
".",
"confpath",
",",
"]",
"else",
":",
"return",
"\"Configuration file not found: %s\"",
"%",
"cmd_opts",
".",
"confpath",
"else",
":",
"conf_paths",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"defaultConfFilename",
")",
"for",
"path",
"in",
"(",
"'/etc'",
",",
"'.'",
",",
")",
"]",
"try",
":",
"conf",
".",
"update",
"(",
"parse_conf_files",
"(",
"conf_paths",
")",
")",
"except",
"ConfigurationError",
":",
"return",
"(",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
")",
"if",
"cmd_opts",
".",
"bindport",
"is",
"not",
"None",
":",
"conf",
"[",
"'bindport'",
"]",
"=",
"cmd_opts",
".",
"bindport",
"if",
"cmd_opts",
".",
"bindaddr",
"is",
"not",
"None",
":",
"conf",
"[",
"'bindaddr'",
"]",
"=",
"cmd_opts",
".",
"bindaddr",
"if",
"cmd_opts",
".",
"baseurl",
"is",
"not",
"None",
":",
"conf",
"[",
"'baseurl'",
"]",
"=",
"cmd_opts",
".",
"baseurl",
"if",
"cmd_opts",
".",
"devel",
":",
"from",
"bottle",
"import",
"debug",
"debug",
"(",
"True",
")",
"app",
"=",
"SessionMiddleware",
"(",
"bottle",
".",
"app",
"(",
")",
",",
"sessionOpts",
")",
"bottle",
".",
"run",
"(",
"app",
"=",
"app",
",",
"host",
"=",
"conf",
"[",
"'bindaddr'",
"]",
",",
"port",
"=",
"conf",
"[",
"'bindport'",
"]",
",",
"reloader",
"=",
"cmd_opts",
".",
"devel",
")"
] | Main Block - Configure and run the Bottle Web Server. | [
"Main",
"Block",
"-",
"Configure",
"and",
"run",
"the",
"Bottle",
"Web",
"Server",
"."
] | fc5135ab353ca1f05e8a70ec784ff921e686c072 | https://github.com/aouyar/healthgraph-api/blob/fc5135ab353ca1f05e8a70ec784ff921e686c072/samples/bottle/runkeeper_demo.py#L178-L204 | train |
NaPs/Kolekto | kolekto/helpers.py | get_hash | def get_hash(input_string):
""" Return the hash of the movie depending on the input string.
If the input string looks like a symbolic link to a movie in a Kolekto
    tree, return its movie's hash, else, return the input directly in lowercase.
"""
# Check if the input looks like a link to a movie:
if os.path.islink(input_string):
directory, movie_hash = os.path.split(os.readlink(input_string))
input_string = movie_hash
return input_string.lower() | python | def get_hash(input_string):
""" Return the hash of the movie depending on the input string.
If the input string looks like a symbolic link to a movie in a Kolekto
    tree, return its movie's hash, else, return the input directly in lowercase.
"""
# Check if the input looks like a link to a movie:
if os.path.islink(input_string):
directory, movie_hash = os.path.split(os.readlink(input_string))
input_string = movie_hash
return input_string.lower() | [
"def",
"get_hash",
"(",
"input_string",
")",
":",
"# Check if the input looks like a link to a movie:",
"if",
"os",
".",
"path",
".",
"islink",
"(",
"input_string",
")",
":",
"directory",
",",
"movie_hash",
"=",
"os",
".",
"path",
".",
"split",
"(",
"os",
".",
"readlink",
"(",
"input_string",
")",
")",
"input_string",
"=",
"movie_hash",
"return",
"input_string",
".",
"lower",
"(",
")"
] | Return the hash of the movie depending on the input string.
If the input string looks like a symbolic link to a movie in a Kolekto
    tree, return its movie's hash, else, return the input directly in lowercase.
"Return",
"the",
"hash",
"of",
"the",
"movie",
"depending",
"on",
"the",
"input",
"string",
"."
] | 29c5469da8782780a06bf9a76c59414bb6fd8fe3 | https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/helpers.py#L8-L20 | train |
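A hedged sketch of get_hash above: a plain string is simply lower-cased, while a symlink into a Kolekto tree resolves to the hash at the end of its target path (the paths below are hypothetical):

import os
from kolekto.helpers import get_hash

print(get_hash('AbCdEf0123'))  # -> 'abcdef0123'

os.symlink('/data/kolekto/movies/DEADBEEF', '/tmp/My Movie (2010).mkv')
print(get_hash('/tmp/My Movie (2010).mkv'))  # -> 'deadbeef'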
NaPs/Kolekto | kolekto/helpers.py | JsonDbm.get | def get(self, key):
""" Get data associated with provided key.
"""
return self._object_class(json.loads(self._db[key])) | python | def get(self, key):
""" Get data associated with provided key.
"""
return self._object_class(json.loads(self._db[key])) | [
"def",
"get",
"(",
"self",
",",
"key",
")",
":",
"return",
"self",
".",
"_object_class",
"(",
"json",
".",
"loads",
"(",
"self",
".",
"_db",
"[",
"key",
"]",
")",
")"
] | Get data associated with provided key. | [
"Get",
"data",
"associated",
"with",
"provided",
"key",
"."
] | 29c5469da8782780a06bf9a76c59414bb6fd8fe3 | https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/helpers.py#L35-L38 | train |
NaPs/Kolekto | kolekto/helpers.py | JsonDbm.save | def save(self, key, data):
""" Save data associated with key.
"""
self._db[key] = json.dumps(data)
self._db.sync() | python | def save(self, key, data):
""" Save data associated with key.
"""
self._db[key] = json.dumps(data)
self._db.sync() | [
"def",
"save",
"(",
"self",
",",
"key",
",",
"data",
")",
":",
"self",
".",
"_db",
"[",
"key",
"]",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
"self",
".",
"_db",
".",
"sync",
"(",
")"
] | Save data associated with key. | [
"Save",
"data",
"associated",
"with",
"key",
"."
] | 29c5469da8782780a06bf9a76c59414bb6fd8fe3 | https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/helpers.py#L45-L49 | train |
IRC-SPHERE/HyperStream | hyperstream/meta_data/meta_data_manager.py | MetaDataManager.global_meta_data | def global_meta_data(self):
"""
Get the global meta data, which will be stored in a tree structure
:return: The global meta data
"""
with switch_db(MetaDataModel, 'hyperstream'):
return sorted(map(lambda x: x.to_dict(), MetaDataModel.objects),
key=lambda x: len(x['identifier'].split('.')),
reverse=True) | python | def global_meta_data(self):
"""
Get the global meta data, which will be stored in a tree structure
:return: The global meta data
"""
with switch_db(MetaDataModel, 'hyperstream'):
return sorted(map(lambda x: x.to_dict(), MetaDataModel.objects),
key=lambda x: len(x['identifier'].split('.')),
reverse=True) | [
"def",
"global_meta_data",
"(",
"self",
")",
":",
"with",
"switch_db",
"(",
"MetaDataModel",
",",
"'hyperstream'",
")",
":",
"return",
"sorted",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"to_dict",
"(",
")",
",",
"MetaDataModel",
".",
"objects",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"len",
"(",
"x",
"[",
"'identifier'",
"]",
".",
"split",
"(",
"'.'",
")",
")",
",",
"reverse",
"=",
"True",
")"
] | Get the global meta data, which will be stored in a tree structure
:return: The global meta data | [
"Get",
"the",
"global",
"meta",
"data",
"which",
"will",
"be",
"stored",
"in",
"a",
"tree",
"structure"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/meta_data/meta_data_manager.py#L56-L65 | train |
IRC-SPHERE/HyperStream | hyperstream/meta_data/meta_data_manager.py | MetaDataManager.insert | def insert(self, tag, identifier, parent, data):
"""
Insert the given meta data into the database
:param tag: The tag (equates to meta_data_id)
:param identifier: The identifier (a combination of the meta_data_id and the plate value)
:param parent: The parent plate identifier
:param data: The data (plate value)
:return: None
"""
# First try to add it into the tree
if self.global_plate_definitions.contains(identifier):
raise KeyError("Identifier {} already exists in tree".format(identifier))
self.global_plate_definitions.create_node(tag=tag, identifier=identifier, parent=parent, data=data)
# Now try to add it into the database
with switch_db(MetaDataModel, 'hyperstream'):
meta_data = MetaDataModel(tag=tag, parent=parent, data=data)
meta_data.save()
logging.info("Meta data {} inserted".format(identifier)) | python | def insert(self, tag, identifier, parent, data):
"""
Insert the given meta data into the database
:param tag: The tag (equates to meta_data_id)
:param identifier: The identifier (a combination of the meta_data_id and the plate value)
:param parent: The parent plate identifier
:param data: The data (plate value)
:return: None
"""
# First try to add it into the tree
if self.global_plate_definitions.contains(identifier):
raise KeyError("Identifier {} already exists in tree".format(identifier))
self.global_plate_definitions.create_node(tag=tag, identifier=identifier, parent=parent, data=data)
# Now try to add it into the database
with switch_db(MetaDataModel, 'hyperstream'):
meta_data = MetaDataModel(tag=tag, parent=parent, data=data)
meta_data.save()
logging.info("Meta data {} inserted".format(identifier)) | [
"def",
"insert",
"(",
"self",
",",
"tag",
",",
"identifier",
",",
"parent",
",",
"data",
")",
":",
"# First try to add it into the tree",
"if",
"self",
".",
"global_plate_definitions",
".",
"contains",
"(",
"identifier",
")",
":",
"raise",
"KeyError",
"(",
"\"Identifier {} already exists in tree\"",
".",
"format",
"(",
"identifier",
")",
")",
"self",
".",
"global_plate_definitions",
".",
"create_node",
"(",
"tag",
"=",
"tag",
",",
"identifier",
"=",
"identifier",
",",
"parent",
"=",
"parent",
",",
"data",
"=",
"data",
")",
"# Now try to add it into the database",
"with",
"switch_db",
"(",
"MetaDataModel",
",",
"'hyperstream'",
")",
":",
"meta_data",
"=",
"MetaDataModel",
"(",
"tag",
"=",
"tag",
",",
"parent",
"=",
"parent",
",",
"data",
"=",
"data",
")",
"meta_data",
".",
"save",
"(",
")",
"logging",
".",
"info",
"(",
"\"Meta data {} inserted\"",
".",
"format",
"(",
"identifier",
")",
")"
] | Insert the given meta data into the database
:param tag: The tag (equates to meta_data_id)
:param identifier: The identifier (a combination of the meta_data_id and the plate value)
:param parent: The parent plate identifier
:param data: The data (plate value)
:return: None | [
"Insert",
"the",
"given",
"meta",
"data",
"into",
"the",
"database"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/meta_data/meta_data_manager.py#L76-L97 | train |
IRC-SPHERE/HyperStream | hyperstream/meta_data/meta_data_manager.py | MetaDataManager.delete | def delete(self, identifier):
"""
Delete the meta data with the given identifier from the database
:param identifier: The identifier
:return: None
"""
try:
node = self.global_plate_definitions[identifier]
except NodeIDAbsentError:
logging.info("Meta data {} not present during deletion".format(identifier))
return
# First delete any children of the node: REMOVED as this seemed to be unreliable
# It's now better to call delete_plate with delete_meta_data=True
# for child in node.fpointer:
# self.delete(child)
self.global_plate_definitions.remove_node(identifier)
with switch_db(MetaDataModel, 'hyperstream'):
meta_data = MetaDataModel.objects(tag=node.tag, data=node.data, parent=node.bpointer).first()
if meta_data is not None:
meta_data.delete()
logging.info("Meta data {} deleted".format(identifier)) | python | def delete(self, identifier):
"""
Delete the meta data with the given identifier from the database
:param identifier: The identifier
:return: None
"""
try:
node = self.global_plate_definitions[identifier]
except NodeIDAbsentError:
logging.info("Meta data {} not present during deletion".format(identifier))
return
# First delete any children of the node: REMOVED as this seemed to be unreliable
# It's now better to call delete_plate with delete_meta_data=True
# for child in node.fpointer:
# self.delete(child)
self.global_plate_definitions.remove_node(identifier)
with switch_db(MetaDataModel, 'hyperstream'):
meta_data = MetaDataModel.objects(tag=node.tag, data=node.data, parent=node.bpointer).first()
if meta_data is not None:
meta_data.delete()
logging.info("Meta data {} deleted".format(identifier)) | [
"def",
"delete",
"(",
"self",
",",
"identifier",
")",
":",
"try",
":",
"node",
"=",
"self",
".",
"global_plate_definitions",
"[",
"identifier",
"]",
"except",
"NodeIDAbsentError",
":",
"logging",
".",
"info",
"(",
"\"Meta data {} not present during deletion\"",
".",
"format",
"(",
"identifier",
")",
")",
"return",
"# First delete any children of the node: REMOVED as this seemed to be unreliable",
"# It's now better to call delete_plate with delete_meta_data=True",
"# for child in node.fpointer:",
"# self.delete(child)",
"self",
".",
"global_plate_definitions",
".",
"remove_node",
"(",
"identifier",
")",
"with",
"switch_db",
"(",
"MetaDataModel",
",",
"'hyperstream'",
")",
":",
"meta_data",
"=",
"MetaDataModel",
".",
"objects",
"(",
"tag",
"=",
"node",
".",
"tag",
",",
"data",
"=",
"node",
".",
"data",
",",
"parent",
"=",
"node",
".",
"bpointer",
")",
".",
"first",
"(",
")",
"if",
"meta_data",
"is",
"not",
"None",
":",
"meta_data",
".",
"delete",
"(",
")",
"logging",
".",
"info",
"(",
"\"Meta data {} deleted\"",
".",
"format",
"(",
"identifier",
")",
")"
] | Delete the meta data with the given identifier from the database
:param identifier: The identifier
:return: None | [
"Delete",
"the",
"meta",
"data",
"with",
"the",
"given",
"identifier",
"from",
"the",
"database"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/meta_data/meta_data_manager.py#L99-L125 | train |
htm-community/menorah | menorah/riverstream.py | RiverStream.load | def load(self):
"""
Loads this stream by calling River View for data.
"""
print "Loading data for %s..." % self.getName()
self._dataHandle = self._stream.data(
since=self._since, until=self._until,
limit=self._limit, aggregate=self._aggregate
)
self._data = self._dataHandle.data()
self._headers = self._dataHandle.headers()
print "Loaded %i rows." % len(self) | python | def load(self):
"""
Loads this stream by calling River View for data.
"""
print "Loading data for %s..." % self.getName()
self._dataHandle = self._stream.data(
since=self._since, until=self._until,
limit=self._limit, aggregate=self._aggregate
)
self._data = self._dataHandle.data()
self._headers = self._dataHandle.headers()
print "Loaded %i rows." % len(self) | [
"def",
"load",
"(",
"self",
")",
":",
"print",
"\"Loading data for %s...\"",
"%",
"self",
".",
"getName",
"(",
")",
"self",
".",
"_dataHandle",
"=",
"self",
".",
"_stream",
".",
"data",
"(",
"since",
"=",
"self",
".",
"_since",
",",
"until",
"=",
"self",
".",
"_until",
",",
"limit",
"=",
"self",
".",
"_limit",
",",
"aggregate",
"=",
"self",
".",
"_aggregate",
")",
"self",
".",
"_data",
"=",
"self",
".",
"_dataHandle",
".",
"data",
"(",
")",
"self",
".",
"_headers",
"=",
"self",
".",
"_dataHandle",
".",
"headers",
"(",
")",
"print",
"\"Loaded %i rows.\"",
"%",
"len",
"(",
"self",
")"
] | Loads this stream by calling River View for data. | [
"Loads",
"this",
"stream",
"by",
"calling",
"River",
"View",
"for",
"data",
"."
] | 1991b01eda3f6361b22ed165b4a688ae3fb2deaf | https://github.com/htm-community/menorah/blob/1991b01eda3f6361b22ed165b4a688ae3fb2deaf/menorah/riverstream.py#L80-L91 | train |
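The method body above is Python 2 (bare print statements). A hedged driver sketch, kept as comments because the RiverStream constructor and its River View client are not part of the record:

# stream = ...        # a RiverStream built elsewhere, with _stream, _since, _until, _limit and _aggregate already set
# stream.load()       # calls self._stream.data(...) and caches .data() and .headers() on the instance
# print len(stream)   # Python 2 style, matching the record; load() also prints this row count itself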
tamasgal/km3pipe | km3pipe/plot.py | hexbin | def hexbin(x, y, color="purple", **kwargs):
"""Seaborn-compatible hexbin plot.
See also: http://seaborn.pydata.org/tutorial/axis_grids.html#mapping-custom-functions-onto-the-grid
"""
if HAS_SEABORN:
cmap = sns.light_palette(color, as_cmap=True)
else:
cmap = "Purples"
plt.hexbin(x, y, cmap=cmap, **kwargs) | python | def hexbin(x, y, color="purple", **kwargs):
"""Seaborn-compatible hexbin plot.
See also: http://seaborn.pydata.org/tutorial/axis_grids.html#mapping-custom-functions-onto-the-grid
"""
if HAS_SEABORN:
cmap = sns.light_palette(color, as_cmap=True)
else:
cmap = "Purples"
plt.hexbin(x, y, cmap=cmap, **kwargs) | [
"def",
"hexbin",
"(",
"x",
",",
"y",
",",
"color",
"=",
"\"purple\"",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"HAS_SEABORN",
":",
"cmap",
"=",
"sns",
".",
"light_palette",
"(",
"color",
",",
"as_cmap",
"=",
"True",
")",
"else",
":",
"cmap",
"=",
"\"Purples\"",
"plt",
".",
"hexbin",
"(",
"x",
",",
"y",
",",
"cmap",
"=",
"cmap",
",",
"*",
"*",
"kwargs",
")"
] | Seaborn-compatible hexbin plot.
See also: http://seaborn.pydata.org/tutorial/axis_grids.html#mapping-custom-functions-onto-the-grid | [
"Seaborn",
"-",
"compatible",
"hexbin",
"plot",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/plot.py#L33-L42 | train |
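A small usage sketch for hexbin; the import path is inferred from the record's url (km3pipe/plot.py), and gridsize is an ordinary plt.hexbin keyword forwarded through **kwargs:

import numpy as np
import matplotlib.pyplot as plt
from km3pipe.plot import hexbin   # module path taken from the record's url

x, y = np.random.normal(size=(2, 1000))
hexbin(x, y, color="purple", gridsize=30)   # uses a seaborn light palette if available, else falls back to "Purples"
plt.show()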
tamasgal/km3pipe | km3pipe/plot.py | diag | def diag(ax=None, linecolor='0.0', linestyle='--', **kwargs):
"""Plot the diagonal."""
ax = get_ax(ax)
xy_min = np.min((ax.get_xlim(), ax.get_ylim()))
xy_max = np.max((ax.get_ylim(), ax.get_xlim()))
return ax.plot([xy_min, xy_max], [xy_min, xy_max],
ls=linestyle,
c=linecolor,
**kwargs) | python | def diag(ax=None, linecolor='0.0', linestyle='--', **kwargs):
"""Plot the diagonal."""
ax = get_ax(ax)
xy_min = np.min((ax.get_xlim(), ax.get_ylim()))
xy_max = np.max((ax.get_ylim(), ax.get_xlim()))
return ax.plot([xy_min, xy_max], [xy_min, xy_max],
ls=linestyle,
c=linecolor,
**kwargs) | [
"def",
"diag",
"(",
"ax",
"=",
"None",
",",
"linecolor",
"=",
"'0.0'",
",",
"linestyle",
"=",
"'--'",
",",
"*",
"*",
"kwargs",
")",
":",
"ax",
"=",
"get_ax",
"(",
"ax",
")",
"xy_min",
"=",
"np",
".",
"min",
"(",
"(",
"ax",
".",
"get_xlim",
"(",
")",
",",
"ax",
".",
"get_ylim",
"(",
")",
")",
")",
"xy_max",
"=",
"np",
".",
"max",
"(",
"(",
"ax",
".",
"get_ylim",
"(",
")",
",",
"ax",
".",
"get_xlim",
"(",
")",
")",
")",
"return",
"ax",
".",
"plot",
"(",
"[",
"xy_min",
",",
"xy_max",
"]",
",",
"[",
"xy_min",
",",
"xy_max",
"]",
",",
"ls",
"=",
"linestyle",
",",
"c",
"=",
"linecolor",
",",
"*",
"*",
"kwargs",
")"
] | Plot the diagonal. | [
"Plot",
"the",
"diagonal",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/plot.py#L52-L60 | train |
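A usage sketch for diag, assuming the get_ax helper (not shown in the record) simply returns the axis it is given; the import path follows the record's url:

import matplotlib.pyplot as plt
from km3pipe.plot import diag   # module path taken from the record's url

fig, ax = plt.subplots()
ax.scatter([1, 2, 3], [1.1, 1.9, 3.2])
diag(ax=ax, linecolor="0.3", linestyle=":")   # draws y = x spanning the current axis limits
plt.show()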
tamasgal/km3pipe | km3pipe/plot.py | automeshgrid | def automeshgrid(
x, y, step=0.02, xstep=None, ystep=None, pad=0.5, xpad=None, ypad=None
):
"""Make a meshgrid, inferred from data."""
if xpad is None:
xpad = pad
if xstep is None:
xstep = step
if ypad is None:
ypad = pad
if ystep is None:
ystep = step
xmin = x.min() - xpad
xmax = x.max() + xpad
ymin = y.min() - ypad
ymax = y.max() + ypad
return meshgrid(xmin, xmax, step, ymin, ymax, ystep) | python | def automeshgrid(
x, y, step=0.02, xstep=None, ystep=None, pad=0.5, xpad=None, ypad=None
):
"""Make a meshgrid, inferred from data."""
if xpad is None:
xpad = pad
if xstep is None:
xstep = step
if ypad is None:
ypad = pad
if ystep is None:
ystep = step
xmin = x.min() - xpad
xmax = x.max() + xpad
ymin = y.min() - ypad
ymax = y.max() + ypad
return meshgrid(xmin, xmax, step, ymin, ymax, ystep) | [
"def",
"automeshgrid",
"(",
"x",
",",
"y",
",",
"step",
"=",
"0.02",
",",
"xstep",
"=",
"None",
",",
"ystep",
"=",
"None",
",",
"pad",
"=",
"0.5",
",",
"xpad",
"=",
"None",
",",
"ypad",
"=",
"None",
")",
":",
"if",
"xpad",
"is",
"None",
":",
"xpad",
"=",
"pad",
"if",
"xstep",
"is",
"None",
":",
"xstep",
"=",
"step",
"if",
"ypad",
"is",
"None",
":",
"ypad",
"=",
"pad",
"if",
"ystep",
"is",
"None",
":",
"ystep",
"=",
"step",
"xmin",
"=",
"x",
".",
"min",
"(",
")",
"-",
"xpad",
"xmax",
"=",
"x",
".",
"max",
"(",
")",
"+",
"xpad",
"ymin",
"=",
"y",
".",
"min",
"(",
")",
"-",
"ypad",
"ymax",
"=",
"y",
".",
"max",
"(",
")",
"+",
"ypad",
"return",
"meshgrid",
"(",
"xmin",
",",
"xmax",
",",
"step",
",",
"ymin",
",",
"ymax",
",",
"ystep",
")"
] | Make a meshgrid, inferred from data. | [
"Make",
"a",
"meshgrid",
"inferred",
"from",
"data",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/plot.py#L63-L79 | train |
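A usage sketch for automeshgrid; the return value comes from the meshgrid helper it delegates to, which is not in the record, so the two-array unpacking below is an assumption:

import numpy as np
from km3pipe.plot import automeshgrid   # module path taken from the record's url

x = np.random.uniform(0, 10, 200)
y = np.random.uniform(-5, 5, 200)
xx, yy = automeshgrid(x, y, step=0.1, pad=1.0)   # grid covering the data range plus 1.0 of padding on each side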
tamasgal/km3pipe | km3pipe/plot.py | prebinned_hist | def prebinned_hist(counts, binlims, ax=None, *args, **kwargs):
"""Plot a histogram with counts, binlims already given.
Example
=======
>>> gaus = np.random.normal(size=100)
>>> counts, binlims = np.histogram(gaus, bins='auto')
>>> prebinned_hist(counts, binlims)
"""
ax = get_ax(ax)
x = bincenters(binlims)
weights = counts
return ax.hist(x, bins=binlims, weights=weights, *args, **kwargs) | python | def prebinned_hist(counts, binlims, ax=None, *args, **kwargs):
"""Plot a histogram with counts, binlims already given.
Example
=======
>>> gaus = np.random.normal(size=100)
>>> counts, binlims = np.histogram(gaus, bins='auto')
>>> prebinned_hist(counts, binlims)
"""
ax = get_ax(ax)
x = bincenters(binlims)
weights = counts
return ax.hist(x, bins=binlims, weights=weights, *args, **kwargs) | [
"def",
"prebinned_hist",
"(",
"counts",
",",
"binlims",
",",
"ax",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"ax",
"=",
"get_ax",
"(",
"ax",
")",
"x",
"=",
"bincenters",
"(",
"binlims",
")",
"weights",
"=",
"counts",
"return",
"ax",
".",
"hist",
"(",
"x",
",",
"bins",
"=",
"binlims",
",",
"weights",
"=",
"weights",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Plot a histogram with counts, binlims already given.
Example
=======
>>> gaus = np.random.normal(size=100)
>>> counts, binlims = np.histogram(gaus, bins='auto')
>>> prebinned_hist(counts, binlims) | [
"Plot",
"a",
"histogram",
"with",
"counts",
"binlims",
"already",
"given",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/plot.py#L96-L108 | train |
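The docstring's example expands to this runnable sketch; histtype is an ordinary ax.hist keyword forwarded through **kwargs, and the import path follows the record's url:

import numpy as np
import matplotlib.pyplot as plt
from km3pipe.plot import prebinned_hist   # module path taken from the record's url

gaus = np.random.normal(size=100)
counts, binlims = np.histogram(gaus, bins='auto')
prebinned_hist(counts, binlims, histtype='step')   # re-draws the already-binned counts as a histogram
plt.show()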