Code | Summary
---|---|
Please provide a description of the function:def poll(self):
if self._subprocess is not None:
self._subprocess.poll()
time.sleep(self._beaver_config.get('subprocess_poll_sleep')) | [
"Poll attached subprocess until it is available"
]
|
Please provide a description of the function:def close(self):
if self._subprocess is not None:
os.killpg(self._subprocess.pid, signal.SIGTERM)
self._subprocess = None | [
"Close child subprocess"
]
|
Please provide a description of the function:def callback(self, filename, lines, **kwargs):
timestamp = self.get_timestamp(**kwargs)
if kwargs.get('timestamp', False):
del kwargs['timestamp']
for line in lines:
try:
import warnings
with warnings.catch_warnings():
warnings.simplefilter('error')
self._client.publish(self._topic, self.format(filename, line, timestamp, **kwargs), 0)
except Exception as e:
try:
raise TransportException(e.strerror)
except AttributeError:
raise TransportException('Unspecified exception encountered') | [
"publishes lines one by one to the given topic"
]
|
Please provide a description of the function:def _to_unicode(self, data, encoding, errors='strict'):
'''Given a string and its encoding, decodes the string into Unicode.
%encoding is a string recognized by encodings.aliases'''
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding, errors)
return newdata | []
|
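The byte-order-mark checks in `_to_unicode` above are written for Python 2 byte strings. Below is a minimal Python 3 sketch of the same BOM-detection logic; `decode_with_bom` and its `fallback_encoding` argument are illustrative names, not part of the original module.

```python
# Hedged Python 3 sketch of the BOM handling in _to_unicode above.
def decode_with_bom(data: bytes, fallback_encoding: str = "utf-8") -> str:
    if data[:2] == b"\xfe\xff" and data[2:4] != b"\x00\x00":
        return data[2:].decode("utf-16-be")   # UTF-16 big-endian BOM
    if data[:2] == b"\xff\xfe" and data[2:4] != b"\x00\x00":
        return data[2:].decode("utf-16-le")   # UTF-16 little-endian BOM
    if data[:3] == b"\xef\xbb\xbf":
        return data[3:].decode("utf-8")       # UTF-8 BOM
    if data[:4] == b"\x00\x00\xfe\xff":
        return data[4:].decode("utf-32-be")   # UTF-32 big-endian BOM
    if data[:4] == b"\xff\xfe\x00\x00":
        return data[4:].decode("utf-32-le")   # UTF-32 little-endian BOM
    return data.decode(fallback_encoding)

print(decode_with_bom(b"\xef\xbb\xbfhello"))  # -> hello
```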
Please provide a description of the function:def callback(self, filename, lines, **kwargs):
timestamp = self.get_timestamp(**kwargs)
if kwargs.get('timestamp', False):
del kwargs['timestamp']
for line in lines:
try:
import warnings
with warnings.catch_warnings():
warnings.simplefilter('error')
m = self.format(filename, line, timestamp, **kwargs)
self.logger.debug("Sending message " + m)
self.conn.send(destination=self.queue, body=m)
except Exception as e:
self.logger.error(e)
try:
raise TransportException(e)
except AttributeError:
raise TransportException('Unspecified exception encountered') | [
"publishes lines one by one to the given topic"
]
|
Please provide a description of the function:def reconnect(self):
try:
self.conn.close()
except Exception as e:
self.logger.warn(e)
self.createConnection()
return True | [
"Allows reconnection from when a handled\n TransportException is thrown"
]
|
Please provide a description of the function:def _check_connections(self):
for server in self._servers:
if self._is_reachable(server):
server['down_until'] = 0
else:
server['down_until'] = time.time() + 5 | [
"Checks if all configured redis servers are reachable"
]
|
Please provide a description of the function:def _is_reachable(self, server):
try:
server['redis'].ping()
return True
except UserWarning:
self._logger.warn('Cannot reach redis server: ' + server['url'])
except Exception:
self._logger.warn('Cannot reach redis server: ' + server['url'])
return False | [
"Checks if the given redis server is reachable"
]
|
Please provide a description of the function:def invalidate(self):
super(RedisTransport, self).invalidate()
for server in self._servers:
server['redis'].connection_pool.disconnect()
return False | [
"Invalidates the current transport and disconnects all redis connections"
]
|
Please provide a description of the function:def callback(self, filename, lines, **kwargs):
self._logger.debug('Redis transport called')
timestamp = self.get_timestamp(**kwargs)
if kwargs.get('timestamp', False):
del kwargs['timestamp']
namespaces = self._beaver_config.get_field('redis_namespace', filename)
if not namespaces:
namespaces = self._namespace
namespaces = namespaces.split(",")
self._logger.debug('Got namespaces: ' + ', '.join(namespaces))
data_type = self._data_type
self._logger.debug('Got data type: ' + data_type)
server = self._get_next_server()
self._logger.debug('Got redis server: ' + server['url'])
pipeline = server['redis'].pipeline(transaction=False)
callback_map = {
self.LIST_DATA_TYPE: pipeline.rpush,
self.CHANNEL_DATA_TYPE: pipeline.publish,
}
callback_method = callback_map[data_type]
for line in lines:
for namespace in namespaces:
callback_method(
namespace.strip(),
self.format(filename, line, timestamp, **kwargs)
)
try:
pipeline.execute()
except redis.exceptions.RedisError as exception:
self._logger.warn('Cannot push lines to redis server: ' + server['url'])
raise TransportException(exception) | [
"Sends log lines to redis servers"
]
|
Please provide a description of the function:def _get_next_server(self):
current_try = 0
max_tries = len(self._servers)
while current_try < max_tries:
server_index = self._raise_server_index()
server = self._servers[server_index]
down_until = server['down_until']
self._logger.debug('Checking server ' + str(current_try + 1) + '/' + str(max_tries) + ': ' + server['url'])
if down_until == 0:
self._logger.debug('Elected server: ' + server['url'])
return server
if down_until < time.time():
if self._is_reachable(server):
server['down_until'] = 0
self._logger.debug('Elected server: ' + server['url'])
return server
else:
self._logger.debug('Server still unavailable: ' + server['url'])
server['down_until'] = time.time() + 5
current_try += 1
raise TransportException('Cannot reach any redis server') | [
"Returns a valid redis server or raises a TransportException"
]
|
Please provide a description of the function:def _raise_server_index(self):
self._current_server_index = (self._current_server_index + 1) % len(self._servers)
return self._current_server_index | [
"Round robin magic: Raises the current redis server index and returns it"
]
|
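A quick standalone illustration of the modular round-robin arithmetic used by `_raise_server_index` above (plain integers instead of the transport's internal state):

```python
# With 3 servers and a starting index of 0, repeated increments cycle 1, 2, 0, ...
current_index = 0
num_servers = 3
visited = []
for _ in range(6):
    current_index = (current_index + 1) % num_servers
    visited.append(current_index)
print(visited)  # -> [1, 2, 0, 1, 2, 0]
```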
Please provide a description of the function:def valid(self):
valid_servers = 0
for server in self._servers:
if server['down_until'] <= time.time():
valid_servers += 1
return valid_servers > 0 | [
"Returns whether or not the transport can send data to any redis server"
]
|
Please provide a description of the function:def callback(self, filename, lines, **kwargs):
timestamp = self.get_timestamp(**kwargs)
if kwargs.get('timestamp', False):
del kwargs['timestamp']
for line in lines:
try:
import warnings
with warnings.catch_warnings():
warnings.simplefilter('error')
#produce message
if self._key is None:
response = self._prod.send_messages(self._kafka_config['topic'], self.format(filename, line, timestamp, **kwargs))
else:
response = self._prod.send_messages(self._kafka_config['topic'], self._key, self.format(filename, line, timestamp, **kwargs))
if response:
if response[0].error:
self._logger.info('message error: {0}'.format(response[0].error))
self._logger.info('message offset: {0}'.format(response[0].offset))
except Exception as e:
try:
self._logger.error('Exception caught sending message/s : ' + str(e))
raise TransportException(e.strerror)
except AttributeError:
raise TransportException('Unspecified exception encountered') | [
"publishes lines one by one to the given topic"
]
|
Please provide a description of the function:def format(self, filename, line, timestamp, **kwargs):
line = unicode(line.encode("utf-8"), "utf-8", errors="ignore")
formatter = self._beaver_config.get_field('format', filename)
if formatter not in self._formatters:
formatter = self._default_formatter
data = {
self._fields.get('type'): kwargs.get('type'),
self._fields.get('tags'): kwargs.get('tags'),
'@timestamp': timestamp,
self._fields.get('host'): self._current_host,
self._fields.get('file'): filename,
self._fields.get('message'): line
}
if self._logstash_version == 0:
data['@source'] = 'file://{0}'.format(filename)
data['@fields'] = kwargs.get('fields')
else:
data['@version'] = self._logstash_version
fields = kwargs.get('fields')
for key in fields:
data[key] = fields.get(key)
return self._formatters[formatter](data) | [
"Returns a formatted log line"
]
|
Please provide a description of the function:def get_timestamp(self, **kwargs):
timestamp = kwargs.get('timestamp')
if not timestamp:
now = datetime.datetime.utcnow()
timestamp = now.strftime("%Y-%m-%dT%H:%M:%S") + ".%03d" % (now.microsecond / 1000) + "Z"
return timestamp | [
"Retrieves the timestamp for a given set of data"
]
|
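For reference, a small standalone sketch of the timestamp format produced by `get_timestamp` above (UTC, truncated to milliseconds, `Z` suffix); integer division is used here for clarity:

```python
import datetime

now = datetime.datetime.utcnow()
timestamp = now.strftime("%Y-%m-%dT%H:%M:%S") + ".%03d" % (now.microsecond // 1000) + "Z"
print(timestamp)  # e.g. 2015-01-01T12:34:56.789Z
```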
Please provide a description of the function:def _make_executable(path):
os.chmod(path, os.stat(path).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) | [
"Make the file at path executable."
]
|
Please provide a description of the function:def build_parser():
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Required args
parser.add_argument("--in_path", "-i", required=True,
help="file path to input GCT(x) file")
parser.add_argument("--rid", nargs="+", help="filepath to grp file or string array for including rows")
parser.add_argument("--cid", nargs="+", help="filepath to grp file or string array for including cols")
parser.add_argument("--exclude_rid", "-er", nargs="+", help="filepath to grp file or string array for excluding rows")
parser.add_argument("--exclude_cid", "-ec", nargs="+", help="filepath to grp file or string array for excluding cols")
parser.add_argument("--out_name", "-o", default="ds_subsetted.gct",
help="what to name the output file")
parser.add_argument("--out_type", default="gct", choices=["gct", "gctx"],
help="whether to write output as GCT or GCTx")
parser.add_argument("--verbose", "-v", action="store_true", default=False,
help="whether to increase the # of messages reported")
return parser | [
"Build argument parser."
]
|
Please provide a description of the function:def subset_main(args):
# Read in each of the command line arguments
rid = _read_arg(args.rid)
cid = _read_arg(args.cid)
exclude_rid = _read_arg(args.exclude_rid)
exclude_cid = _read_arg(args.exclude_cid)
# If GCT, use subset_gctoo
if args.in_path.endswith(".gct"):
in_gct = parse_gct.parse(args.in_path)
out_gct = sg.subset_gctoo(in_gct, rid=rid, cid=cid,
exclude_rid=exclude_rid,
exclude_cid=exclude_cid)
# If GCTx, use parse_gctx
else:
if (exclude_rid is not None) or (exclude_cid is not None):
msg = "exclude_{rid,cid} args not currently supported for parse_gctx."
raise(Exception(msg))
logger.info("Using hyperslab selection functionality of parse_gctx...")
out_gct = parse_gctx.parse(args.in_path, rid=rid, cid=cid)
# Write the output gct
if args.out_type == "gctx":
wgx.write(out_gct, args.out_name)
else:
wg.write(out_gct, args.out_name, data_null="NaN", metadata_null="NA", filler_null="NA") | [
" Separate method from main() in order to make testing easier and to\n enable command-line access. "
]
|
Please provide a description of the function:def _read_arg(arg):
# If arg is None, just return it back
if arg is None:
arg_out = arg
else:
# If len(arg) == 1 and arg[0] is a valid filepath, read it as a grp file
if len(arg) == 1 and os.path.exists(arg[0]):
arg_out = grp.read(arg[0])
else:
arg_out = arg
# Make sure that arg_out is a list of strings
assert isinstance(arg_out, list), "arg_out must be a list."
assert type(arg_out[0]) == str, "arg_out must be a list of strings."
return arg_out | [
"\n If arg is a list with 1 element that corresponds to a valid file path, use\n set_io.grp to read the grp file. Otherwise, check that arg is a list of strings.\n\n Args:\n arg (list or None)\n\n Returns:\n arg_out (list or None)\n "
]
|
Please provide a description of the function:def fast_cov(x, y=None, destination=None):
validate_inputs(x, y, destination)
if y is None:
y = x
if destination is None:
destination = numpy.zeros((x.shape[1], y.shape[1]))
mean_x = numpy.mean(x, axis=0)
mean_y = numpy.mean(y, axis=0)
mean_centered_x = (x - mean_x).astype(destination.dtype)
mean_centered_y = (y - mean_y).astype(destination.dtype)
numpy.dot(mean_centered_x.T, mean_centered_y, out=destination)
numpy.divide(destination, (x.shape[0] - 1), out=destination)
return destination | [
"calculate the covariance matrix for the columns of x (MxN), or optionally, the covariance matrix between the\n columns of x and and the columns of y (MxP). (In the language of statistics, the columns are variables, the rows\n are observations).\n\n Args:\n x (numpy array-like) MxN in shape\n y (numpy array-like) MxP in shape\n destination (numpy array-like) optional location where to store the results as they are calculated (e.g. a numpy\n memmap of a file)\n\n returns (numpy array-like) array of the covariance values\n for defaults (y=None), shape is NxN\n if y is provided, shape is NxP\n "
]
|
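A minimal usage sketch for `fast_cov` above; with the default arguments it should agree with `numpy.cov` (columns as variables, N-1 divisor) on a small example:

```python
import numpy

# 4 observations (rows) of 3 variables (columns)
x = numpy.array([[1.0, 2.0, 3.0],
                 [2.0, 1.0, 0.0],
                 [3.0, 4.0, 5.0],
                 [4.0, 3.0, 2.0]])

expected = numpy.cov(x, rowvar=False)  # 3x3 covariance of the columns
result = fast_cov(x)                   # fast_cov (and its validate_inputs helper) as defined above
assert numpy.allclose(expected, result)
```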
Please provide a description of the function:def read(file_path):
# Read in file
actual_file_path = os.path.expanduser(file_path)
with open(actual_file_path, 'r') as f:
lines = f.readlines()
# Create GMT object
gmt = []
# Iterate over each line
for line_num, line in enumerate(lines):
# Separate along tabs
fields = line.split('\t')
assert len(fields) > 2, (
"Each line must have at least 3 tab-delimited items. " +
"line_num: {}, fields: {}").format(line_num, fields)
# Get rid of trailing whitespace
fields[-1] = fields[-1].rstrip()
# Collect entries
entries = fields[2:]
# Remove empty entries
entries = [x for x in entries if x]
assert len(set(entries)) == len(entries), (
"There should not be duplicate entries for the same set. " +
"line_num: {}, entries: {}").format(line_num, entries)
# Store this line as a dictionary
line_dict = {SET_IDENTIFIER_FIELD: fields[0],
SET_DESC_FIELD: fields[1],
SET_MEMBERS_FIELD: entries}
gmt.append(line_dict)
verify_gmt_integrity(gmt)
return gmt | [
" Read a gmt file at the path specified by file_path.\n\n Args:\n file_path (string): path to gmt file\n\n Returns:\n gmt (GMT object): list of dicts, where each dict corresponds to one\n line of the GMT file\n\n "
]
|
Please provide a description of the function:def verify_gmt_integrity(gmt):
# Verify that set ids are unique
set_ids = [d[SET_IDENTIFIER_FIELD] for d in gmt]
assert len(set(set_ids)) == len(set_ids), (
"Set identifiers should be unique. set_ids: {}".format(set_ids)) | [
" Make sure that set ids are unique.\n\n Args:\n gmt (GMT object): list of dicts\n\n Returns:\n None\n\n "
]
|
Please provide a description of the function:def write(gmt, out_path):
with open(out_path, 'w') as f:
for _, each_dict in enumerate(gmt):
f.write(each_dict[SET_IDENTIFIER_FIELD] + '\t')
f.write(each_dict[SET_DESC_FIELD] + '\t')
f.write('\t'.join([str(entry) for entry in each_dict[SET_MEMBERS_FIELD]]))
f.write('\n') | [
" Write a GMT to a text file.\n\n Args:\n gmt (GMT object): list of dicts\n out_path (string): output path\n\n Returns:\n None\n\n "
]
|
Please provide a description of the function:def diff_gctoo(gctoo, plate_control=True, group_field='pert_type', group_val='ctl_vehicle',
diff_method="robust_z", upper_diff_thresh=10, lower_diff_thresh=-10):
''' Converts a matrix of values (e.g. gene expression, viability, etc.)
into a matrix of differential values.
Args:
df (pandas df): data to make diff_gctoo
plate_control (bool): True means calculate diff_gctoo using plate control.
False means vehicle control.
group_field (string): Metadata field in which to find group_val
group_val (string): Value in group_field that indicates use in vehicle control
diff_method (string): Method of computing differential data; currently only
support either "robust_z" or "median_norm"
upper_diff_thresh (float): Maximum value for diff data
lower_diff_thresh (float): Minimum value for diff data
Returns:
out_gctoo (GCToo object): GCToo with differential data values
'''
assert diff_method in possible_diff_methods, (
"possible_diff_methods: {}, diff_method: {}".format(
possible_diff_methods, diff_method))
# Compute median and MAD using all samples in the dataset
if plate_control:
# Compute differential data
if diff_method == "robust_z":
diff_data = robust_zscore.robust_zscore(gctoo.data_df)
elif diff_method == "median_norm":
medians = gctoo.data_df.median(axis=1)
diff_data = gctoo.data_df.subtract(medians, axis='index')
# Compute median and MAD from negative controls, rather than all samples
else:
assert group_field in gctoo.col_metadata_df.columns.values, (
"group_field {} not present in column metadata. " +
"gctoo.col_metadata_df.columns.values: {}").format(
group_field, gctoo.col_metadata_df.columns.values)
assert sum(gctoo.col_metadata_df[group_field] == group_val) > 0, (
"group_val {} not present in the {} column.").format(
group_val, group_field)
# Find negative control samples
neg_ctl_samples = gctoo.col_metadata_df.index[gctoo.col_metadata_df[group_field] == group_val]
neg_ctl_df = gctoo.data_df[neg_ctl_samples]
# Compute differential data
if diff_method == "robust_z":
diff_data = robust_zscore.robust_zscore(gctoo.data_df, neg_ctl_df)
elif diff_method == "median_norm":
medians = gctoo.data_df.median(axis=1)
diff_data = gctoo.data_df.subtract(medians, axis='index')
# Threshold differential data before returning
diff_data = diff_data.clip(lower=lower_diff_thresh, upper=upper_diff_thresh)
# Construct output GCToo object
out_gctoo = GCToo.GCToo(data_df=diff_data,
row_metadata_df=gctoo.row_metadata_df,
col_metadata_df=gctoo.col_metadata_df)
return out_gctoo | []
|
Please provide a description of the function:def parse(gctx_file_path, convert_neg_666=True, rid=None, cid=None,
ridx=None, cidx=None, row_meta_only=False, col_meta_only=False, make_multiindex=False):
full_path = os.path.expanduser(gctx_file_path)
# Verify that the path exists
if not os.path.exists(full_path):
err_msg = "The given path to the gctx file cannot be found. full_path: {}"
logger.error(err_msg.format(full_path))
raise Exception(err_msg.format(full_path))
logger.info("Reading GCTX: {}".format(full_path))
# open file
gctx_file = h5py.File(full_path, "r")
if row_meta_only:
# read in row metadata
row_dset = gctx_file[row_meta_group_node]
row_meta = parse_metadata_df("row", row_dset, convert_neg_666)
# validate optional input ids & get indexes to subset by
(sorted_ridx, sorted_cidx) = check_and_order_id_inputs(rid, ridx, cid, cidx, row_meta, None)
gctx_file.close()
# subset if specified, then return
row_meta = row_meta.iloc[sorted_ridx]
return row_meta
elif col_meta_only:
# read in col metadata
col_dset = gctx_file[col_meta_group_node]
col_meta = parse_metadata_df("col", col_dset, convert_neg_666)
# validate optional input ids & get indexes to subset by
(sorted_ridx, sorted_cidx) = check_and_order_id_inputs(rid, ridx, cid, cidx, None, col_meta)
gctx_file.close()
# subset if specified, then return
col_meta = col_meta.iloc[sorted_cidx]
return col_meta
else:
# read in row metadata
row_dset = gctx_file[row_meta_group_node]
row_meta = parse_metadata_df("row", row_dset, convert_neg_666)
# read in col metadata
col_dset = gctx_file[col_meta_group_node]
col_meta = parse_metadata_df("col", col_dset, convert_neg_666)
# validate optional input ids & get indexes to subset by
(sorted_ridx, sorted_cidx) = check_and_order_id_inputs(rid, ridx, cid, cidx, row_meta, col_meta)
data_dset = gctx_file[data_node]
data_df = parse_data_df(data_dset, sorted_ridx, sorted_cidx, row_meta, col_meta)
# (if subsetting) subset metadata
row_meta = row_meta.iloc[sorted_ridx]
col_meta = col_meta.iloc[sorted_cidx]
# get version
my_version = gctx_file.attrs[version_node]
if type(my_version) == np.ndarray:
my_version = my_version[0]
gctx_file.close()
# make GCToo instance
my_gctoo = GCToo.GCToo(data_df=data_df, row_metadata_df=row_meta, col_metadata_df=col_meta,
src=full_path, version=my_version, make_multiindex=make_multiindex)
return my_gctoo | [
"\n Primary method of script. Reads in path to a gctx file and parses into GCToo object.\n\n Input:\n Mandatory:\n - gctx_file_path (str): full path to gctx file you want to parse.\n\n Optional:\n - convert_neg_666 (bool): whether to convert -666 values to numpy.nan or not\n (see Note below for more details on this). Default = False.\n - rid (list of strings): list of row ids to specifically keep from gctx. Default=None.\n - cid (list of strings): list of col ids to specifically keep from gctx. Default=None.\n - ridx (list of integers): only read the rows corresponding to this\n list of integer ids. Default=None.\n - cidx (list of integers): only read the columns corresponding to this\n list of integer ids. Default=None.\n - row_meta_only (bool): Whether to load data + metadata (if False), or just row metadata (if True)\n as pandas DataFrame\n - col_meta_only (bool): Whether to load data + metadata (if False), or just col metadata (if True)\n as pandas DataFrame\n - make_multiindex (bool): whether to create a multi-index df combining\n the 3 component dfs\n\n Output:\n - myGCToo (GCToo): A GCToo instance containing content of parsed gctx file. Note: if meta_only = True,\n this will be a GCToo instance where the data_df is empty, i.e. data_df = pd.DataFrame(index=rids,\n columns = cids)\n\n Note: why does convert_neg_666 exist?\n - In CMap--for somewhat obscure historical reasons--we use \"-666\" as our null value\n for metadata. However (so that users can take full advantage of pandas' methods,\n including those for filtering nan's etc) we provide the option of converting these\n into numpy.NaN values, the pandas default.\n "
]
|
Please provide a description of the function:def check_and_order_id_inputs(rid, ridx, cid, cidx, row_meta_df, col_meta_df):
(row_type, row_ids) = check_id_idx_exclusivity(rid, ridx)
(col_type, col_ids) = check_id_idx_exclusivity(cid, cidx)
row_ids = check_and_convert_ids(row_type, row_ids, row_meta_df)
ordered_ridx = get_ordered_idx(row_type, row_ids, row_meta_df)
col_ids = check_and_convert_ids(col_type, col_ids, col_meta_df)
ordered_cidx = get_ordered_idx(col_type, col_ids, col_meta_df)
return (ordered_ridx, ordered_cidx) | [
"\n Makes sure that (if entered) id inputs entered are of one type (string id or index)\n Input:\n - rid (list or None): if not None, a list of rids\n - ridx (list or None): if not None, a list of indexes\n - cid (list or None): if not None, a list of cids\n - cidx (list or None): if not None, a list of indexes\n Output:\n - a tuple of the ordered ridx and cidx\n "
]
|
Please provide a description of the function:def check_id_idx_exclusivity(id, idx):
if (id is not None and idx is not None):
msg = ("'id' and 'idx' fields can't both not be None," +
" please specify subset in only one of these fields")
logger.error(msg)
raise Exception("parse_gctx.check_id_idx_exclusivity: " + msg)
elif id is not None:
return ("id", id)
elif idx is not None:
return ("idx", idx)
else:
return (None, []) | [
"\n Makes sure user didn't provide both ids and idx values to subset by.\n\n Input:\n - id (list or None): if not None, a list of string id names\n - idx (list or None): if not None, a list of integer id indexes\n\n Output:\n - a tuple: first element is subset type, second is subset content\n "
]
|
Please provide a description of the function:def get_ordered_idx(id_type, id_list, meta_df):
if meta_df is not None:
if id_type is None:
id_list = range(0, len(list(meta_df.index)))
elif id_type == "id":
lookup = {x: i for (i,x) in enumerate(meta_df.index)}
id_list = [lookup[str(i)] for i in id_list]
return sorted(id_list)
else:
return None | [
"\n Gets index values corresponding to ids to subset and orders them.\n Input:\n - id_type (str): either \"id\", \"idx\" or None\n - id_list (list): either a list of indexes or id names\n Output:\n - a sorted list of indexes to subset a dimension by\n "
]
|
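A small sketch of the id-to-position lookup that `get_ordered_idx` performs when `id_type == "id"`, using a toy metadata index:

```python
import pandas as pd

meta_df = pd.DataFrame(index=["A", "B", "C", "D"])
lookup = {x: i for (i, x) in enumerate(meta_df.index)}
print(sorted(lookup[str(i)] for i in ["D", "B"]))  # -> [1, 3]
```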
Please provide a description of the function:def parse_metadata_df(dim, meta_group, convert_neg_666):
# read values from hdf5 & make a DataFrame
header_values = {}
array_index = 0
for k in meta_group.keys():
curr_dset = meta_group[k]
temp_array = np.empty(curr_dset.shape, dtype=curr_dset.dtype)
curr_dset.read_direct(temp_array)
# convert all values to str in temp_array so that
# to_numeric works consistently with gct and gct_x parser
temp_array = temp_array.astype('str')
header_values[str(k)] = temp_array
array_index = array_index + 1
meta_df = pd.DataFrame.from_dict(header_values)
# save the ids for later use in the index; we do not want to convert them to
# numeric
ids = meta_df["id"].copy()
del meta_df["id"]
# Convert metadata to numeric if possible, after converting everything to string first
# Note: This conversion first to string is to ensure consistent behavior between
# the gctx and gct parser (which by default reads the entire text file into a string)
meta_df = meta_df.apply(lambda x: pd.to_numeric(x, errors="ignore"))
meta_df.set_index(pd.Index(ids, dtype=str), inplace=True)
# Replace -666 and -666.0 with NaN; also replace "-666" if convert_neg_666 is True
meta_df = replace_666(meta_df, convert_neg_666)
# set index and columns appropriately
set_metadata_index_and_column_names(dim, meta_df)
return meta_df | [
"\n Reads in all metadata from .gctx file to pandas DataFrame\n with proper GCToo specifications.\n Input:\n - dim (str): Dimension of metadata; either \"row\" or \"column\"\n - meta_group (HDF5 group): Group from which to read metadata values\n - convert_neg_666 (bool): whether to convert \"-666\" values to np.nan or not\n Output:\n - meta_df (pandas DataFrame): data frame corresponding to metadata fields\n of dimension specified.\n "
]
|
Please provide a description of the function:def replace_666(meta_df, convert_neg_666):
if convert_neg_666:
out_df = meta_df.replace([-666, "-666", -666.0], np.nan)
else:
out_df = meta_df.replace([-666, -666.0], "-666")
return out_df | [
" Replace -666, -666.0, and optionally \"-666\".\n Args:\n meta_df (pandas df):\n convert_neg_666 (bool):\n Returns:\n out_df (pandas df): updated meta_df\n "
]
|
Please provide a description of the function:def set_metadata_index_and_column_names(dim, meta_df):
if dim == "row":
meta_df.index.name = "rid"
meta_df.columns.name = "rhd"
elif dim == "col":
meta_df.index.name = "cid"
meta_df.columns.name = "chd" | [
"\n Sets index and column names to GCTX convention.\n Input:\n - dim (str): Dimension of metadata to read. Must be either \"row\" or \"col\"\n - meta_df (pandas.DataFrame): data frame corresponding to metadata fields\n of dimension specified.\n Output:\n None\n "
]
|
Please provide a description of the function:def parse_data_df(data_dset, ridx, cidx, row_meta, col_meta):
if len(ridx) == len(row_meta.index) and len(cidx) == len(col_meta.index): # no subset
data_array = np.empty(data_dset.shape, dtype=np.float32)
data_dset.read_direct(data_array)
data_array = data_array.transpose()
elif len(ridx) <= len(cidx):
first_subset = data_dset[:, ridx].astype(np.float32)
data_array = first_subset[cidx, :].transpose()
elif len(cidx) < len(ridx):
first_subset = data_dset[cidx, :].astype(np.float32)
data_array = first_subset[:, ridx].transpose()
# make DataFrame instance
data_df = pd.DataFrame(data_array, index=row_meta.index[ridx], columns=col_meta.index[cidx])
return data_df | [
"\n Parses in data_df from hdf5, subsetting if specified.\n\n Input:\n -data_dset (h5py dset): HDF5 dataset from which to read data_df\n -ridx (list): list of indexes to subset from data_df\n (may be all of them if no subsetting)\n -cidx (list): list of indexes to subset from data_df\n (may be all of them if no subsetting)\n -row_meta (pandas DataFrame): the parsed in row metadata\n -col_meta (pandas DataFrame): the parsed in col metadata\n "
]
|
Please provide a description of the function:def get_column_metadata(gctx_file_path, convert_neg_666=True):
full_path = os.path.expanduser(gctx_file_path)
# open file
gctx_file = h5py.File(full_path, "r")
col_dset = gctx_file[col_meta_group_node]
col_meta = parse_metadata_df("col", col_dset, convert_neg_666)
gctx_file.close()
return col_meta | [
"\n Opens .gctx file and returns only column metadata\n\n Input:\n Mandatory:\n - gctx_file_path (str): full path to gctx file you want to parse.\n\n Optional:\n - convert_neg_666 (bool): whether to convert -666 values to num\n\n Output:\n - col_meta (pandas DataFrame): a DataFrame of all column metadata values.\n "
]
|
Please provide a description of the function:def get_row_metadata(gctx_file_path, convert_neg_666=True):
full_path = os.path.expanduser(gctx_file_path)
# open file
gctx_file = h5py.File(full_path, "r")
row_dset = gctx_file[row_meta_group_node]
row_meta = parse_metadata_df("row", row_dset, convert_neg_666)
gctx_file.close()
return row_meta | [
"\n Opens .gctx file and returns only row metadata\n\n Input:\n Mandatory:\n - gctx_file_path (str): full path to gctx file you want to parse.\n\n Optional:\n - convert_neg_666 (bool): whether to convert -666 values to num\n\n Output:\n - row_meta (pandas DataFrame): a DataFrame of all row metadata values.\n "
]
|
Please provide a description of the function:def multi_index_df_to_component_dfs(multi_index_df, rid="rid", cid="cid"):
# Id level of the multiindex will become the index
rids = list(multi_index_df.index.get_level_values(rid))
cids = list(multi_index_df.columns.get_level_values(cid))
# It's possible that the index and/or columns of multi_index_df are not
# actually multi-index; need to check for this and there are more than one level in index(python3)
if isinstance(multi_index_df.index, pd.MultiIndex):
# check if there are more than one levels in index (python3)
if len(multi_index_df.index.names) > 1:
# If so, drop rid because it won't go into the body of the metadata
mi_df_index = multi_index_df.index.droplevel(rid)
# Names of the multiindex levels become the headers
rhds = list(mi_df_index.names)
# Assemble metadata values
row_metadata = np.array([mi_df_index.get_level_values(level).values for level in list(rhds)]).T
# if there is one level in index (python3), then rhds and row metadata should be empty
else:
rhds = []
row_metadata = []
# If the index is not multi-index, then rhds and row metadata should be empty
else:
rhds = []
row_metadata = []
# Check if columns of multi_index_df are in fact multi-index
if isinstance(multi_index_df.columns, pd.MultiIndex):
# Check if there are more than one levels in columns(python3)
if len(multi_index_df.columns.names) > 1:
# If so, drop cid because it won't go into the body of the metadata
mi_df_columns = multi_index_df.columns.droplevel(cid)
# Names of the multiindex levels become the headers
chds = list(mi_df_columns.names)
# Assemble metadata values
col_metadata = np.array([mi_df_columns.get_level_values(level).values for level in list(chds)]).T
# If there is one level in columns (python3), then rhds and row metadata should be empty
else:
chds = []
col_metadata = []
# If the columns are not multi-index, then rhds and row metadata should be empty
else:
chds = []
col_metadata = []
# Create component dfs
row_metadata_df = pd.DataFrame.from_records(row_metadata, index=pd.Index(rids, name="rid"), columns=pd.Index(rhds, name="rhd"))
col_metadata_df = pd.DataFrame.from_records(col_metadata, index=pd.Index(cids, name="cid"), columns=pd.Index(chds, name="chd"))
data_df = pd.DataFrame(multi_index_df.values, index=pd.Index(rids, name="rid"), columns=pd.Index(cids, name="cid"))
return data_df, row_metadata_df, col_metadata_df | [
" Convert a multi-index df into 3 component dfs. "
]
|
Please provide a description of the function:def check_df(self, df):
if isinstance(df, pd.DataFrame):
if not df.index.is_unique:
repeats = df.index[df.index.duplicated()].values
msg = "Index values must be unique but aren't. The following entries appear more than once: {}".format(repeats)
self.logger.error(msg)
raise Exception("GCToo GCToo.check_df " + msg)
if not df.columns.is_unique:
repeats = df.columns[df.columns.duplicated()].values
msg = "Columns values must be unique but aren't. The following entries appear more than once: {}".format(repeats)
raise Exception("GCToo GCToo.check_df " + msg)
else:
return True
else:
msg = "expected Pandas DataFrame, got something else: {} of type: {}".format(df, type(df))
self.logger.error(msg)
raise Exception("GCToo GCToo.check_df " + msg) | [
"\n Verifies that df is a pandas DataFrame instance and\n that its index and column values are unique.\n "
]
|
Please provide a description of the function:def id_match_check(self, data_df, meta_df, dim):
if dim == "row":
if len(data_df.index) == len(meta_df.index) and set(data_df.index) == set(meta_df.index):
return True
else:
msg = ("The rids are inconsistent between data_df and row_metadata_df.\n" +
"data_df.index.values:\n{}\nrow_metadata_df.index.values:\n{}").format(data_df.index.values, meta_df.index.values)
self.logger.error(msg)
raise Exception("GCToo GCToo.id_match_check " + msg)
elif dim == "col":
if len(data_df.columns) == len(meta_df.index) and set(data_df.columns) == set(meta_df.index):
return True
else:
msg = ("The cids are inconsistent between data_df and col_metadata_df.\n" +
"data_df.columns.values:\n{}\ncol_metadata_df.index.values:\n{}").format(data_df.columns.values, meta_df.index.values)
self.logger.error(msg)
raise Exception("GCToo GCToo.id_match_check " + msg) | [
"\n Verifies that id values match between:\n - row case: index of data_df & index of row metadata\n - col case: columns of data_df & index of column metadata\n "
]
|
Please provide a description of the function:def assemble_multi_index_df(self):
#prepare row index
self.logger.debug("Row metadata shape: {}".format(self.row_metadata_df.shape))
self.logger.debug("Is empty? {}".format(self.row_metadata_df.empty))
row_copy = pd.DataFrame(self.row_metadata_df.index) if self.row_metadata_df.empty else self.row_metadata_df.copy()
row_copy["rid"] = row_copy.index
row_index = pd.MultiIndex.from_arrays(row_copy.T.values, names=row_copy.columns)
#prepare column index
self.logger.debug("Col metadata shape: {}".format(self.col_metadata_df.shape))
col_copy = pd.DataFrame(self.col_metadata_df.index) if self.col_metadata_df.empty else self.col_metadata_df.copy()
col_copy["cid"] = col_copy.index
transposed_col_metadata = col_copy.T
col_index = pd.MultiIndex.from_arrays(transposed_col_metadata.values, names=transposed_col_metadata.index)
# Create multi index dataframe using the values of data_df and the indexes created above
self.logger.debug("Data df shape: {}".format(self.data_df.shape))
self.multi_index_df = pd.DataFrame(data=self.data_df.values, index=row_index, columns=col_index) | [
"Assembles three component dataframes into a multiindex dataframe.\n Sets the result to self.multi_index_df.\n IMPORTANT: Cross-section (\"xs\") is the best command for selecting\n data. Be sure to use the flag \"drop_level=False\" with this command,\n or else the dataframe that is returned will not have the same\n metadata as the input.\n N.B. \"level\" means metadata header.\n N.B. \"axis=1\" indicates column annotations.\n Examples:\n 1) Select the probe with pr_lua_id=\"LUA-3404\":\n lua3404_df = multi_index_df.xs(\"LUA-3404\", level=\"pr_lua_id\", drop_level=False)\n 2) Select all DMSO samples:\n DMSO_df = multi_index_df.xs(\"DMSO\", level=\"pert_iname\", axis=1, drop_level=False)\n "
]
|
Please provide a description of the function:def parse(file_path, convert_neg_666=True, rid=None, cid=None,
ridx=None, cidx=None, row_meta_only=False, col_meta_only=False, make_multiindex=False):
assert sum([row_meta_only, col_meta_only]) <= 1, (
"row_meta_only and col_meta_only cannot both be requested.")
nan_values = [
"#N/A", "N/A", "NA", "#NA", "NULL", "NaN", "-NaN",
"nan", "-nan", "#N/A!", "na", "NA", "None", "#VALUE!"]
# Add "-666" to the list of NaN values
if convert_neg_666:
nan_values.append("-666")
# Verify that the gct path exists
if not os.path.exists(file_path):
err_msg = "The given path to the gct file cannot be found. gct_path: {}"
logger.error(err_msg.format(file_path))
raise Exception(err_msg.format(file_path))
logger.info("Reading GCT: {}".format(file_path))
# Read version and dimensions
(version, num_data_rows, num_data_cols,
num_row_metadata, num_col_metadata) = read_version_and_dims(file_path)
# Read in metadata and data
(row_metadata, col_metadata, data) = parse_into_3_df(
file_path, num_data_rows, num_data_cols,
num_row_metadata, num_col_metadata, nan_values)
# Create the gctoo object and assemble 3 component dataframes
# Not the most efficient if only metadata requested (i.e. creating the
# whole GCToo just to return the metadata df), but simplest
myGCToo = create_gctoo_obj(file_path, version, row_metadata, col_metadata,
data, make_multiindex)
# Subset if requested
if (rid is not None) or (ridx is not None) or (cid is not None) or (cidx is not None):
logger.info("Subsetting GCT... (note that there are no speed gains when subsetting GCTs)")
myGCToo = sg.subset_gctoo(myGCToo, rid=rid, cid=cid, ridx=ridx, cidx=cidx)
if row_meta_only:
return myGCToo.row_metadata_df
elif col_meta_only:
return myGCToo.col_metadata_df
else:
return myGCToo | [
"\n The main method.\n\n Args:\n - file_path (string): full path to gct(x) file you want to parse\n - convert_neg_666 (bool): whether to convert -666 values to numpy.nan\n (see Note below for more details). Default = False.\n - rid (list of strings): list of row ids to specifically keep from gct. Default=None.\n - cid (list of strings): list of col ids to specifically keep from gct. Default=None.\n - ridx (list of integers): only read the rows corresponding to this\n list of integer ids. Default=None.\n - cidx (list of integers): only read the columns corresponding to this\n list of integer ids. Default=None.\n - row_meta_only (bool): Whether to load data + metadata (if False), or\n just row metadata (if True) as pandas DataFrame\n - col_meta_only (bool): Whether to load data + metadata (if False), or\n just col metadata (if True) as pandas DataFrame\n - make_multiindex (bool): whether to create a multi-index df combining\n the 3 component dfs\n\n Returns:\n - myGCToo (GCToo object): A GCToo instance containing content of\n parsed gct file ** OR **\n - row_metadata (pandas df) ** OR ** col_metadata (pandas df)\n\n Note: why is convert_neg_666 even a thing?\n In CMap--for somewhat obscure historical reasons--we use \"-666\" as our null value\n for metadata. However (so that users can take full advantage of pandas' methods,\n including those for filtering nan's etc) we provide the option of converting these\n into numpy.nan values, the pandas default.\n\n "
]
|
Please provide a description of the function:def are_genes_in_api(my_clue_api_client, gene_symbols):
if len(gene_symbols) > 0:
query_gene_symbols = gene_symbols if type(gene_symbols) is list else list(gene_symbols)
query_result = my_clue_api_client.run_filter_query(resource_name,
{"where":{"gene_symbol":{"inq":query_gene_symbols}}, "fields":{"gene_symbol":True}})
logger.debug("query_result: {}".format(query_result))
r = set([x["gene_symbol"] for x in query_result])
return r
else:
logger.warning("provided gene_symbols was empty, cannot run query")
return set() | [
"determine if genes are present in the API\n\n Args:\n my_clue_api_client:\n gene_symbols: collection of gene symbols to query the API with\n\n Returns: set of the found gene symbols\n\n "
]
|
Please provide a description of the function:def write(gctoo, out_fname, data_null="NaN", metadata_null="-666", filler_null="-666", data_float_format="%.4f"):
# Create handle for output file
if not out_fname.endswith(".gct"):
out_fname += ".gct"
f = open(out_fname, "w")
# Write first two lines
dims = [str(gctoo.data_df.shape[0]), str(gctoo.data_df.shape[1]),
str(gctoo.row_metadata_df.shape[1]), str(gctoo.col_metadata_df.shape[1])]
write_version_and_dims(VERSION, dims, f)
# Write top half of the gct
write_top_half(f, gctoo.row_metadata_df, gctoo.col_metadata_df,
metadata_null, filler_null)
# Write bottom half of the gct
write_bottom_half(f, gctoo.row_metadata_df, gctoo.data_df,
data_null, data_float_format, metadata_null)
f.close()
logger.info("GCT has been written to {}".format(out_fname)) | [
"Write a gctoo object to a gct file.\n\n Args:\n gctoo (gctoo object)\n out_fname (string): filename for output gct file\n data_null (string): how to represent missing values in the data (default = \"NaN\")\n metadata_null (string): how to represent missing values in the metadata (default = \"-666\")\n filler_null (string): what value to fill the top-left filler block with (default = \"-666\")\n data_float_format (string): how many decimal points to keep in representing data\n (default = 4 digits; None will keep all digits)\n\n Returns:\n None\n\n "
]
|
Please provide a description of the function:def write_version_and_dims(version, dims, f):
f.write(("#" + version + "\n"))
f.write((dims[0] + "\t" + dims[1] + "\t" + dims[2] + "\t" + dims[3] + "\n")) | [
"Write first two lines of gct file.\n\n Args:\n version (string): 1.3 by default\n dims (list of strings): length = 4\n f (file handle): handle of output file\n Returns:\n nothing\n "
]
|
Please provide a description of the function:def write_top_half(f, row_metadata_df, col_metadata_df, metadata_null, filler_null):
# Initialize the top half of the gct including the third line
size_of_top_half_df = (1 + col_metadata_df.shape[1],
1 + row_metadata_df.shape[1] + col_metadata_df.shape[0])
top_half_df = pd.DataFrame(np.full(size_of_top_half_df, filler_null, dtype=object))
# Assemble the third line of the gct: "id", then rhds, then cids
top_half_df.iloc[0, :] = np.hstack(("id", row_metadata_df.columns.values, col_metadata_df.index.values))
# Insert the chds
top_half_df.iloc[range(1, top_half_df.shape[0]), 0] = col_metadata_df.columns.values
# Insert the column metadata, but first convert to strings and replace NaNs
col_metadata_indices = (range(1, top_half_df.shape[0]),
range(1 + row_metadata_df.shape[1], top_half_df.shape[1]))
# pd.DataFrame.at to insert into dataframe(python3)
top_half_df.at[col_metadata_indices[0], col_metadata_indices[1]] = (
col_metadata_df.astype(str).replace("nan", value=metadata_null).T.values)
# Write top_half_df to file
top_half_df.to_csv(f, header=False, index=False, sep="\t") | [
" Write the top half of the gct file: top-left filler values, row metadata\n headers, and top-right column metadata.\n\n Args:\n f (file handle): handle for output file\n row_metadata_df (pandas df)\n col_metadata_df (pandas df)\n metadata_null (string): how to represent missing values in the metadata\n filler_null (string): what value to fill the top-left filler block with\n\n Returns:\n None\n "
]
|
Please provide a description of the function:def write_bottom_half(f, row_metadata_df, data_df, data_null, data_float_format, metadata_null):
# create the left side of the bottom half of the gct (for the row metadata)
size_of_left_bottom_half_df = (row_metadata_df.shape[0],
1 + row_metadata_df.shape[1])
left_bottom_half_df = pd.DataFrame(np.full(size_of_left_bottom_half_df, metadata_null, dtype=object))
#create the full bottom half by combining with the above with the matrix data
bottom_half_df = pd.concat([left_bottom_half_df, data_df.reset_index(drop=True)], axis=1)
bottom_half_df.columns = range(bottom_half_df.shape[1])
# Insert the rids
bottom_half_df.iloc[:, 0] = row_metadata_df.index.values
# Insert the row metadata, but first convert to strings and replace NaNs
row_metadata_col_indices = range(1, 1 + row_metadata_df.shape[1])
bottom_half_df.iloc[:, row_metadata_col_indices] = (
row_metadata_df.astype(str).replace("nan", value=metadata_null).values)
# Write bottom_half_df to file
bottom_half_df.to_csv(f, header=False, index=False, sep="\t",
na_rep=data_null,
float_format=data_float_format) | [
" Write the bottom half of the gct file: row metadata and data.\n\n Args:\n f (file handle): handle for output file\n row_metadata_df (pandas df)\n data_df (pandas df)\n data_null (string): how to represent missing values in the data\n metadata_null (string): how to represent missing values in the metadata\n data_float_format (string): how many decimal points to keep in representing data\n\n Returns:\n None\n "
]
|
Please provide a description of the function:def append_dims_and_file_extension(fname, data_df):
# If there's no .gct at the end of output file name, add the dims and .gct
if not fname.endswith(".gct"):
out_fname = '{0}_n{1}x{2}.gct'.format(fname, data_df.shape[1], data_df.shape[0])
return out_fname
# Otherwise, only add the dims
else:
basename = os.path.splitext(fname)[0]
out_fname = '{0}_n{1}x{2}.gct'.format(basename, data_df.shape[1], data_df.shape[0])
return out_fname | [
"Append dimensions and file extension to output filename.\n N.B. Dimensions are cols x rows.\n\n Args:\n fname (string): output filename\n data_df (pandas df)\n Returns:\n out_fname (string): output filename with matrix dims and .gct appended\n "
]
|
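Example of the naming convention applied by `append_dims_and_file_extension` above (dimensions are cols x rows, per the docstring):

```python
import pandas as pd

data_df = pd.DataFrame([[1, 2, 3],
                        [4, 5, 6]])  # 2 rows x 3 columns
print(append_dims_and_file_extension("my_output", data_df))
# -> my_output_n3x2.gct
```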
Please provide a description of the function:def robust_zscore(mat, ctrl_mat=None, min_mad=0.1):
''' Robustly z-score a pandas df along the rows.
Args:
mat (pandas df): Matrix of data that z-scoring will be applied to
ctrl_mat (pandas df): Optional matrix from which to compute medians and MADs
(e.g. vehicle control)
min_mad (float): Minimum MAD to threshold to; tiny MAD values will cause
z-scores to blow up
Returns:
zscore_df (pandas_df): z-scored data
'''
# If optional df exists, calc medians and mads from it
if ctrl_mat is not None:
medians = ctrl_mat.median(axis=1)
median_devs = abs(ctrl_mat.subtract(medians, axis=0))
# Else just use plate medians
else:
medians = mat.median(axis=1)
median_devs = abs(mat.subtract(medians, axis=0))
sub = mat.subtract(medians, axis='index')
mads = median_devs.median(axis=1)
# Threshold mads
mads = mads.clip(lower=min_mad)
# Must multiply values by 1.4826 to make MAD comparable to SD
# (https://en.wikipedia.org/wiki/Median_absolute_deviation)
zscore_df = sub.divide(mads * 1.4826, axis='index')
return zscore_df.round(rounding_precision) | []
|
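A worked example of the robust z-score formula used above, written out inline ((x - row median) / (1.4826 * row MAD), MAD floored at `min_mad`); the second row shows why the MAD floor matters:

```python
import pandas as pd

mat = pd.DataFrame([[1.0, 2.0, 3.0, 10.0],
                    [5.0, 5.0, 5.0, 5.0]])

medians = mat.median(axis=1)                           # per-row medians: 2.5, 5.0
mads = mat.sub(medians, axis=0).abs().median(axis=1)   # per-row MADs: 1.0, 0.0
mads = mads.clip(lower=0.1)                            # floor the MAD (avoids dividing by ~0)
zscores = mat.sub(medians, axis=0).div(mads * 1.4826, axis=0)
print(zscores.round(4))
# Row 0: -1.0117, -0.3372, 0.3372, 5.0587; Row 1: all 0.0
```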
Please provide a description of the function:def gct2gctx_main(args):
in_gctoo = parse_gct.parse(args.filename, convert_neg_666=False)
if args.output_filepath is None:
basename = os.path.basename(args.filename)
out_name = os.path.splitext(basename)[0] + ".gctx"
else:
out_name = args.output_filepath
if args.row_annot_path is None:
pass
else:
row_metadata = pd.read_csv(args.row_annot_path, sep='\t', index_col=0, header=0, low_memory=False)
assert all(in_gctoo.data_df.index.isin(row_metadata.index)), \
"Row ids in matrix missing from annotations file"
in_gctoo.row_metadata_df = row_metadata.loc[row_metadata.index.isin(in_gctoo.data_df.index)]
if args.col_annot_path is None:
pass
else:
col_metadata = pd.read_csv(args.col_annot_path, sep='\t', index_col=0, header=0, low_memory=False)
assert all(in_gctoo.data_df.columns.isin(col_metadata.index)), \
"Column ids in matrix missing from annotations file"
in_gctoo.col_metadata_df = col_metadata.loc[col_metadata.index.isin(in_gctoo.data_df.columns)]
write_gctx.write(in_gctoo, out_name) | [
" Separate from main() in order to make command-line tool. ",
" If annotations are supplied, parse table and set metadata_df "
]
|
Please provide a description of the function:def parse(file_path, convert_neg_666=True, rid=None, cid=None, ridx=None, cidx=None,
row_meta_only=False, col_meta_only=False, make_multiindex=False):
if file_path.endswith(".gct"):
out = parse_gct.parse(file_path, convert_neg_666=convert_neg_666,
rid=rid, cid=cid, ridx=ridx, cidx=cidx,
row_meta_only=row_meta_only, col_meta_only=col_meta_only,
make_multiindex=make_multiindex)
elif file_path.endswith(".gctx"):
out = parse_gctx.parse(file_path, convert_neg_666=convert_neg_666,
rid=rid, cid=cid, ridx=ridx, cidx=cidx,
row_meta_only=row_meta_only, col_meta_only=col_meta_only,
make_multiindex=make_multiindex)
else:
err_msg = "File to parse must be .gct or .gctx!"
logger.error(err_msg)
raise Exception(err_msg)
return out | [
"\n Identifies whether file_path corresponds to a .gct or .gctx file and calls the\n correct corresponding parse method.\n\n Input:\n Mandatory:\n - gct(x)_file_path (str): full path to gct(x) file you want to parse.\n\n Optional:\n - convert_neg_666 (bool): whether to convert -666 values to numpy.nan or not\n (see Note below for more details on this). Default = False.\n - rid (list of strings): list of row ids to specifically keep from gctx. Default=None.\n - cid (list of strings): list of col ids to specifically keep from gctx. Default=None.\n - ridx (list of integers): only read the rows corresponding to this\n list of integer ids. Default=None.\n - cidx (list of integers): only read the columns corresponding to this\n list of integer ids. Default=None.\n - row_meta_only (bool): Whether to load data + metadata (if False), or just row metadata (if True)\n as pandas DataFrame\n - col_meta_only (bool): Whether to load data + metadata (if False), or just col metadata (if True)\n as pandas DataFrame\n - make_multiindex (bool): whether to create a multi-index df combining\n the 3 component dfs\n\n Output:\n - out (GCToo object or pandas df): if row_meta_only or col_meta_only, then\n out is a metadata df; otherwise, it's a GCToo instance containing\n content of parsed gct(x) file\n\n Note: why does convert_neg_666 exist?\n - In CMap--for somewhat obscure historical reasons--we use \"-666\" as our null value\n for metadata. However (so that users can take full advantage of pandas' methods,\n including those for filtering nan's etc) we provide the option of converting these\n into numpy.NaN values, the pandas default.\n "
]
|
Please provide a description of the function:def get_upper_triangle(correlation_matrix):
''' Extract upper triangle from a square matrix. Negative values are
set to 0.
Args:
correlation_matrix (pandas df): Correlations between all replicates
Returns:
upper_tri_df (pandas df): Upper triangle extracted from
correlation_matrix; rid is the row index, cid is the column index,
corr is the extracted correlation value
'''
upper_triangle = correlation_matrix.where(np.triu(np.ones(correlation_matrix.shape), k=1).astype(np.bool))
# convert matrix into long form description
upper_tri_df = upper_triangle.stack().reset_index(level=1)
upper_tri_df.columns = ['rid', 'corr']
# Index at this point is cid, it now becomes a column
upper_tri_df.reset_index(level=0, inplace=True)
# Get rid of negative values
upper_tri_df['corr'] = upper_tri_df['corr'].clip(lower=0)
return upper_tri_df.round(rounding_precision) | []
|
Please provide a description of the function:def calculate_weights(correlation_matrix, min_wt):
''' Calculate a weight for each profile based on its correlation to other
replicates. Negative correlations are clipped to 0, and weights are clipped
to be min_wt at the least.
Args:
correlation_matrix (pandas df): Correlations between all replicates
min_wt (float): Minimum raw weight when calculating weighted average
Returns:
raw weights (pandas series): Mean correlation to other replicates
weights (pandas series): raw_weights normalized such that they add to 1
'''
# fill diagonal of correlation_matrix with np.nan
np.fill_diagonal(correlation_matrix.values, np.nan)
# remove negative values
correlation_matrix = correlation_matrix.clip(lower=0)
# get average correlation for each profile (will ignore NaN)
raw_weights = correlation_matrix.mean(axis=1)
# threshold weights
raw_weights = raw_weights.clip(lower=min_wt)
# normalize raw_weights so that they add to 1
weights = raw_weights / sum(raw_weights)
return raw_weights.round(rounding_precision), weights.round(rounding_precision) | []
|
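A worked sketch of the weighting scheme in `calculate_weights` above, on a toy 3x3 correlation matrix (diagonal ignored, negative correlations clipped to 0, raw weights floored at `min_wt`, then normalized to sum to 1):

```python
import numpy as np
import pandas as pd

corr = pd.DataFrame([[1.0, 0.8, 0.6],
                     [0.8, 1.0, -0.2],
                     [0.6, -0.2, 1.0]])

np.fill_diagonal(corr.values, np.nan)              # ignore self-correlations
corr = corr.clip(lower=0)                          # drop negative correlations
raw_weights = corr.mean(axis=1).clip(lower=0.01)   # -> 0.7, 0.4, 0.3
weights = raw_weights / raw_weights.sum()          # -> 0.5, ~0.2857, ~0.2143
print(raw_weights.round(4))
print(weights.round(4))
```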
Please provide a description of the function:def agg_wt_avg(mat, min_wt = 0.01, corr_metric='spearman'):
''' Aggregate a set of replicate profiles into a single signature using
a weighted average.
Args:
mat (pandas df): a matrix of replicate profiles, where the columns are
samples and the rows are features; columns correspond to the
replicates of a single perturbagen
min_wt (float): Minimum raw weight when calculating weighted average
corr_metric (string): Spearman or Pearson; the correlation method
Returns:
out_sig (pandas series): weighted average values
upper_tri_df (pandas df): the correlations between each profile that went into the signature
raw weights (pandas series): weights before normalization
weights (pandas series): weights after normalization
'''
assert mat.shape[1] > 0, "mat is empty! mat: {}".format(mat)
if mat.shape[1] == 1:
out_sig = mat
upper_tri_df = None
raw_weights = None
weights = None
else:
assert corr_metric in ["spearman", "pearson"]
# Make correlation matrix column wise
corr_mat = mat.corr(method=corr_metric)
# Save the values in the upper triangle
upper_tri_df = get_upper_triangle(corr_mat)
# Calculate weight per replicate
raw_weights, weights = calculate_weights(corr_mat, min_wt)
# Apply weights to values
weighted_values = mat * weights
out_sig = weighted_values.sum(axis=1)
return out_sig, upper_tri_df, raw_weights, weights | []
|
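A usage sketch for `agg_wt_avg` above on three replicate profiles of five features; it assumes the helper functions defined above, plus the module-level `rounding_precision` constant not shown here, are in scope:

```python
import pandas as pd

mat = pd.DataFrame({"rep1": [1.0, 2.0, 3.0, 4.0, 5.0],
                    "rep2": [1.1, 2.1, 2.9, 4.2, 5.1],
                    "rep3": [0.9, 1.8, 3.2, 3.9, 4.8]})

out_sig, upper_tri_df, raw_weights, weights = agg_wt_avg(mat, min_wt=0.01,
                                                         corr_metric="spearman")
print(out_sig)   # weighted-average signature, one value per feature
print(weights)   # per-replicate weights summing to 1
```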
Please provide a description of the function:def concat_main(args):
# Get files directly
if args.input_filepaths is not None:
files = args.input_filepaths
# Or find them
else:
files = get_file_list(args.file_wildcard)
# No files found
if len(files) == 0:
msg = "No files were found. args.file_wildcard: {}".format(args.file_wildcard)
logger.error(msg)
raise Exception(msg)
# Only 1 file found
if len(files) == 1:
logger.warning("Only 1 file found. No concatenation needs to be done, exiting")
return
# More than 1 file found
else:
# Parse each file and append to a list
gctoos = []
for f in files:
gctoos.append(parse.parse(f))
# Create concatenated gctoo object
if args.concat_direction == "horiz":
out_gctoo = hstack(gctoos, args.remove_all_metadata_fields, args.error_report_output_file,
args.fields_to_remove, args.reset_ids)
elif args.concat_direction == "vert":
out_gctoo = vstack(gctoos, args.remove_all_metadata_fields, args.error_report_output_file,
args.fields_to_remove, args.reset_ids)
# Write out_gctoo to file
logger.info("Writing to output file args.out_name: {}".format(args.out_name))
if args.out_type == "gctx":
write_gctx.write(out_gctoo, args.out_name)
elif args.out_type == "gct":
write_gct.write(out_gctoo, args.out_name,
filler_null=args.filler_null,
metadata_null=args.metadata_null,
data_null=args.data_null) | [
" Separate method from main() in order to make testing easier and to\n enable command-line access. "
]
|
Please provide a description of the function:def get_file_list(wildcard):
files = glob.glob(os.path.expanduser(wildcard))
return files | [
" Search for files to be concatenated. Currently very basic, but could\n expand to be more sophisticated.\n\n Args:\n wildcard (regular expression string)\n\n Returns:\n files (list of full file paths)\n\n "
]
|
Please provide a description of the function:def hstack(gctoos, remove_all_metadata_fields=False, error_report_file=None, fields_to_remove=[], reset_ids=False):
# Separate each gctoo into its component dfs
row_meta_dfs = []
col_meta_dfs = []
data_dfs = []
srcs = []
for g in gctoos:
row_meta_dfs.append(g.row_metadata_df)
col_meta_dfs.append(g.col_metadata_df)
data_dfs.append(g.data_df)
srcs.append(g.src)
logger.debug("shapes of row_meta_dfs: {}".format([x.shape for x in row_meta_dfs]))
# Concatenate row metadata
all_row_metadata_df = assemble_common_meta(row_meta_dfs, fields_to_remove, srcs, remove_all_metadata_fields, error_report_file)
# Concatenate col metadata
all_col_metadata_df = assemble_concatenated_meta(col_meta_dfs, remove_all_metadata_fields)
# Concatenate the data_dfs
all_data_df = assemble_data(data_dfs, "horiz")
# Make sure df shapes are correct
assert all_data_df.shape[0] == all_row_metadata_df.shape[0], "Number of rows in metadata does not match number of rows in data - all_data_df.shape[0]: {} all_row_metadata_df.shape[0]: {}".format(all_data_df.shape[0], all_row_metadata_df.shape[0])
assert all_data_df.shape[1] == all_col_metadata_df.shape[0], "Number of columns in data does not match number of columns metadata - all_data_df.shape[1]: {} all_col_metadata_df.shape[0]: {}".format(all_data_df.shape[1], all_col_metadata_df.shape[0])
# If requested, reset sample ids to be unique integers and move old sample
# ids into column metadata
if reset_ids:
do_reset_ids(all_col_metadata_df, all_data_df, "horiz")
logger.info("Build GCToo of all...")
concated = GCToo.GCToo(row_metadata_df=all_row_metadata_df,
col_metadata_df=all_col_metadata_df,
data_df=all_data_df)
return concated | [
" Horizontally concatenate gctoos.\n\n Args:\n gctoos (list of gctoo objects)\n remove_all_metadata_fields (bool): ignore/strip all common metadata when combining gctoos\n error_report_file (string): path to write file containing error report indicating \n problems that occurred during hstack, mainly for inconsistencies in common metadata\n fields_to_remove (list of strings): fields to be removed from the\n common metadata because they don't agree across files\n reset_ids (bool): set to True if sample ids are not unique\n\n Return:\n concated (gctoo object)\n "
]
|
Please provide a description of the function:def assemble_common_meta(common_meta_dfs, fields_to_remove, sources, remove_all_metadata_fields, error_report_file):
all_meta_df, all_meta_df_with_dups = build_common_all_meta_df(common_meta_dfs, fields_to_remove, remove_all_metadata_fields)
if not all_meta_df.index.is_unique:
all_report_df = build_mismatched_common_meta_report([x.shape for x in common_meta_dfs],
sources, all_meta_df, all_meta_df_with_dups)
unique_duplicate_ids = all_report_df.index.unique()
if error_report_file is not None:
all_report_df.to_csv(error_report_file, sep="\t")
        msg = ("There are inconsistencies in common_metadata_df between different files. Try excluding metadata fields\n"
               "using the fields_to_remove argument. unique_duplicate_ids: {}\nall_report_df:\n{}").format(unique_duplicate_ids, all_report_df)
raise MismatchCommonMetadataConcatException(msg)
# Finally, sort the index
all_meta_df_sorted = all_meta_df.sort_index(axis=0)
return all_meta_df_sorted | [
" Assemble the common metadata dfs together. Both indices are sorted.\n Fields that are not in all the dfs are dropped.\n\n Args:\n common_meta_dfs (list of pandas dfs)\n fields_to_remove (list of strings): fields to be removed from the\n common metadata because they don't agree across files\n\n Returns:\n all_meta_df_sorted (pandas df)\n\n ",
"There are inconsistencies in common_metadata_df between different files. Try excluding metadata fields\nusing the fields_to_remove argument. unique_duplicate_ids: {}\nall_report_df:\n{}"
]
|
Please provide a description of the function:def build_common_all_meta_df(common_meta_dfs, fields_to_remove, remove_all_metadata_fields):
if remove_all_metadata_fields:
trimmed_common_meta_dfs = [pd.DataFrame(index=df.index) for df in common_meta_dfs]
else:
shared_column_headers = sorted(set.intersection(*[set(df.columns) for df in common_meta_dfs]))
logger.debug("shared_column_headers: {}".format(shared_column_headers))
trimmed_common_meta_dfs = [df[shared_column_headers] for df in common_meta_dfs]
# Remove any column headers that will prevent dfs from being identical
for df in trimmed_common_meta_dfs:
df.drop(fields_to_remove, axis=1, errors="ignore", inplace=True)
# Concatenate all dfs and then remove duplicate rows
all_meta_df_with_dups = pd.concat(trimmed_common_meta_dfs, axis=0)
logger.debug("all_meta_df_with_dups.shape: {}".format(all_meta_df_with_dups.shape))
logger.debug("all_meta_df_with_dups.columns: {}".format(all_meta_df_with_dups.columns))
logger.debug("all_meta_df_with_dups.index: {}".format(all_meta_df_with_dups.index))
# If all metadata dfs were empty, df will be empty
if all_meta_df_with_dups.empty:
# Simply return unique ids
all_meta_df = pd.DataFrame(index=all_meta_df_with_dups.index.unique())
else:
all_meta_df_with_dups["concat_column_for_index"] = all_meta_df_with_dups.index
all_meta_df = all_meta_df_with_dups.copy(deep=True).drop_duplicates()
all_meta_df.drop("concat_column_for_index", axis=1, inplace=True)
all_meta_df_with_dups.drop("concat_column_for_index", axis=1, inplace=True)
logger.debug("all_meta_df_with_dups.shape: {}".format(all_meta_df_with_dups.shape))
logger.debug("all_meta_df.shape: {}".format(all_meta_df.shape))
return (all_meta_df, all_meta_df_with_dups) | [
"\n concatenate the entries in common_meta_dfs, removing columns selectively (fields_to_remove) or entirely (\n remove_all_metadata_fields=True; in this case, effectively just merges all the indexes in common_meta_dfs).\n\n Returns 2 dataframes (in a tuple): the first has duplicates removed, the second does not.\n\n Args:\n common_meta_dfs: collection of pandas DataFrames containing the metadata in the \"common\" direction of the\n concatenation operation\n fields_to_remove: columns to be removed (if present) from the common_meta_dfs\n remove_all_metadata_fields: boolean indicating that all metadata fields should be removed from the\n common_meta_dfs; overrides fields_to_remove if present\n\n Returns:\n tuple containing\n all_meta_df: pandas dataframe that is the concatenation of the dataframes in common_meta_dfs,\n all_meta_df_with_dups:\n "
]
|
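A small pandas-only walk-through of the column-intersection and de-duplication steps performed above; the frames, ids, and field names are invented for illustration.

import pandas as pd

# Toy row-metadata frames sharing ids r1/r2; only pr_gene_symbol is common to both.
df_a = pd.DataFrame({"pr_gene_symbol": ["TP53", "EGFR"], "pr_is_lm": [1, 0]},
                    index=["r1", "r2"])
df_b = pd.DataFrame({"pr_gene_symbol": ["TP53", "EGFR"]}, index=["r1", "r2"])

# Keep only the columns present in every frame.
shared = sorted(set.intersection(*[set(df.columns) for df in (df_a, df_b)]))
trimmed = [df[shared] for df in (df_a, df_b)]

# Concatenate, then drop rows that duplicate both the id and the metadata values.
with_dups = pd.concat(trimmed, axis=0)
with_dups["concat_column_for_index"] = with_dups.index
deduped = with_dups.drop_duplicates().drop("concat_column_for_index", axis=1)

print(deduped)                   # two rows (r1, r2), single column pr_gene_symbol
print(deduped.index.is_unique)   # True, so assemble_common_meta would accept it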
Please provide a description of the function:def assemble_concatenated_meta(concated_meta_dfs, remove_all_metadata_fields):
# Concatenate the concated_meta_dfs
if remove_all_metadata_fields:
for df in concated_meta_dfs:
df.drop(df.columns, axis=1, inplace=True)
all_concated_meta_df = pd.concat(concated_meta_dfs, axis=0)
# Sanity check: the number of rows in all_concated_meta_df should correspond
# to the sum of the number of rows in the input dfs
n_rows = all_concated_meta_df.shape[0]
logger.debug("all_concated_meta_df.shape[0]: {}".format(n_rows))
n_rows_cumulative = sum([df.shape[0] for df in concated_meta_dfs])
assert n_rows == n_rows_cumulative
# Sort the index and columns
all_concated_meta_df_sorted = all_concated_meta_df.sort_index(axis=0).sort_index(axis=1)
return all_concated_meta_df_sorted | [
" Assemble the concatenated metadata dfs together. For example,\n if horizontally concatenating, the concatenated metadata dfs are the\n column metadata dfs. Both indices are sorted.\n\n Args:\n concated_meta_dfs (list of pandas dfs)\n\n Returns:\n all_concated_meta_df_sorted (pandas df)\n\n "
]
|
Please provide a description of the function:def assemble_data(data_dfs, concat_direction):
if concat_direction == "horiz":
# Concatenate the data_dfs horizontally
all_data_df = pd.concat(data_dfs, axis=1)
# Sanity check: the number of columns in all_data_df should
# correspond to the sum of the number of columns in the input dfs
n_cols = all_data_df.shape[1]
logger.debug("all_data_df.shape[1]: {}".format(n_cols))
n_cols_cumulative = sum([df.shape[1] for df in data_dfs])
assert n_cols == n_cols_cumulative
elif concat_direction == "vert":
# Concatenate the data_dfs vertically
all_data_df = pd.concat(data_dfs, axis=0)
# Sanity check: the number of rows in all_data_df should
# correspond to the sum of the number of rows in the input dfs
n_rows = all_data_df.shape[0]
logger.debug("all_data_df.shape[0]: {}".format(n_rows))
n_rows_cumulative = sum([df.shape[0] for df in data_dfs])
assert n_rows == n_rows_cumulative
# Sort both indices
all_data_df_sorted = all_data_df.sort_index(axis=0).sort_index(axis=1)
return all_data_df_sorted | [
" Assemble the data dfs together. Both indices are sorted.\n\n Args:\n data_dfs (list of pandas dfs)\n concat_direction (string): 'horiz' or 'vert'\n\n Returns:\n all_data_df_sorted (pandas df)\n\n "
]
|
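The horizontal branch of assemble_data reduces to a pandas concat plus index sorting; this tiny self-contained example shows the resulting column order (the ids are made up).

import pandas as pd

left = pd.DataFrame([[1.0, 2.0]], index=["r1"], columns=["s2", "s1"])
right = pd.DataFrame([[3.0]], index=["r1"], columns=["s3"])

# "horiz": samples (columns) are appended, then both axes are sorted.
all_data = pd.concat([left, right], axis=1).sort_index(axis=0).sort_index(axis=1)
print(all_data.columns.tolist())  # ['s1', 's2', 's3']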
Please provide a description of the function:def do_reset_ids(concatenated_meta_df, data_df, concat_direction):
if concat_direction == "horiz":
# Make sure cids agree between data_df and concatenated_meta_df
assert concatenated_meta_df.index.equals(data_df.columns), (
"cids in concatenated_meta_df do not agree with cids in data_df.")
# Reset cids in concatenated_meta_df
reset_ids_in_meta_df(concatenated_meta_df)
# Replace cids in data_df with the new ones from concatenated_meta_df
# (just an array of unique integers, zero-indexed)
data_df.columns = pd.Index(concatenated_meta_df.index.values)
elif concat_direction == "vert":
# Make sure rids agree between data_df and concatenated_meta_df
assert concatenated_meta_df.index.equals(data_df.index), (
"rids in concatenated_meta_df do not agree with rids in data_df.")
# Reset rids in concatenated_meta_df
reset_ids_in_meta_df(concatenated_meta_df)
# Replace rids in data_df with the new ones from concatenated_meta_df
# (just an array of unique integers, zero-indexed)
data_df.index = pd.Index(concatenated_meta_df.index.values) | [
" Reset ids in concatenated metadata and data dfs to unique integers and\n save the old ids in a metadata column.\n\n Note that the dataframes are modified in-place.\n\n Args:\n concatenated_meta_df (pandas df)\n data_df (pandas df)\n concat_direction (string): 'horiz' or 'vert'\n\n Returns:\n None (dfs modified in-place)\n\n "
]
|
Please provide a description of the function:def reset_ids_in_meta_df(meta_df):
# Record original index name, and then change it so that the column that it
# becomes will be appropriately named
original_index_name = meta_df.index.name
meta_df.index.name = "old_id"
# Reset index
meta_df.reset_index(inplace=True)
# Change the index name back to what it was
meta_df.index.name = original_index_name | [
" Meta_df is modified inplace. "
]
|
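A self-contained pandas example of the in-place id reset performed above; the metadata field and ids are invented.

import pandas as pd

meta = pd.DataFrame({"pert_iname": ["DMSO", "BRD-K12345"]}, index=["cidA", "cidA"])
meta.index.name = "cid"

# Same steps as reset_ids_in_meta_df: old ids move into an "old_id" column and
# the index becomes 0, 1, 2, ... with its original name restored.
original_index_name = meta.index.name
meta.index.name = "old_id"
meta.reset_index(inplace=True)
meta.index.name = original_index_name

print(meta)  # columns: old_id, pert_iname; index: 0..1 named "cid"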
Please provide a description of the function:def subset_gctoo(gctoo, row_bool=None, col_bool=None, rid=None, cid=None,
ridx=None, cidx=None, exclude_rid=None, exclude_cid=None):
assert sum([(rid is not None), (row_bool is not None), (ridx is not None)]) <= 1, (
"Only one of rid, row_bool, and ridx can be provided.")
assert sum([(cid is not None), (col_bool is not None), (cidx is not None)]) <= 1, (
"Only one of cid, col_bool, and cidx can be provided.")
# Figure out what rows and columns to keep
rows_to_keep = get_rows_to_keep(gctoo, rid, row_bool, ridx, exclude_rid)
cols_to_keep = get_cols_to_keep(gctoo, cid, col_bool, cidx, exclude_cid)
# Convert labels to boolean array to preserve order
rows_to_keep_bools = gctoo.data_df.index.isin(rows_to_keep)
cols_to_keep_bools = gctoo.data_df.columns.isin(cols_to_keep)
# Make the output gct
out_gctoo = GCToo.GCToo(
src=gctoo.src, version=gctoo.version,
data_df=gctoo.data_df.loc[rows_to_keep_bools, cols_to_keep_bools],
row_metadata_df=gctoo.row_metadata_df.loc[rows_to_keep_bools, :],
col_metadata_df=gctoo.col_metadata_df.loc[cols_to_keep_bools, :])
assert out_gctoo.data_df.size > 0, "Subsetting yielded an empty gct!"
logger.info(("Initial GCToo with {} rows and {} columns subsetted down to " +
"{} rows and {} columns.").format(
gctoo.data_df.shape[0], gctoo.data_df.shape[1],
out_gctoo.data_df.shape[0], out_gctoo.data_df.shape[1]))
return out_gctoo | [
" Extract a subset of data from a GCToo object in a variety of ways.\n The order of rows and columns will be preserved.\n\n Args:\n gctoo (GCToo object)\n row_bool (list of bools): length must equal gctoo.data_df.shape[0]\n col_bool (list of bools): length must equal gctoo.data_df.shape[1]\n rid (list of strings): rids to include\n cid (list of strings): cids to include\n ridx (list of integers): row integer ids to include\n cidx (list of integers): col integer ids to include\n exclude_rid (list of strings): rids to exclude\n exclude_cid (list of strings): cids to exclude\n\n Returns:\n out_gctoo (GCToo object): gctoo after subsetting\n "
]
|
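A hedged usage sketch for subset_gctoo on an in-memory GCToo; the import paths assume cmapPy's usual pandasGEXpress layout, and the ids and values are made up.

import pandas as pd
from cmapPy.pandasGEXpress.GCToo import GCToo
from cmapPy.pandasGEXpress.subset_gctoo import subset_gctoo

data = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]], index=["r1", "r2"], columns=["c1", "c2"])
g = GCToo(data_df=data,
          row_metadata_df=pd.DataFrame(index=["r1", "r2"]),
          col_metadata_df=pd.DataFrame(index=["c1", "c2"]))

# Keep one row by id and one column by position; original ordering is preserved.
small = subset_gctoo(g, rid=["r2"], cidx=[0])
print(small.data_df)  # single value 3.0 at (r2, c1)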
Please provide a description of the function:def get_rows_to_keep(gctoo, rid=None, row_bool=None, ridx=None, exclude_rid=None):
# Use rid if provided
if rid is not None:
assert type(rid) == list, "rid must be a list. rid: {}".format(rid)
rows_to_keep = [gctoo_row for gctoo_row in gctoo.data_df.index if gctoo_row in rid]
# Tell user if some rids not found
num_missing_rids = len(rid) - len(rows_to_keep)
if num_missing_rids != 0:
logger.info("{} rids were not found in the GCT.".format(num_missing_rids))
# Use row_bool if provided
elif row_bool is not None:
assert len(row_bool) == gctoo.data_df.shape[0], (
"row_bool must have length equal to gctoo.data_df.shape[0]. " +
"len(row_bool): {}, gctoo.data_df.shape[0]: {}".format(
len(row_bool), gctoo.data_df.shape[0]))
rows_to_keep = gctoo.data_df.index[row_bool].values
# Use ridx if provided
elif ridx is not None:
assert type(ridx[0]) is int, (
"ridx must be a list of integers. ridx[0]: {}, " +
"type(ridx[0]): {}").format(ridx[0], type(ridx[0]))
assert max(ridx) <= gctoo.data_df.shape[0], (
"ridx contains an integer larger than the number of rows in " +
"the GCToo. max(ridx): {}, gctoo.data_df.shape[0]: {}").format(
max(ridx), gctoo.data_df.shape[0])
rows_to_keep = gctoo.data_df.index[ridx].values
# If rid, row_bool, and ridx are all None, return all rows
else:
rows_to_keep = gctoo.data_df.index.values
# Use exclude_rid if provided
if exclude_rid is not None:
# Keep only those rows that are not in exclude_rid
rows_to_keep = [row_to_keep for row_to_keep in rows_to_keep if row_to_keep not in exclude_rid]
return rows_to_keep | [
" Figure out based on the possible row inputs which rows to keep.\n\n Args:\n gctoo (GCToo object):\n rid (list of strings):\n row_bool (boolean array):\n ridx (list of integers):\n exclude_rid (list of strings):\n\n Returns:\n rows_to_keep (list of strings): row ids to be kept\n\n "
]
|
Please provide a description of the function:def get_cols_to_keep(gctoo, cid=None, col_bool=None, cidx=None, exclude_cid=None):
# Use cid if provided
if cid is not None:
assert type(cid) == list, "cid must be a list. cid: {}".format(cid)
cols_to_keep = [gctoo_col for gctoo_col in gctoo.data_df.columns if gctoo_col in cid]
# Tell user if some cids not found
num_missing_cids = len(cid) - len(cols_to_keep)
if num_missing_cids != 0:
logger.info("{} cids were not found in the GCT.".format(num_missing_cids))
# Use col_bool if provided
elif col_bool is not None:
assert len(col_bool) == gctoo.data_df.shape[1], (
"col_bool must have length equal to gctoo.data_df.shape[1]. " +
"len(col_bool): {}, gctoo.data_df.shape[1]: {}".format(
len(col_bool), gctoo.data_df.shape[1]))
cols_to_keep = gctoo.data_df.columns[col_bool].values
# Use cidx if provided
elif cidx is not None:
assert type(cidx[0]) is int, (
"cidx must be a list of integers. cidx[0]: {}, " +
"type(cidx[0]): {}").format(cidx[0], type(cidx[0]))
assert max(cidx) <= gctoo.data_df.shape[1], (
"cidx contains an integer larger than the number of columns in " +
"the GCToo. max(cidx): {}, gctoo.data_df.shape[1]: {}").format(
max(cidx), gctoo.data_df.shape[1])
cols_to_keep = gctoo.data_df.columns[cidx].values
# If cid, col_bool, and cidx are all None, return all columns
else:
cols_to_keep = gctoo.data_df.columns.values
# Use exclude_cid if provided
if exclude_cid is not None:
# Keep only those columns that are not in exclude_cid
cols_to_keep = [col_to_keep for col_to_keep in cols_to_keep if col_to_keep not in exclude_cid]
return cols_to_keep | [
" Figure out based on the possible columns inputs which columns to keep.\n\n Args:\n gctoo (GCToo object):\n cid (list of strings):\n col_bool (boolean array):\n cidx (list of integers):\n exclude_cid (list of strings):\n\n Returns:\n cols_to_keep (list of strings): col ids to be kept\n\n "
]
|
Please provide a description of the function:def read(in_path):
assert os.path.exists(in_path), "The following GRP file can't be found. in_path: {}".format(in_path)
with open(in_path, "r") as f:
lines = f.readlines()
# need the second conditional to ignore comment lines
grp = [line.strip() for line in lines if line and not re.match("^#", line)]
return grp | [
" Read a grp file at the path specified by in_path.\n\n Args:\n in_path (string): path to GRP file\n\n Returns:\n grp (list)\n\n "
]
|
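A self-contained round-trip showing the comment-stripping behaviour of read; the file name and gene symbols are placeholders.

import re

# Write a tiny GRP file: one id per line, lines starting with "#" are comments.
with open("example.grp", "w") as f:
    f.write("# hypothetical gene set\nTP53\nEGFR\n")

with open("example.grp", "r") as f:
    lines = f.readlines()

# Same filtering as read(): drop comment lines and strip whitespace.
grp = [line.strip() for line in lines if line and not re.match("^#", line)]
print(grp)  # ['TP53', 'EGFR']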
Please provide a description of the function:def write(grp, out_path):
with open(out_path, "w") as f:
for x in grp:
f.write(str(x) + "\n") | [
" Write a GRP to a text file.\n\n Args:\n grp (list): GRP object to write to new-line delimited text file\n out_path (string): output path\n\n Returns:\n None\n\n "
]
|
Please provide a description of the function:def fast_corr(x, y=None, destination=None):
if y is None:
y = x
r = fast_cov.fast_cov(x, y, destination)
std_x = numpy.std(x, axis=0, ddof=1)
std_y = numpy.std(y, axis=0, ddof=1)
numpy.divide(r, std_x[:, numpy.newaxis], out=r)
numpy.divide(r, std_y[numpy.newaxis, :], out=r)
return r | [
"calculate the pearson correlation matrix for the columns of x (with dimensions MxN), or optionally, the pearson correlaton matrix\n between x and y (with dimensions OxP). If destination is provided, put the results there. \n In the language of statistics the columns are the variables and the rows are the observations.\n\n Args:\n x (numpy array-like) MxN in shape\n y (optional, numpy array-like) OxP in shape. M (# rows in x) must equal O (# rows in y)\n destination (numpy array-like) optional location where to store the results as they are calculated (e.g. a numpy\n memmap of a file)\n\n returns (numpy array-like) array of the covariance values\n for defaults (y=None), shape is NxN\n if y is provied, shape is NxP\n "
]
|
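A numpy-only check of the arithmetic fast_corr performs for the default y=None case (it delegates the covariance itself to fast_cov, which is not shown here); the random data is purely illustrative.

import numpy

numpy.random.seed(0)
x = numpy.random.randn(50, 3)   # 50 observations (rows), 3 variables (columns)

# Column covariance divided by the outer product of column standard deviations.
cov = numpy.cov(x, rowvar=False, ddof=1)
std = numpy.std(x, axis=0, ddof=1)
r = cov / std[:, numpy.newaxis] / std[numpy.newaxis, :]

print(numpy.allclose(r, numpy.corrcoef(x, rowvar=False)))  # True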
Please provide a description of the function:def make_specified_size_gctoo(og_gctoo, num_entries, dim):
assert dim in ["row", "col"], "dim specified must be either 'row' or 'col'"
dim_index = 0 if "row" == dim else 1
assert num_entries <= og_gctoo.data_df.shape[dim_index], ("number of entries must be smaller than dimension being "
"subsetted - num_entries: {} dim: {} dim_index: {} og_gctoo.data_df.shape[dim_index]: {}".format(
num_entries, dim, dim_index, og_gctoo.data_df.shape[dim_index]))
if dim == "col":
columns = [x for x in og_gctoo.data_df.columns.values]
numpy.random.shuffle(columns)
columns = columns[0:num_entries]
rows = og_gctoo.data_df.index.values
else:
rows = [x for x in og_gctoo.data_df.index.values]
numpy.random.shuffle(rows)
rows = rows[0:num_entries]
columns = og_gctoo.data_df.columns.values
new_data_df = og_gctoo.data_df.loc[rows, columns]
new_row_meta = og_gctoo.row_metadata_df.loc[rows]
new_col_meta = og_gctoo.col_metadata_df.loc[columns]
logger.debug(
"after slice - new_col_meta.shape: {} new_row_meta.shape: {}".format(new_col_meta.shape, new_row_meta.shape))
# make & return new gctoo instance
new_gctoo = GCToo.GCToo(data_df=new_data_df, row_metadata_df=new_row_meta, col_metadata_df=new_col_meta)
return new_gctoo | [
"\n\tSubsets a GCToo instance along either rows or columns to obtain a specified size.\n\n\tInput:\n\t\t- og_gctoo (GCToo): a GCToo instance \n\t\t- num_entries (int): the number of entries to keep\n\t\t- dim (str): the dimension along which to subset. Must be \"row\" or \"col\"\n\n\tOutput:\n\t\t- new_gctoo (GCToo): the GCToo instance subsetted as specified. \n\t"
]
|
Please provide a description of the function:def run_filter_query(self, resource_name, filter_clause):
url = self.base_url + "/" + resource_name
params = {"filter":json.dumps(filter_clause)}
r = requests.get(url, headers=self.headers, params=params)
logger.debug("requests.get result r.status_code: {}".format(r.status_code))
ClueApiClient._check_request_response(r)
return r.json() | [
"run a query (get) against the CLUE api, using the API and user key fields of self and the fitler_clause provided\n\n Args:\n resource_name: str - name of the resource / collection to query - e.g. genes, perts, cells etc.\n filter_clause: dictionary - contains filter to pass to API to; uses loopback specification\n\n Returns: list of dictionaries containing the results of the query\n "
]
|
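A requests-only sketch of the call run_filter_query issues. The base URL, header name, resource name, and field values are placeholders/assumptions; a valid CLUE user key is required for the request to return data.

import json
import requests

base_url = "https://api.clue.io/api"          # placeholder base URL
headers = {"user_key": "YOUR_USER_KEY"}       # assumed header name; supply your own key

# Loopback-style filter: gene records where pr_gene_symbol is TP53, at most one hit.
filter_clause = {"where": {"pr_gene_symbol": "TP53"}, "limit": 1}
params = {"filter": json.dumps(filter_clause)}

r = requests.get(base_url + "/genes", headers=headers, params=params)
print(r.status_code)
print(r.json() if r.ok else r.text)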
Please provide a description of the function:def write(gctoo_object, out_file_name, convert_back_to_neg_666=True, gzip_compression_level=6,
max_chunk_kb=1024, matrix_dtype=numpy.float32):
# make sure out file has a .gctx suffix
gctx_out_name = add_gctx_to_out_name(out_file_name)
# open an hdf5 file to write to
hdf5_out = h5py.File(gctx_out_name, "w")
# write version
write_version(hdf5_out)
# write src
write_src(hdf5_out, gctoo_object, gctx_out_name)
# set chunk size for data matrix
elem_per_kb = calculate_elem_per_kb(max_chunk_kb, matrix_dtype)
chunk_size = set_data_matrix_chunk_size(gctoo_object.data_df.shape, max_chunk_kb, elem_per_kb)
# write data matrix
hdf5_out.create_dataset(data_matrix_node, data=gctoo_object.data_df.transpose().values,
dtype=matrix_dtype)
# write col metadata
write_metadata(hdf5_out, "col", gctoo_object.col_metadata_df, convert_back_to_neg_666,
gzip_compression=gzip_compression_level)
# write row metadata
write_metadata(hdf5_out, "row", gctoo_object.row_metadata_df, convert_back_to_neg_666,
gzip_compression=gzip_compression_level)
# close gctx file
hdf5_out.close() | [
"\n\tWrites a GCToo instance to specified file.\n\n\tInput:\n\t\t- gctoo_object (GCToo): A GCToo instance.\n\t\t- out_file_name (str): file name to write gctoo_object to.\n - convert_back_to_neg_666 (bool): whether to convert np.NAN in metadata back to \"-666\"\n - gzip_compression_level (int, default=6): Compression level to use for metadata. \n - max_chunk_kb (int, default=1024): The maximum number of KB a given chunk will occupy\n - matrix_dtype (numpy dtype, default=numpy.float32): Storage data type for data matrix. \n\t"
]
|
Please provide a description of the function:def write_src(hdf5_out, gctoo_object, out_file_name):
if gctoo_object.src == None:
hdf5_out.attrs[src_attr] = out_file_name
else:
hdf5_out.attrs[src_attr] = gctoo_object.src | [
"\n\tWrites src as attribute of gctx out file. \n\n\tInput:\n\t\t- hdf5_out (h5py): hdf5 file to write to \n\t\t- gctoo_object (GCToo): GCToo instance to be written to .gctx\n\t\t- out_file_name (str): name of hdf5 out file. \n\t"
]
|
Please provide a description of the function:def calculate_elem_per_kb(max_chunk_kb, matrix_dtype):
if matrix_dtype == numpy.float32:
return (max_chunk_kb * 8)/32
elif matrix_dtype == numpy.float64:
return (max_chunk_kb * 8)/64
else:
msg = "Invalid matrix_dtype: {}; only numpy.float32 and numpy.float64 are currently supported".format(matrix_dtype)
logger.error(msg)
raise Exception("write_gctx.calculate_elem_per_kb " + msg) | [
"\n Calculates the number of elem per kb depending on the max chunk size set. \n\n Input: \n - max_chunk_kb (int, default=1024): The maximum number of KB a given chunk will occupy\n - matrix_dtype (numpy dtype, default=numpy.float32): Storage data type for data matrix. \n Currently needs to be np.float32 or np.float64 (TODO: figure out a better way to get bits from a numpy dtype).\n\n Returns: \n elem_per_kb (int), the number of elements per kb for matrix dtype specified. \n "
]
|
Please provide a description of the function:def set_data_matrix_chunk_size(df_shape, max_chunk_kb, elem_per_kb):
row_chunk_size = min(df_shape[0], 1000)
col_chunk_size = min(((max_chunk_kb*elem_per_kb)//row_chunk_size), df_shape[1])
return (row_chunk_size, col_chunk_size) | [
"\n Sets chunk size to use for writing data matrix. \n Note. Calculation used here is for compatibility with cmapM and cmapR. \n\n Input:\n - df_shape (tuple): shape of input data_df. \n - max_chunk_kb (int, default=1024): The maximum number of KB a given chunk will occupy\n - elem_per_kb (int): Number of elements per kb \n\n Returns:\n chunk size (tuple) to use for chunking the data matrix \n "
]
|
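A worked example of the chunking arithmetic above for the defaults (max_chunk_kb=1024, float32) and a hypothetical 12,000 x 400 data matrix; // is used so the integer division matches the Python 2 behaviour of the code.

max_chunk_kb = 1024
elem_per_kb = (max_chunk_kb * 8) // 32                     # 256, as in calculate_elem_per_kb
df_shape = (12000, 400)                                    # hypothetical (rows, cols)

row_chunk_size = min(df_shape[0], 1000)                    # 1000
col_chunk_size = min((max_chunk_kb * elem_per_kb) // row_chunk_size,
                     df_shape[1])                          # min(262, 400) = 262
print((row_chunk_size, col_chunk_size))                    # (1000, 262)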
Please provide a description of the function:def write_metadata(hdf5_out, dim, metadata_df, convert_back_to_neg_666, gzip_compression):
if dim == "col":
hdf5_out.create_group(col_meta_group_node)
metadata_node_name = col_meta_group_node
elif dim == "row":
hdf5_out.create_group(row_meta_group_node)
metadata_node_name = row_meta_group_node
else:
logger.error("'dim' argument must be either 'row' or 'col'!")
# write id field to expected node
hdf5_out.create_dataset(metadata_node_name + "/id", data=[numpy.string_(x) for x in metadata_df.index],
compression=gzip_compression)
metadata_fields = list(metadata_df.columns.copy())
# if specified, convert numpy.nans in metadata back to -666
if convert_back_to_neg_666:
for c in metadata_fields:
metadata_df[[c]] = metadata_df[[c]].replace([numpy.nan], ["-666"])
# write metadata columns to their own arrays
for field in [entry for entry in metadata_fields if entry != "ind"]:
if numpy.array(metadata_df.loc[:, field]).dtype.type in (numpy.str_, numpy.object_):
array_write = numpy.array(metadata_df.loc[:, field]).astype('S')
else:
array_write = numpy.array(metadata_df.loc[:, field])
hdf5_out.create_dataset(metadata_node_name + "/" + field,
data=array_write,
compression=gzip_compression) | [
"\n\tWrites either column or row metadata to proper node of gctx out (hdf5) file.\n\n\tInput:\n\t\t- hdf5_out (h5py): open hdf5 file to write to\n\t\t- dim (str; must be \"row\" or \"col\"): dimension of metadata to write to \n\t\t- metadata_df (pandas DataFrame): metadata DataFrame to write to file \n\t\t- convert_back_to_neg_666 (bool): Whether to convert numpy.nans back to \"-666\",\n\t\t\t\tas per CMap metadata null convention \n\t"
]
|
Please provide a description of the function:def create_lazy_user(self):
user_class = self.model.get_user_class()
username = self.generate_username(user_class)
user = user_class.objects.create_user(username, '')
self.create(user=user)
return user, username | [
" Create a lazy user. Returns a 2-tuple of the underlying User\n object (which may be of a custom class), and the username.\n "
]
|
Please provide a description of the function:def convert(self, form):
if not is_lazy_user(form.instance):
raise NotLazyError('You cannot convert a non-lazy user')
user = form.save()
        # We need to remove the LazyUser instance associated with the
# newly-converted user
self.filter(user=user).delete()
converted.send(self, user=user)
return user | [
" Convert a lazy user to a non-lazy one. The form passed\n in is expected to be a ModelForm instance, bound to the user\n to be converted.\n\n The converted ``User`` object is returned.\n\n Raises a TypeError if the user is not lazy.\n "
]
|
Please provide a description of the function:def generate_username(self, user_class):
m = getattr(user_class, 'generate_username', None)
if m:
return m()
else:
max_length = user_class._meta.get_field(
self.username_field).max_length
return uuid.uuid4().hex[:max_length] | [
" Generate a new username for a user\n "
]
|
Please provide a description of the function:def convert(request, form_class=None,
redirect_field_name='redirect_to',
anonymous_redirect=settings.LOGIN_URL,
template_name='lazysignup/convert.html',
ajax_template_name='lazysignup/convert_ajax.html'):
redirect_to = 'lazysignup_convert_done'
if form_class is None:
if constants.LAZYSIGNUP_CUSTOM_USER_CREATION_FORM is not None:
form_class = import_string(constants.LAZYSIGNUP_CUSTOM_USER_CREATION_FORM)
else:
form_class = UserCreationForm
# If we've got an anonymous user, redirect to login
if request.user.is_anonymous:
return HttpResponseRedirect(anonymous_redirect)
if request.method == 'POST':
redirect_to = request.POST.get(redirect_field_name) or redirect_to
form = form_class(request.POST, instance=request.user)
if form.is_valid():
try:
LazyUser.objects.convert(form)
except NotLazyError:
# If the user already has a usable password, return a Bad
# Request to an Ajax client, or just redirect back for a
# regular client.
if request.is_ajax():
return HttpResponseBadRequest(
content=_(u"Already converted."))
else:
return redirect(redirect_to)
# Re-log the user in, as they'll now not be authenticatable with
# the Lazy backend
login(request, authenticate(**form.get_credentials()))
# If we're being called via AJAX, then we just return a 200
# directly to the client. If not, then we redirect to a
# confirmation page or to redirect_to, if it's set.
if request.is_ajax():
return HttpResponse()
else:
return redirect(redirect_to)
# Invalid form, now check to see if is an ajax call
if request.is_ajax():
return HttpResponseBadRequest(content=str(form.errors))
else:
form = form_class()
# If this is an ajax request, prepend the ajax template to the list of
# templates to be searched.
if request.is_ajax():
template_name = [ajax_template_name, template_name]
return render(
request,
template_name,
{
'form': form,
'redirect_to': redirect_to
},
) | [
" Convert a temporary user to a real one. Reject users who don't\n appear to be temporary users (ie. they have a usable password)\n "
]
|
Please provide a description of the function:def is_lazy_user(user):
# Anonymous users are not lazy.
if user.is_anonymous:
return False
# Check the user backend. If the lazy signup backend
# authenticated them, then the user is lazy.
backend = getattr(user, 'backend', None)
if backend == 'lazysignup.backends.LazySignupBackend':
return True
# Otherwise, we have to fall back to checking the database.
from lazysignup.models import LazyUser
return bool(LazyUser.objects.filter(user=user).count() > 0) | [
" Return True if the passed user is a lazy user. "
]
|
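A minimal Django view sketch showing a typical call site for is_lazy_user; the view name, template, and context key are invented, and the import paths assume lazysignup's usual module layout.

from django.shortcuts import render
from lazysignup.decorators import allow_lazy_user
from lazysignup.utils import is_lazy_user

@allow_lazy_user
def dashboard(request):
    # Lazy (auto-created) users can be nudged toward the convert view.
    show_signup_banner = is_lazy_user(request.user)
    return render(request, "dashboard.html",
                  {"show_signup_banner": show_signup_banner})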
Please provide a description of the function:def add(queue_name, payload=None, content_type=None, source=None, task_id=None,
build_id=None, release_id=None, run_id=None):
if task_id:
task = WorkQueue.query.filter_by(task_id=task_id).first()
if task:
return task.task_id
else:
task_id = uuid.uuid4().hex
if payload and not content_type and not isinstance(payload, basestring):
payload = json.dumps(payload)
content_type = 'application/json'
now = datetime.datetime.utcnow()
task = WorkQueue(
task_id=task_id,
queue_name=queue_name,
eta=now,
source=source,
build_id=build_id,
release_id=release_id,
run_id=run_id,
payload=payload,
content_type=content_type)
db.session.add(task)
return task.task_id | [
"Adds a work item to a queue.\n\n Args:\n queue_name: Name of the queue to add the work item to.\n payload: Optional. Payload that describes the work to do as a string.\n If not a string and content_type is not provided, then this\n function assumes the payload is a JSON-able Python object.\n content_type: Optional. Content type of the payload.\n source: Optional. Who or what originally created the task.\n task_id: Optional. When supplied, only enqueue this task if a task\n with this ID does not already exist. If a task with this ID already\n exists, then this function will do nothing.\n build_id: Build ID to associate with this task. May be None.\n release_id: Release ID to associate with this task. May be None.\n run_id: Run ID to associate with this task. May be None.\n\n Returns:\n ID of the task that was added.\n "
]
|
Please provide a description of the function:def _task_to_dict(task):
payload = task.payload
if payload and task.content_type == 'application/json':
payload = json.loads(payload)
return dict(
task_id=task.task_id,
queue_name=task.queue_name,
eta=_datetime_to_epoch_seconds(task.eta),
source=task.source,
created=_datetime_to_epoch_seconds(task.created),
lease_attempts=task.lease_attempts,
last_lease=_datetime_to_epoch_seconds(task.last_lease),
payload=payload,
content_type=task.content_type) | [
"Converts a WorkQueue to a JSON-able dictionary."
]
|
Please provide a description of the function:def lease(queue_name, owner, count=1, timeout_seconds=60):
now = datetime.datetime.utcnow()
query = (
WorkQueue.query
.filter_by(queue_name=queue_name, status=WorkQueue.LIVE)
.filter(WorkQueue.eta <= now)
.order_by(WorkQueue.eta)
.with_lockmode('update')
.limit(count))
task_list = query.all()
if not task_list:
return None
next_eta = now + datetime.timedelta(seconds=timeout_seconds)
for task in task_list:
task.eta = next_eta
task.lease_attempts += 1
task.last_owner = owner
task.last_lease = now
task.heartbeat = None
task.heartbeat_number = 0
db.session.add(task)
return [_task_to_dict(task) for task in task_list] | [
"Leases a work item from a queue, usually the oldest task available.\n\n Args:\n queue_name: Name of the queue to lease work from.\n owner: Who or what is leasing the task.\n count: Lease up to this many tasks. Return value will never have more\n than this many items present.\n timeout_seconds: Number of seconds to lock the task for before\n allowing another owner to lease it.\n\n Returns:\n List of dictionaries representing the task that was leased, or\n an empty list if no tasks are available to be leased.\n "
]
|
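A worker-loop sketch tying lease and finish together. It assumes lease, finish, and the db handle are imported from this module and called inside the Flask application context; the queue name, owner id, and process() helper are made up for illustration.

import time

def process(task):
    # Placeholder for real work on the leased payload.
    print("processing payload: %r" % (task["payload"],))

def worker_loop(owner="worker-1"):
    while True:
        tasks = lease("run-tests", owner, count=5, timeout_seconds=60)
        db.session.commit()                  # persist the new lease ETAs
        if not tasks:                        # lease() returns None when idle
            time.sleep(5)
            continue
        for task in tasks:
            try:
                process(task)
                finish("run-tests", task["task_id"], owner)
            except Exception:
                finish("run-tests", task["task_id"], owner, error=True)
        db.session.commit()                  # persist the finished statuses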
Please provide a description of the function:def _get_task_with_policy(queue_name, task_id, owner):
now = datetime.datetime.utcnow()
task = (
WorkQueue.query
.filter_by(queue_name=queue_name, task_id=task_id)
.with_lockmode('update')
.first())
if not task:
raise TaskDoesNotExistError('task_id=%r' % task_id)
    # Lease delta should be negative, meaning it has not yet expired!
lease_delta = now - task.eta
if lease_delta > datetime.timedelta(0):
db.session.rollback()
raise LeaseExpiredError('queue=%r, task_id=%r expired %s' % (
task.queue_name, task_id, lease_delta))
if task.last_owner != owner:
db.session.rollback()
raise NotOwnerError('queue=%r, task_id=%r, owner=%r' % (
task.queue_name, task_id, task.last_owner))
return task | [
"Fetches the specified task and enforces ownership policy.\n\n Args:\n queue_name: Name of the queue the work item is on.\n task_id: ID of the task that is finished.\n owner: Who or what has the current lease on the task.\n\n Returns:\n The valid WorkQueue task that is currently owned.\n\n Raises:\n TaskDoesNotExistError if the task does not exist.\n LeaseExpiredError if the lease is no longer active.\n NotOwnerError if the specified owner no longer owns the task.\n "
]
|
Please provide a description of the function:def heartbeat(queue_name, task_id, owner, message, index):
task = _get_task_with_policy(queue_name, task_id, owner)
if task.heartbeat_number > index:
return False
task.heartbeat = message
task.heartbeat_number = index
# Extend the lease by the time of the last lease.
now = datetime.datetime.utcnow()
timeout_delta = task.eta - task.last_lease
task.eta = now + timeout_delta
task.last_lease = now
db.session.add(task)
signals.task_updated.send(app, task=task)
return True | [
"Sets the heartbeat status of the task and extends its lease.\n\n The task's lease is extended by the same amount as its last lease to\n ensure that any operations following the heartbeat will still hold the\n lock for the original lock period.\n\n Args:\n queue_name: Name of the queue the work item is on.\n task_id: ID of the task that is finished.\n owner: Who or what has the current lease on the task.\n message: Message to report as the task's current status.\n index: Number of this message in the sequence of messages from the\n current task owner, starting at zero. This lets the API receive\n heartbeats out of order, yet ensure that the most recent message\n is actually saved to the database. This requires the owner issuing\n heartbeat messages to issue heartbeat indexes sequentially.\n\n Returns:\n True if the heartbeat message was set, False if it is lower than the\n current heartbeat index.\n\n Raises:\n TaskDoesNotExistError if the task does not exist.\n LeaseExpiredError if the lease is no longer active.\n NotOwnerError if the specified owner no longer owns the task.\n "
]
|
Please provide a description of the function:def finish(queue_name, task_id, owner, error=False):
task = _get_task_with_policy(queue_name, task_id, owner)
if not task.status == WorkQueue.LIVE:
logging.warning('Finishing already dead task. queue=%r, task_id=%r, '
'owner=%r, status=%r',
task.queue_name, task_id, owner, task.status)
return False
if not error:
task.status = WorkQueue.DONE
else:
task.status = WorkQueue.ERROR
task.finished = datetime.datetime.utcnow()
db.session.add(task)
signals.task_updated.send(app, task=task)
return True | [
"Marks a work item on a queue as finished.\n\n Args:\n queue_name: Name of the queue the work item is on.\n task_id: ID of the task that is finished.\n owner: Who or what has the current lease on the task.\n error: Defaults to false. True if this task's final state is an error.\n\n Returns:\n True if the task has been finished for the first time; False if the\n task was already finished.\n\n Raises:\n TaskDoesNotExistError if the task does not exist.\n LeaseExpiredError if the lease is no longer active.\n NotOwnerError if the specified owner no longer owns the task.\n "
]
|
Please provide a description of the function:def _query(queue_name=None, build_id=None, release_id=None, run_id=None,
count=None):
assert queue_name or build_id or release_id or run_id
q = WorkQueue.query
if queue_name:
q = q.filter_by(queue_name=queue_name)
if build_id:
q = q.filter_by(build_id=build_id)
if release_id:
q = q.filter_by(release_id=release_id)
if run_id:
q = q.filter_by(run_id=run_id)
q = q.order_by(WorkQueue.created.desc())
if count is not None:
q = q.limit(count)
return q.all() | [
"Queries for work items based on their criteria.\n\n Args:\n queue_name: Optional queue name to restrict to.\n build_id: Optional build ID to restrict to.\n release_id: Optional release ID to restrict to.\n run_id: Optional run ID to restrict to.\n count: How many tasks to fetch. Defaults to None, which means all\n tasks are fetch that match the query.\n\n Returns:\n List of WorkQueue items.\n "
]
|
Please provide a description of the function:def query(**kwargs):
count = kwargs.get('count', None)
task_list = _query(**kwargs)
task_dict_list = [_task_to_dict(task) for task in task_list]
if count == 1:
if not task_dict_list:
return None
else:
return task_dict_list[0]
return task_dict_list | [
"Queries for work items based on their criteria.\n\n Args:\n queue_name: Optional queue name to restrict to.\n build_id: Optional build ID to restrict to.\n release_id: Optional release ID to restrict to.\n run_id: Optional run ID to restrict to.\n count: How many tasks to fetch. Defaults to None, which means all\n tasks are fetch that match the query.\n\n Returns:\n Dictionaries of the most recent tasks that match the criteria, in\n order of most recently created. When count is 1 the return value will\n be the most recent task or None. When count is not 1 the return value\n will be a list of tasks.\n "
]
|
Please provide a description of the function:def cancel(**kwargs):
task_list = _query(**kwargs)
for task in task_list:
task.status = WorkQueue.CANCELED
task.finished = datetime.datetime.utcnow()
db.session.add(task)
return len(task_list) | [
"Cancels work items based on their criteria.\n\n Args:\n **kwargs: Same parameters as the query() method.\n\n Returns:\n The number of tasks that were canceled.\n "
]
|
Please provide a description of the function:def handle_add(queue_name):
source = request.form.get('source', request.remote_addr, type=str)
try:
task_id = work_queue.add(
queue_name,
payload=request.form.get('payload', type=str),
content_type=request.form.get('content_type', type=str),
source=source,
task_id=request.form.get('task_id', type=str))
except work_queue.Error, e:
return utils.jsonify_error(e)
db.session.commit()
logging.info('Task added: queue=%r, task_id=%r, source=%r',
queue_name, task_id, source)
return flask.jsonify(task_id=task_id) | [
"Adds a task to a queue."
]
|
Please provide a description of the function:def handle_lease(queue_name):
owner = request.form.get('owner', request.remote_addr, type=str)
try:
task_list = work_queue.lease(
queue_name,
owner,
request.form.get('count', 1, type=int),
request.form.get('timeout', 60, type=int))
except work_queue.Error, e:
return utils.jsonify_error(e)
if not task_list:
return flask.jsonify(tasks=[])
db.session.commit()
task_ids = [t['task_id'] for t in task_list]
logging.debug('Task leased: queue=%r, task_ids=%r, owner=%r',
queue_name, task_ids, owner)
return flask.jsonify(tasks=task_list) | [
"Leases a task from a queue."
]
|
Please provide a description of the function:def handle_heartbeat(queue_name):
task_id = request.form.get('task_id', type=str)
message = request.form.get('message', type=str)
index = request.form.get('index', type=int)
try:
work_queue.heartbeat(
queue_name,
task_id,
request.form.get('owner', request.remote_addr, type=str),
message,
index)
except work_queue.Error, e:
return utils.jsonify_error(e)
db.session.commit()
logging.debug('Task heartbeat: queue=%r, task_id=%r, message=%r, index=%d',
queue_name, task_id, message, index)
return flask.jsonify(success=True) | [
"Updates the heartbeat message for a task."
]
|
Please provide a description of the function:def handle_finish(queue_name):
task_id = request.form.get('task_id', type=str)
owner = request.form.get('owner', request.remote_addr, type=str)
error = request.form.get('error', type=str) is not None
try:
work_queue.finish(queue_name, task_id, owner, error=error)
except work_queue.Error, e:
return utils.jsonify_error(e)
db.session.commit()
logging.debug('Task finished: queue=%r, task_id=%r, owner=%r, error=%r',
queue_name, task_id, owner, error)
return flask.jsonify(success=True) | [
"Marks a task on a queue as finished."
]
|
Please provide a description of the function:def view_all_work_queues():
count_list = list(
db.session.query(
work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status,
func.count(work_queue.WorkQueue.task_id))
.group_by(work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status))
queue_dict = {}
for name, status, count in count_list:
queue_dict[(name, status)] = dict(
name=name, status=status, count=count)
max_created_list = list(
db.session.query(
work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status,
func.max(work_queue.WorkQueue.created))
.group_by(work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status))
for name, status, newest_created in max_created_list:
queue_dict[(name, status)]['newest_created'] = newest_created
min_eta_list = list(
db.session.query(
work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status,
func.min(work_queue.WorkQueue.eta))
.group_by(work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status))
for name, status, oldest_eta in min_eta_list:
queue_dict[(name, status)]['oldest_eta'] = oldest_eta
queue_list = list(queue_dict.values())
queue_list.sort(key=lambda x: (x['name'], x['status']))
context = dict(
queue_list=queue_list,
)
return render_template('view_work_queue_index.html', **context) | [
"Page for viewing the index of all active work queues."
]
|
Please provide a description of the function:def manage_work_queue(queue_name):
modify_form = forms.ModifyWorkQueueTaskForm()
if modify_form.validate_on_submit():
primary_key = (modify_form.task_id.data, queue_name)
task = work_queue.WorkQueue.query.get(primary_key)
if task:
logging.info('Action: %s task_id=%r',
modify_form.action.data, modify_form.task_id.data)
if modify_form.action.data == 'retry':
task.status = work_queue.WorkQueue.LIVE
task.lease_attempts = 0
task.heartbeat = 'Retrying ...'
db.session.add(task)
else:
db.session.delete(task)
db.session.commit()
else:
logging.warning('Could not find task_id=%r to delete',
modify_form.task_id.data)
return redirect(url_for('manage_work_queue', queue_name=queue_name))
query = (
work_queue.WorkQueue.query
.filter_by(queue_name=queue_name)
.order_by(work_queue.WorkQueue.created.desc()))
status = request.args.get('status', '', type=str).lower()
if status in work_queue.WorkQueue.STATES:
query = query.filter_by(status=status)
else:
status = None
item_list = list(query.limit(100))
work_list = []
for item in item_list:
form = forms.ModifyWorkQueueTaskForm()
form.task_id.data = item.task_id
form.delete.data = True
work_list.append((item, form))
context = dict(
queue_name=queue_name,
status=status,
work_list=work_list,
)
return render_template('view_work_queue.html', **context) | [
"Page for viewing the contents of a work queue."
]
|
Please provide a description of the function:def retryable_transaction(attempts=3, exceptions=(OperationalError,)):
assert len(exceptions) > 0
assert attempts > 0
def wrapper(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
for i in xrange(attempts):
try:
return f(*args, **kwargs)
except exceptions, e:
if i == (attempts - 1):
raise
logging.warning(
'Retryable error in transaction on attempt %d. %s: %s',
i + 1, e.__class__.__name__, e)
db.session.rollback()
return wrapped
return wrapper | [
"Decorator retries a function when expected exceptions are raised."
]
|
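A usage sketch for the decorator; the Build model and its status field are hypothetical. On an OperationalError the session is rolled back and the whole function body, including the commit, is re-run.

@retryable_transaction(attempts=3)
def mark_build_done(build_id):
    # Hypothetical model; any transient OperationalError (e.g. a deadlock)
    # triggers a rollback and up to two retries.
    build = Build.query.get(build_id)
    build.status = 'done'
    db.session.add(build)
    db.session.commit()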
Please provide a description of the function:def jsonify_assert(asserted, message, status_code=400):
if asserted:
return
try:
raise AssertionError(message)
except AssertionError, e:
stack = traceback.extract_stack()
stack.pop()
logging.error('Assertion failed: %s\n%s',
str(e), ''.join(traceback.format_list(stack)))
abort(jsonify_error(e, status_code=status_code)) | [
"Asserts something is true, aborts the request if not."
]
|
Please provide a description of the function:def jsonify_error(message_or_exception, status_code=400):
if isinstance(message_or_exception, Exception):
message = '%s: %s' % (
message_or_exception.__class__.__name__, message_or_exception)
else:
message = message_or_exception
logging.debug('Returning status=%s, error message: %s',
status_code, message)
response = jsonify(error=message)
response.status_code = status_code
return response | [
"Returns a JSON payload that indicates the request had an error."
]
|
Please provide a description of the function:def ignore_exceptions(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
logging.exception("Ignoring exception in %r", f)
return wrapped | [
"Decorator catches and ignores any exceptions raised by this function."
]
|
Please provide a description of the function:def timesince(when):
if not when:
return ''
now = datetime.datetime.utcnow()
if now > when:
diff = now - when
suffix = 'ago'
else:
diff = when - now
suffix = 'from now'
periods = (
(diff.days / 365, 'year', 'years'),
(diff.days / 30, 'month', 'months'),
(diff.days / 7, 'week', 'weeks'),
(diff.days, 'day', 'days'),
(diff.seconds / 3600, 'hour', 'hours'),
(diff.seconds / 60, 'minute', 'minutes'),
(diff.seconds, 'second', 'seconds'),
)
for period, singular, plural in periods:
if period:
return '%d %s %s' % (
period,
singular if period == 1 else plural,
suffix)
return 'now' | [
"Returns string representing \"time since\" or \"time until\".\n\n Examples:\n 3 days ago, 5 hours ago, 3 minutes from now, 5 hours from now, now.\n "
]
|
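A quick demonstration of timesince's output, assuming the function above is in scope; it expects naive UTC datetimes, matching the utcnow() calls it makes internally.

import datetime

five_hours_ago = datetime.datetime.utcnow() - datetime.timedelta(hours=5)
in_three_minutes = datetime.datetime.utcnow() + datetime.timedelta(minutes=3)

print(timesince(five_hours_ago))    # "5 hours ago"
print(timesince(in_three_minutes))  # "2 minutes from now" (just under three)
print(timesince(None))              # ""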
Please provide a description of the function:def human_uuid():
return base64.b32encode(
hashlib.sha1(uuid.uuid4().bytes).digest()).lower().strip('=') | [
"Returns a good UUID for using as a human readable string."
]
|