code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k) |
---|---|---|
def ccmod_class_label_lookup(label):
"""Get a CCMOD class from a label string."""
clsmod = {'ism': admm_ccmod.ConvCnstrMOD_IterSM,
'cg': admm_ccmod.ConvCnstrMOD_CG,
'cns': admm_ccmod.ConvCnstrMOD_Consensus,
'fista': fista_ccmod.ConvCnstrMOD}
if label in clsmod:
return clsmod[label]
else:
raise ValueError('Unknown ConvCnstrMOD solver method %s' % label) | Get a CCMOD class from a label string. | Below is the instruction that describes the task:
### Input:
Get a CCMOD class from a label string.
### Response:
def ccmod_class_label_lookup(label):
"""Get a CCMOD class from a label string."""
clsmod = {'ism': admm_ccmod.ConvCnstrMOD_IterSM,
'cg': admm_ccmod.ConvCnstrMOD_CG,
'cns': admm_ccmod.ConvCnstrMOD_Consensus,
'fista': fista_ccmod.ConvCnstrMOD}
if label in clsmod:
return clsmod[label]
else:
raise ValueError('Unknown ConvCnstrMOD solver method %s' % label) |
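A brief usage sketch of the lookup above, for illustration only; it assumes the surrounding module has already imported `admm_ccmod` and `fista_ccmod` (SPORCO-style solver modules), which the snippet itself does not show.

```python
# Illustrative usage: labels map to solver classes; unknown labels raise ValueError.
solver_cls = ccmod_class_label_lookup('cg')   # -> admm_ccmod.ConvCnstrMOD_CG
try:
    ccmod_class_label_lookup('bogus')
except ValueError as exc:
    print(exc)  # Unknown ConvCnstrMOD solver method bogus
```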
def publish (self):
'''
Function to publish cmdvel.
'''
self.lock.acquire()
tw = cmdvel2Twist(self.data)
self.lock.release()
self.pub.publish(tw) | Function to publish cmdvel. | Below is the instruction that describes the task:
### Input:
Function to publish cmdvel.
### Response:
def publish (self):
'''
Function to publish cmdvel.
'''
self.lock.acquire()
tw = cmdvel2Twist(self.data)
self.lock.release()
self.pub.publish(tw) |
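One design note on the snippet above: if `cmdvel2Twist` raised, the manually acquired lock would never be released. A hedged alternative sketch, assuming `self.lock` is a standard `threading.Lock` (or compatible), uses the lock as a context manager so it is released unconditionally:

```python
def publish(self):
    '''
    Function to publish cmdvel (exception-safe variant of the method above).
    '''
    with self.lock:                     # released even if cmdvel2Twist raises
        tw = cmdvel2Twist(self.data)
    self.pub.publish(tw)
```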
def parse_reports(self):
""" Find RSeQC read_distribution reports and parse their data """
# Set up vars
self.read_dist = dict()
first_regexes = {
'total_reads': r"Total Reads\s+(\d+)\s*",
'total_tags': r"Total Tags\s+(\d+)\s*",
'total_assigned_tags': r"Total Assigned Tags\s+(\d+)\s*",
}
second_regexes = {
'cds_exons': r"CDS_Exons\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
'5_utr_exons': r"5'UTR_Exons\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
'3_utr_exons': r"3'UTR_Exons\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
'introns': r"Introns\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
'tss_up_1kb': r"TSS_up_1kb\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
'tss_up_5kb': r"TSS_up_5kb\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
'tss_up_10kb': r"TSS_up_10kb\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
'tes_down_1kb': r"TES_down_1kb\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
'tes_down_5kb': r"TES_down_5kb\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
'tes_down_10kb': r"TES_down_10kb\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
}
# Go through files and parse data using regexes
for f in self.find_log_files('rseqc/read_distribution'):
d = dict()
for k, r in first_regexes.items():
r_search = re.search(r, f['f'], re.MULTILINE)
if r_search:
d[k] = int(r_search.group(1))
for k, r in second_regexes.items():
r_search = re.search(r, f['f'], re.MULTILINE)
if r_search:
d['{}_total_bases'.format(k)] = int(r_search.group(1))
d['{}_tag_count'.format(k)] = int(r_search.group(2))
d['{}_tags_kb'.format(k)] = float(r_search.group(2))
d['other_intergenic_tag_count'] = d['total_tags']-d['total_assigned_tags']
# Calculate some percentages for parsed file
if 'total_tags' in d:
t = float(d['total_tags'])
pcts = dict()
for k in d:
if k.endswith('_tag_count'):
pk = '{}_tag_pct'.format(k[:-10])
pcts[pk] = (float(d[k]) / t)*100.0
d.update(pcts)
if len(d) > 0:
if f['s_name'] in self.read_dist:
log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
self.add_data_source(f, section='read_distribution')
self.read_dist[f['s_name']] = d
# Filter to strip out ignored sample names
self.read_dist = self.ignore_samples(self.read_dist)
if len(self.read_dist) > 0:
# Write to file
self.write_data_file(self.read_dist, 'multiqc_rseqc_read_distribution')
# Plot bar graph of groups
keys = OrderedDict()
keys['cds_exons_tag_count'] = {'name': "CDS_Exons"}
keys['5_utr_exons_tag_count'] = {'name': "5'UTR_Exons"}
keys['3_utr_exons_tag_count'] = {'name': "3'UTR_Exons"}
keys['introns_tag_count'] = {'name': "Introns"}
keys['tss_up_1kb_tag_count'] = {'name': "TSS_up_1kb"}
keys['tss_up_5kb_tag_count'] = {'name': "TSS_up_5kb"}
keys['tss_up_10kb_tag_count'] = {'name': "TSS_up_10kb"}
keys['tes_down_1kb_tag_count'] = {'name': "TES_down_1kb"}
keys['tes_down_5kb_tag_count'] = {'name': "TES_down_5kb"}
keys['tes_down_10kb_tag_count'] = {'name': "TES_down_10kb"}
keys['other_intergenic_tag_count'] = {'name': "Other_intergenic"}
# Config for the plot
pconfig = {
'id': 'rseqc_read_distribution_plot',
'title': 'RSeQC: Read Distribution',
'ylab': '# Tags',
'cpswitch_counts_label': 'Number of Tags',
'cpswitch_c_active': False
}
self.add_section (
name = 'Read Distribution',
anchor = 'rseqc-read_distribution',
description = '<a href="http://rseqc.sourceforge.net/#read-distribution-py" target="_blank">Read Distribution</a>' \
" calculates how mapped reads are distributed over genome features.",
plot = bargraph.plot(self.read_dist, keys, pconfig)
)
# Return number of samples found
return len(self.read_dist) | Find RSeQC read_distribution reports and parse their data | Below is the instruction that describes the task:
### Input:
Find RSeQC read_distribution reports and parse their data
### Response:
def parse_reports(self):
""" Find RSeQC read_distribution reports and parse their data """
# Set up vars
self.read_dist = dict()
first_regexes = {
'total_reads': r"Total Reads\s+(\d+)\s*",
'total_tags': r"Total Tags\s+(\d+)\s*",
'total_assigned_tags': r"Total Assigned Tags\s+(\d+)\s*",
}
second_regexes = {
'cds_exons': r"CDS_Exons\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
'5_utr_exons': r"5'UTR_Exons\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
'3_utr_exons': r"3'UTR_Exons\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
'introns': r"Introns\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
'tss_up_1kb': r"TSS_up_1kb\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
'tss_up_5kb': r"TSS_up_5kb\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
'tss_up_10kb': r"TSS_up_10kb\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
'tes_down_1kb': r"TES_down_1kb\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
'tes_down_5kb': r"TES_down_5kb\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
'tes_down_10kb': r"TES_down_10kb\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*",
}
# Go through files and parse data using regexes
for f in self.find_log_files('rseqc/read_distribution'):
d = dict()
for k, r in first_regexes.items():
r_search = re.search(r, f['f'], re.MULTILINE)
if r_search:
d[k] = int(r_search.group(1))
for k, r in second_regexes.items():
r_search = re.search(r, f['f'], re.MULTILINE)
if r_search:
d['{}_total_bases'.format(k)] = int(r_search.group(1))
d['{}_tag_count'.format(k)] = int(r_search.group(2))
d['{}_tags_kb'.format(k)] = float(r_search.group(2))
d['other_intergenic_tag_count'] = d['total_tags']-d['total_assigned_tags']
# Calculate some percentages for parsed file
if 'total_tags' in d:
t = float(d['total_tags'])
pcts = dict()
for k in d:
if k.endswith('_tag_count'):
pk = '{}_tag_pct'.format(k[:-10])
pcts[pk] = (float(d[k]) / t)*100.0
d.update(pcts)
if len(d) > 0:
if f['s_name'] in self.read_dist:
log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
self.add_data_source(f, section='read_distribution')
self.read_dist[f['s_name']] = d
# Filter to strip out ignored sample names
self.read_dist = self.ignore_samples(self.read_dist)
if len(self.read_dist) > 0:
# Write to file
self.write_data_file(self.read_dist, 'multiqc_rseqc_read_distribution')
# Plot bar graph of groups
keys = OrderedDict()
keys['cds_exons_tag_count'] = {'name': "CDS_Exons"}
keys['5_utr_exons_tag_count'] = {'name': "5'UTR_Exons"}
keys['3_utr_exons_tag_count'] = {'name': "3'UTR_Exons"}
keys['introns_tag_count'] = {'name': "Introns"}
keys['tss_up_1kb_tag_count'] = {'name': "TSS_up_1kb"}
keys['tss_up_5kb_tag_count'] = {'name': "TSS_up_5kb"}
keys['tss_up_10kb_tag_count'] = {'name': "TSS_up_10kb"}
keys['tes_down_1kb_tag_count'] = {'name': "TES_down_1kb"}
keys['tes_down_5kb_tag_count'] = {'name': "TES_down_5kb"}
keys['tes_down_10kb_tag_count'] = {'name': "TES_down_10kb"}
keys['other_intergenic_tag_count'] = {'name': "Other_intergenic"}
# Config for the plot
pconfig = {
'id': 'rseqc_read_distribution_plot',
'title': 'RSeQC: Read Distribution',
'ylab': '# Tags',
'cpswitch_counts_label': 'Number of Tags',
'cpswitch_c_active': False
}
self.add_section (
name = 'Read Distribution',
anchor = 'rseqc-read_distribution',
description = '<a href="http://rseqc.sourceforge.net/#read-distribution-py" target="_blank">Read Distribution</a>' \
" calculates how mapped reads are distributed over genome features.",
plot = bargraph.plot(self.read_dist, keys, pconfig)
)
# Return number of samples found
return len(self.read_dist) |
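To make the regex bookkeeping above concrete, here is a minimal, standalone sketch of how one of the `second_regexes` patterns pulls the three numeric columns out of a read_distribution line. The sample line is invented for illustration; it only mimics the whitespace-separated Group / Total bases / Tag count / Tags-per-kb layout that the patterns expect.

```python
import re

sample = "CDS_Exons           33302033           20002271           600.63"  # made-up values
m = re.search(r"CDS_Exons\s+(\d+)\s+(\d+)\s+([\d\.]+)\s*", sample)
total_bases = int(m.group(1))    # 33302033
tag_count = int(m.group(2))      # 20002271
tags_per_kb = float(m.group(3))  # 600.63
print(total_bases, tag_count, tags_per_kb)
```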
def delete(self, namespace, key):
"""Remove a configuration item from the database
Args:
namespace (`str`): Namespace of the config item
key (`str`): Key to delete
Returns:
`None`
"""
if self.key_exists(namespace, key):
obj = db.ConfigItem.find_one(
ConfigItem.namespace_prefix == namespace,
ConfigItem.key == key
)
del self.__data[namespace][key]
db.session.delete(obj)
db.session.commit()
else:
raise KeyError('{}/{}'.format(namespace, key)) | Remove a configuration item from the database
Args:
namespace (`str`): Namespace of the config item
key (`str`): Key to delete
Returns:
`None` | Below is the instruction that describes the task:
### Input:
Remove a configuration item from the database
Args:
namespace (`str`): Namespace of the config item
key (`str`): Key to delete
Returns:
`None`
### Response:
def delete(self, namespace, key):
"""Remove a configuration item from the database
Args:
namespace (`str`): Namespace of the config item
key (`str`): Key to delete
Returns:
`None`
"""
if self.key_exists(namespace, key):
obj = db.ConfigItem.find_one(
ConfigItem.namespace_prefix == namespace,
ConfigItem.key == key
)
del self.__data[namespace][key]
db.session.delete(obj)
db.session.commit()
else:
raise KeyError('{}/{}'.format(namespace, key)) |
def _set_queryset(self, queryset):
"""
Set the queryset on the ``ModelChoiceField`` and choices on the widget.
"""
self.fields[0].queryset = self.widget.queryset = queryset
self.widget.choices = self.fields[0].choices | Set the queryset on the ``ModelChoiceField`` and choices on the widget. | Below is the instruction that describes the task:
### Input:
Set the queryset on the ``ModelChoiceField`` and choices on the widget.
### Response:
def _set_queryset(self, queryset):
"""
Set the queryset on the ``ModelChoiceField`` and choices on the widget.
"""
self.fields[0].queryset = self.widget.queryset = queryset
self.widget.choices = self.fields[0].choices |
def write_release_version(version):
"""Write the release version to ``_version.py``."""
dirname = os.path.abspath(os.path.dirname(__file__))
f = open(os.path.join(dirname, "_version.py"), "wt")
f.write("__version__ = '%s'\n" % version)
f.close() | Write the release version to ``_version.py``. | Below is the instruction that describes the task:
### Input:
Write the release version to ``_version.py``.
### Response:
def write_release_version(version):
"""Write the release version to ``_version.py``."""
dirname = os.path.abspath(os.path.dirname(__file__))
f = open(os.path.join(dirname, "_version.py"), "wt")
f.write("__version__ = '%s'\n" % version)
f.close() |
def entrez(args):
"""
%prog entrez <filename|term>
`filename` contains a list of terms to search. Or just one term. If the
results are small in size, e.g. "--format=acc", use "--batchsize=100" to speed
the download.
"""
p = OptionParser(entrez.__doc__)
allowed_databases = {"fasta": ["genome", "nuccore", "nucgss", "protein", "nucest"],
"asn.1": ["genome", "nuccore", "nucgss", "protein", "gene"],
"xml": ["genome", "nuccore", "nucgss", "nucest", "gene"],
"gb": ["genome", "nuccore", "nucgss"],
"est": ["nucest"],
"gss": ["nucgss"],
"acc": ["nuccore"],
}
valid_formats = tuple(allowed_databases.keys())
valid_databases = ("genome", "nuccore", "nucest",
"nucgss", "protein", "gene")
p.add_option("--noversion", dest="noversion",
default=False, action="store_true",
help="Remove trailing accession versions")
p.add_option("--format", default="fasta", choices=valid_formats,
help="download format [default: %default]")
p.add_option("--database", default="nuccore", choices=valid_databases,
help="search database [default: %default]")
p.add_option("--retmax", default=1000000, type="int",
help="how many results to return [default: %default]")
p.add_option("--skipcheck", default=False, action="store_true",
help="turn off prompt to check file existence [default: %default]")
p.add_option("--batchsize", default=500, type="int",
help="download the results in batch for speed-up [default: %default]")
p.set_outdir(outdir=None)
p.add_option("--outprefix", default="out",
help="output file name prefix [default: %default]")
p.set_email()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
filename, = args
if op.exists(filename):
pf = filename.rsplit(".", 1)[0]
list_of_terms = [row.strip() for row in open(filename)]
if opts.noversion:
list_of_terms = [x.rsplit(".", 1)[0] for x in list_of_terms]
else:
pf = filename
# the filename is the search term
list_of_terms = [filename.strip()]
fmt = opts.format
database = opts.database
batchsize = opts.batchsize
assert database in allowed_databases[fmt], \
"For output format '{0}', allowed databases are: {1}".\
format(fmt, allowed_databases[fmt])
assert batchsize >= 1, "batchsize must >= 1"
if " " in pf:
pf = opts.outprefix
outfile = "{0}.{1}".format(pf, fmt)
outdir = opts.outdir
if outdir:
mkdir(outdir)
# If noprompt, will not check file existence
if not outdir:
fw = must_open(outfile, "w", checkexists=True,
skipcheck=opts.skipcheck)
if fw is None:
return
seen = set()
totalsize = 0
for id, size, term, handle in batch_entrez(list_of_terms, retmax=opts.retmax,
rettype=fmt, db=database, batchsize=batchsize,
email=opts.email):
if outdir:
outfile = urljoin(outdir, "{0}.{1}".format(term, fmt))
fw = must_open(outfile, "w", checkexists=True,
skipcheck=opts.skipcheck)
if fw is None:
continue
rec = handle.read()
if id in seen:
logging.error("Duplicate key ({0}) found".format(rec))
continue
totalsize += size
print(rec, file=fw)
print(file=fw)
seen.add(id)
if seen:
print("A total of {0} {1} records downloaded.".
format(totalsize, fmt.upper()), file=sys.stderr)
return outfile | %prog entrez <filename|term>
`filename` contains a list of terms to search. Or just one term. If the
results are small in size, e.g. "--format=acc", use "--batchsize=100" to speed
the download. | Below is the instruction that describes the task:
### Input:
%prog entrez <filename|term>
`filename` contains a list of terms to search. Or just one term. If the
results are small in size, e.g. "--format=acc", use "--batchsize=100" to speed
the download.
### Response:
def entrez(args):
"""
%prog entrez <filename|term>
`filename` contains a list of terms to search. Or just one term. If the
results are small in size, e.g. "--format=acc", use "--batchsize=100" to speed
the download.
"""
p = OptionParser(entrez.__doc__)
allowed_databases = {"fasta": ["genome", "nuccore", "nucgss", "protein", "nucest"],
"asn.1": ["genome", "nuccore", "nucgss", "protein", "gene"],
"xml": ["genome", "nuccore", "nucgss", "nucest", "gene"],
"gb": ["genome", "nuccore", "nucgss"],
"est": ["nucest"],
"gss": ["nucgss"],
"acc": ["nuccore"],
}
valid_formats = tuple(allowed_databases.keys())
valid_databases = ("genome", "nuccore", "nucest",
"nucgss", "protein", "gene")
p.add_option("--noversion", dest="noversion",
default=False, action="store_true",
help="Remove trailing accession versions")
p.add_option("--format", default="fasta", choices=valid_formats,
help="download format [default: %default]")
p.add_option("--database", default="nuccore", choices=valid_databases,
help="search database [default: %default]")
p.add_option("--retmax", default=1000000, type="int",
help="how many results to return [default: %default]")
p.add_option("--skipcheck", default=False, action="store_true",
help="turn off prompt to check file existence [default: %default]")
p.add_option("--batchsize", default=500, type="int",
help="download the results in batch for speed-up [default: %default]")
p.set_outdir(outdir=None)
p.add_option("--outprefix", default="out",
help="output file name prefix [default: %default]")
p.set_email()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
filename, = args
if op.exists(filename):
pf = filename.rsplit(".", 1)[0]
list_of_terms = [row.strip() for row in open(filename)]
if opts.noversion:
list_of_terms = [x.rsplit(".", 1)[0] for x in list_of_terms]
else:
pf = filename
# the filename is the search term
list_of_terms = [filename.strip()]
fmt = opts.format
database = opts.database
batchsize = opts.batchsize
assert database in allowed_databases[fmt], \
"For output format '{0}', allowed databases are: {1}".\
format(fmt, allowed_databases[fmt])
assert batchsize >= 1, "batchsize must >= 1"
if " " in pf:
pf = opts.outprefix
outfile = "{0}.{1}".format(pf, fmt)
outdir = opts.outdir
if outdir:
mkdir(outdir)
# If noprompt, will not check file existence
if not outdir:
fw = must_open(outfile, "w", checkexists=True,
skipcheck=opts.skipcheck)
if fw is None:
return
seen = set()
totalsize = 0
for id, size, term, handle in batch_entrez(list_of_terms, retmax=opts.retmax,
rettype=fmt, db=database, batchsize=batchsize,
email=opts.email):
if outdir:
outfile = urljoin(outdir, "{0}.{1}".format(term, fmt))
fw = must_open(outfile, "w", checkexists=True,
skipcheck=opts.skipcheck)
if fw is None:
continue
rec = handle.read()
if id in seen:
logging.error("Duplicate key ({0}) found".format(rec))
continue
totalsize += size
print(rec, file=fw)
print(file=fw)
seen.add(id)
if seen:
print("A total of {0} {1} records downloaded.".
format(totalsize, fmt.upper()), file=sys.stderr)
return outfile |
def start(self, max):
"""
Displays the progress bar for a given maximum value.
:param float max: Maximum value of the progress bar.
"""
try:
self.widget.max = max
display(self.widget)
except:
pass | Displays the progress bar for a given maximum value.
:param float max: Maximum value of the progress bar. | Below is the instruction that describes the task:
### Input:
Displays the progress bar for a given maximum value.
:param float max: Maximum value of the progress bar.
### Response:
def start(self, max):
"""
Displays the progress bar for a given maximum value.
:param float max: Maximum value of the progress bar.
"""
try:
self.widget.max = max
display(self.widget)
except:
pass |
def _compute_dk_dtau_on_partition(self, tau, p):
"""Evaluate the term inside the sum of Faa di Bruno's formula for the given partition.
Parameters
----------
tau : :py:class:`Matrix`, (`M`, `D`)
`M` inputs with dimension `D`.
p : list of :py:class:`Array`
Each element is a block of the partition representing the
derivative orders to use.
Returns
-------
dk_dtau : :py:class:`Array`, (`M`,)
The specified derivatives over the given partition at the specified
locations.
"""
y, r2l2 = self._compute_y(tau, return_r2l2=True)
# Compute the d^(|pi|)f/dy term:
dk_dtau = self._compute_dk_dy(y, len(p))
# Multiply in each of the block terms:
for b in p:
dk_dtau *= self._compute_dy_dtau(tau, b, r2l2)
return dk_dtau | Evaluate the term inside the sum of Faa di Bruno's formula for the given partition.
Parameters
----------
tau : :py:class:`Matrix`, (`M`, `D`)
`M` inputs with dimension `D`.
p : list of :py:class:`Array`
Each element is a block of the partition representing the
derivative orders to use.
Returns
-------
dk_dtau : :py:class:`Array`, (`M`,)
The specified derivatives over the given partition at the specified
locations. | Below is the instruction that describes the task:
### Input:
Evaluate the term inside the sum of Faa di Bruno's formula for the given partition.
Parameters
----------
tau : :py:class:`Matrix`, (`M`, `D`)
`M` inputs with dimension `D`.
p : list of :py:class:`Array`
Each element is a block of the partition representing the
derivative orders to use.
Returns
-------
dk_dtau : :py:class:`Array`, (`M`,)
The specified derivatives over the given partition at the specified
locations.
### Response:
def _compute_dk_dtau_on_partition(self, tau, p):
"""Evaluate the term inside the sum of Faa di Bruno's formula for the given partition.
Parameters
----------
tau : :py:class:`Matrix`, (`M`, `D`)
`M` inputs with dimension `D`.
p : list of :py:class:`Array`
Each element is a block of the partition representing the
derivative orders to use.
Returns
-------
dk_dtau : :py:class:`Array`, (`M`,)
The specified derivatives over the given partition at the specified
locations.
"""
y, r2l2 = self._compute_y(tau, return_r2l2=True)
# Compute the d^(|pi|)f/dy term:
dk_dtau = self._compute_dk_dy(y, len(p))
# Multiply in each of the block terms:
for b in p:
dk_dtau *= self._compute_dy_dtau(tau, b, r2l2)
return dk_dtau |
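For readers less familiar with Faa di Bruno's formula, the structure the method above relies on is: for each partition p of the requested derivative orders, the contribution is the |p|-th derivative of the outer function times the product, over the blocks b of p, of the inner-function derivatives. A tiny hand-checkable sketch, independent of this class, for f(y) = e^y and g(x) = x^2, where the two partitions of {1, 2} give the two terms of the second derivative:

```python
import math

def faa_di_bruno_second_derivative(x):
    """d^2/dx^2 of exp(x**2), assembled partition by partition."""
    y = x ** 2
    term_one_block = math.exp(y) * 2.0              # partition {{1, 2}}: f'(y) * g''(x)
    term_two_blocks = math.exp(y) * (2.0 * x) ** 2  # partition {{1}, {2}}: f''(y) * g'(x)**2
    return term_one_block + term_two_blocks

x = 0.7
closed_form = (2.0 + 4.0 * x ** 2) * math.exp(x ** 2)
assert abs(faa_di_bruno_second_derivative(x) - closed_form) < 1e-12
```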
def tonet(self, outfile):
"""
Writes the PIL image into a png.
We do not want to flip the image at this stage, as you might have written on it !
"""
self.checkforpilimage()
if self.verbose :
print "Writing image to %s...\n%i x %i pixels, mode %s" % (outfile, self.pilimage.size[0], self.pilimage.size[1], self.pilimage.mode)
self.pilimage.save(outfile, "PNG") | Writes the PIL image into a png.
We do not want to flip the image at this stage, as you might have written on it ! | Below is the instruction that describes the task:
### Input:
Writes the PIL image into a png.
We do not want to flip the image at this stage, as you might have written on it !
### Response:
def tonet(self, outfile):
"""
Writes the PIL image into a png.
We do not want to flip the image at this stage, as you might have written on it !
"""
self.checkforpilimage()
if self.verbose :
print "Writing image to %s...\n%i x %i pixels, mode %s" % (outfile, self.pilimage.size[0], self.pilimage.size[1], self.pilimage.mode)
self.pilimage.save(outfile, "PNG") |
def receive(xpub, callback, api_key):
"""Call the '/v2/receive' endpoint and create a forwarding address.
:param str xpub: extended public key to generate payment address
:param str callback: callback URI that will be called upon payment
:param str api_key: Blockchain.info API V2 key
:return: an instance of :class:`ReceiveResponse` class
"""
params = {'xpub': xpub, 'key': api_key, 'callback': callback}
resource = 'v2/receive?' + util.urlencode(params)
resp = util.call_api(resource, base_url='https://api.blockchain.info/')
json_resp = json.loads(resp)
payment_response = ReceiveResponse(json_resp['address'],
json_resp['index'],
json_resp['callback'])
return payment_response | Call the '/v2/receive' endpoint and create a forwarding address.
:param str xpub: extended public key to generate payment address
:param str callback: callback URI that will be called upon payment
:param str api_key: Blockchain.info API V2 key
:return: an instance of :class:`ReceiveResponse` class | Below is the instruction that describes the task:
### Input:
Call the '/v2/receive' endpoint and create a forwarding address.
:param str xpub: extended public key to generate payment address
:param str callback: callback URI that will be called upon payment
:param str api_key: Blockchain.info API V2 key
:return: an instance of :class:`ReceiveResponse` class
### Response:
def receive(xpub, callback, api_key):
"""Call the '/v2/receive' endpoint and create a forwarding address.
:param str xpub: extended public key to generate payment address
:param str callback: callback URI that will be called upon payment
:param str api_key: Blockchain.info API V2 key
:return: an instance of :class:`ReceiveResponse` class
"""
params = {'xpub': xpub, 'key': api_key, 'callback': callback}
resource = 'v2/receive?' + util.urlencode(params)
resp = util.call_api(resource, base_url='https://api.blockchain.info/')
json_resp = json.loads(resp)
payment_response = ReceiveResponse(json_resp['address'],
json_resp['index'],
json_resp['callback'])
return payment_response |
def version_check():
"""Used to verify that h2o-python module and the H2O server are compatible with each other."""
from .__init__ import __version__ as ver_pkg
ci = h2oconn.cluster
if not ci:
raise H2OConnectionError("Connection not initialized. Did you run h2o.connect()?")
ver_h2o = ci.version
if ver_pkg == "SUBST_PROJECT_VERSION": ver_pkg = "UNKNOWN"
if str(ver_h2o) != str(ver_pkg):
branch_name_h2o = ci.branch_name
build_number_h2o = ci.build_number
if build_number_h2o is None or build_number_h2o == "unknown":
raise H2OConnectionError(
"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Upgrade H2O and h2o-Python to latest stable version - "
"http://h2o-release.s3.amazonaws.com/h2o/latest_stable.html"
"".format(ver_h2o, ver_pkg))
elif build_number_h2o == "99999":
raise H2OConnectionError(
"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"This is a developer build, please contact your developer."
"".format(ver_h2o, ver_pkg))
else:
raise H2OConnectionError(
"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Install the matching h2o-Python version from - "
"http://h2o-release.s3.amazonaws.com/h2o/{2}/{3}/index.html."
"".format(ver_h2o, ver_pkg, branch_name_h2o, build_number_h2o))
# Check age of the install
if ci.build_too_old:
print("Warning: Your H2O cluster version is too old ({})! Please download and install the latest "
"version from http://h2o.ai/download/".format(ci.build_age)) | Used to verify that h2o-python module and the H2O server are compatible with each other. | Below is the the instruction that describes the task:
### Input:
Used to verify that h2o-python module and the H2O server are compatible with each other.
### Response:
def version_check():
"""Used to verify that h2o-python module and the H2O server are compatible with each other."""
from .__init__ import __version__ as ver_pkg
ci = h2oconn.cluster
if not ci:
raise H2OConnectionError("Connection not initialized. Did you run h2o.connect()?")
ver_h2o = ci.version
if ver_pkg == "SUBST_PROJECT_VERSION": ver_pkg = "UNKNOWN"
if str(ver_h2o) != str(ver_pkg):
branch_name_h2o = ci.branch_name
build_number_h2o = ci.build_number
if build_number_h2o is None or build_number_h2o == "unknown":
raise H2OConnectionError(
"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Upgrade H2O and h2o-Python to latest stable version - "
"http://h2o-release.s3.amazonaws.com/h2o/latest_stable.html"
"".format(ver_h2o, ver_pkg))
elif build_number_h2o == "99999":
raise H2OConnectionError(
"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"This is a developer build, please contact your developer."
"".format(ver_h2o, ver_pkg))
else:
raise H2OConnectionError(
"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Install the matching h2o-Python version from - "
"http://h2o-release.s3.amazonaws.com/h2o/{2}/{3}/index.html."
"".format(ver_h2o, ver_pkg, branch_name_h2o, build_number_h2o))
# Check age of the install
if ci.build_too_old:
print("Warning: Your H2O cluster version is too old ({})! Please download and install the latest "
"version from http://h2o.ai/download/".format(ci.build_age)) |
def to_dict(self):
"""
Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object
"""
input_dict = super(Linear, self)._save_to_input_dict()
input_dict["class"] = "GPy.mappings.Linear"
input_dict["A"] = self.A.values.tolist()
return input_dict | Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object | Below is the instruction that describes the task:
### Input:
Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object
### Response:
def to_dict(self):
"""
Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object
"""
input_dict = super(Linear, self)._save_to_input_dict()
input_dict["class"] = "GPy.mappings.Linear"
input_dict["A"] = self.A.values.tolist()
return input_dict |
def str_to_mac(mac_string):
"""Convert a readable string to a MAC address
Args:
mac_string (str): a readable string (e.g. '01:02:03:04:05:06')
Returns:
str: a MAC address in hex form
"""
sp = mac_string.split(':')
mac_string = ''.join(sp)
return binascii.unhexlify(mac_string) | Convert a readable string to a MAC address
Args:
mac_string (str): a readable string (e.g. '01:02:03:04:05:06')
Returns:
str: a MAC address in hex form | Below is the instruction that describes the task:
### Input:
Convert a readable string to a MAC address
Args:
mac_string (str): a readable string (e.g. '01:02:03:04:05:06')
Returns:
str: a MAC address in hex form
### Response:
def str_to_mac(mac_string):
"""Convert a readable string to a MAC address
Args:
mac_string (str): a readable string (e.g. '01:02:03:04:05:06')
Returns:
str: a MAC address in hex form
"""
sp = mac_string.split(':')
mac_string = ''.join(sp)
return binascii.unhexlify(mac_string) |
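A quick usage note on the helper above: despite the `str` annotation in the docstring, `binascii.unhexlify` returns `bytes` on Python 3. A self-contained copy of the helper plus one call:

```python
import binascii

def str_to_mac(mac_string):
    """Convert a readable string to a MAC address (copy of the helper above)."""
    return binascii.unhexlify(''.join(mac_string.split(':')))

print(str_to_mac('01:02:03:04:05:06'))  # b'\x01\x02\x03\x04\x05\x06'
```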
def Analyze(self, source_path, output_writer):
"""Analyzes the source.
Args:
source_path (str): the source path.
output_writer (StdoutWriter): the output writer.
Raises:
RuntimeError: if the source path does not exists, or if the source path
is not a file or directory, or if the format of or within the source
file is not supported.
"""
if not os.path.exists(source_path):
raise RuntimeError('No such source: {0:s}.'.format(source_path))
scan_context = source_scanner.SourceScannerContext()
scan_path_spec = None
scan_step = 0
scan_context.OpenSourcePath(source_path)
while True:
self._source_scanner.Scan(
scan_context, auto_recurse=self._auto_recurse,
scan_path_spec=scan_path_spec)
if not scan_context.updated:
break
if not self._auto_recurse:
output_writer.WriteScanContext(scan_context, scan_step=scan_step)
scan_step += 1
# The source is a directory or file.
if scan_context.source_type in [
definitions.SOURCE_TYPE_DIRECTORY, definitions.SOURCE_TYPE_FILE]:
break
# The source scanner found a locked volume, e.g. an encrypted volume,
# and we need a credential to unlock the volume.
for locked_scan_node in scan_context.locked_scan_nodes:
self._PromptUserForEncryptedVolumeCredential(
scan_context, locked_scan_node, output_writer)
if not self._auto_recurse:
scan_node = scan_context.GetUnscannedScanNode()
if not scan_node:
return
scan_path_spec = scan_node.path_spec
if self._auto_recurse:
output_writer.WriteScanContext(scan_context) | Analyzes the source.
Args:
source_path (str): the source path.
output_writer (StdoutWriter): the output writer.
Raises:
RuntimeError: if the source path does not exists, or if the source path
is not a file or directory, or if the format of or within the source
file is not supported. | Below is the instruction that describes the task:
### Input:
Analyzes the source.
Args:
source_path (str): the source path.
output_writer (StdoutWriter): the output writer.
Raises:
RuntimeError: if the source path does not exists, or if the source path
is not a file or directory, or if the format of or within the source
file is not supported.
### Response:
def Analyze(self, source_path, output_writer):
"""Analyzes the source.
Args:
source_path (str): the source path.
output_writer (StdoutWriter): the output writer.
Raises:
RuntimeError: if the source path does not exists, or if the source path
is not a file or directory, or if the format of or within the source
file is not supported.
"""
if not os.path.exists(source_path):
raise RuntimeError('No such source: {0:s}.'.format(source_path))
scan_context = source_scanner.SourceScannerContext()
scan_path_spec = None
scan_step = 0
scan_context.OpenSourcePath(source_path)
while True:
self._source_scanner.Scan(
scan_context, auto_recurse=self._auto_recurse,
scan_path_spec=scan_path_spec)
if not scan_context.updated:
break
if not self._auto_recurse:
output_writer.WriteScanContext(scan_context, scan_step=scan_step)
scan_step += 1
# The source is a directory or file.
if scan_context.source_type in [
definitions.SOURCE_TYPE_DIRECTORY, definitions.SOURCE_TYPE_FILE]:
break
# The source scanner found a locked volume, e.g. an encrypted volume,
# and we need a credential to unlock the volume.
for locked_scan_node in scan_context.locked_scan_nodes:
self._PromptUserForEncryptedVolumeCredential(
scan_context, locked_scan_node, output_writer)
if not self._auto_recurse:
scan_node = scan_context.GetUnscannedScanNode()
if not scan_node:
return
scan_path_spec = scan_node.path_spec
if self._auto_recurse:
output_writer.WriteScanContext(scan_context) |
def register_array_types_from_sources(self, source_files):
'''Add array type definitions from a file list to internal registry
Args:
source_files (list of str): Files to parse for array definitions
'''
for fname in source_files:
if is_vhdl(fname):
self._register_array_types(self.extract_objects(fname)) | Add array type definitions from a file list to internal registry
Args:
source_files (list of str): Files to parse for array definitions | Below is the instruction that describes the task:
### Input:
Add array type definitions from a file list to internal registry
Args:
source_files (list of str): Files to parse for array definitions
### Response:
def register_array_types_from_sources(self, source_files):
'''Add array type definitions from a file list to internal registry
Args:
source_files (list of str): Files to parse for array definitions
'''
for fname in source_files:
if is_vhdl(fname):
self._register_array_types(self.extract_objects(fname)) |
def _seqfeature_to_coral(feature):
'''Convert a Biopython SeqFeature to a coral.Feature.
:param feature: Biopython SeqFeature
:type feature: Bio.SeqFeature
'''
# Some genomic sequences don't have a label attribute
# TODO: handle genomic cases differently than others. Some features lack
# a label but should still be incorporated somehow.
qualifiers = feature.qualifiers
if 'label' in qualifiers:
feature_name = qualifiers['label'][0]
elif 'locus_tag' in qualifiers:
feature_name = qualifiers['locus_tag'][0]
else:
raise FeatureNameError('Unrecognized feature name')
# Features with gaps are special, require looking at subfeatures
# Assumption: subfeatures are never more than one level deep
if feature.location_operator == 'join':
# Feature has gaps. Have to figure out start/stop from subfeatures,
# calculate gap indices. A nested feature model may be required
# eventually.
# Reorder the sub_feature list by start location
# Assumption: none of the subfeatures overlap so the last entry in
# the reordered list also has the final stop point of the feature.
# FIXME: Getting a deprecation warning about using sub_features
# instead of feature.location being a CompoundFeatureLocation
reordered = sorted(feature.location.parts,
key=lambda location: location.start)
starts = [int(location.start) for location in reordered]
stops = [int(location.end) for location in reordered]
feature_start = starts.pop(0)
feature_stop = stops.pop(-1)
starts = [start - feature_start for start in starts]
stops = [stop - feature_start for stop in stops]
feature_gaps = list(zip(stops, starts))
else:
# Feature doesn't have gaps. Ignore subfeatures.
feature_start = int(feature.location.start)
feature_stop = int(feature.location.end)
feature_gaps = []
feature_type = _process_feature_type(feature.type)
if feature.location.strand == -1:
feature_strand = 1
else:
feature_strand = 0
if 'gene' in qualifiers:
gene = qualifiers['gene']
else:
gene = []
if 'locus_tag' in qualifiers:
locus_tag = qualifiers['locus_tag']
else:
locus_tag = []
coral_feature = coral.Feature(feature_name, feature_start,
feature_stop, feature_type,
gene=gene, locus_tag=locus_tag,
qualifiers=qualifiers,
strand=feature_strand,
gaps=feature_gaps)
return coral_feature | Convert a Biopython SeqFeature to a coral.Feature.
:param feature: Biopython SeqFeature
:type feature: Bio.SeqFeature | Below is the instruction that describes the task:
### Input:
Convert a Biopython SeqFeature to a coral.Feature.
:param feature: Biopython SeqFeature
:type feature: Bio.SeqFeature
### Response:
def _seqfeature_to_coral(feature):
'''Convert a Biopython SeqFeature to a coral.Feature.
:param feature: Biopython SeqFeature
:type feature: Bio.SeqFeature
'''
# Some genomic sequences don't have a label attribute
# TODO: handle genomic cases differently than others. Some features lack
# a label but should still be incorporated somehow.
qualifiers = feature.qualifiers
if 'label' in qualifiers:
feature_name = qualifiers['label'][0]
elif 'locus_tag' in qualifiers:
feature_name = qualifiers['locus_tag'][0]
else:
raise FeatureNameError('Unrecognized feature name')
# Features with gaps are special, require looking at subfeatures
# Assumption: subfeatures are never more than one level deep
if feature.location_operator == 'join':
# Feature has gaps. Have to figure out start/stop from subfeatures,
# calculate gap indices. A nested feature model may be required
# eventually.
# Reorder the sub_feature list by start location
# Assumption: none of the subfeatures overlap so the last entry in
# the reordered list also has the final stop point of the feature.
# FIXME: Getting a deprecation warning about using sub_features
# instead of feature.location being a CompoundFeatureLocation
reordered = sorted(feature.location.parts,
key=lambda location: location.start)
starts = [int(location.start) for location in reordered]
stops = [int(location.end) for location in reordered]
feature_start = starts.pop(0)
feature_stop = stops.pop(-1)
starts = [start - feature_start for start in starts]
stops = [stop - feature_start for stop in stops]
feature_gaps = list(zip(stops, starts))
else:
# Feature doesn't have gaps. Ignore subfeatures.
feature_start = int(feature.location.start)
feature_stop = int(feature.location.end)
feature_gaps = []
feature_type = _process_feature_type(feature.type)
if feature.location.strand == -1:
feature_strand = 1
else:
feature_strand = 0
if 'gene' in qualifiers:
gene = qualifiers['gene']
else:
gene = []
if 'locus_tag' in qualifiers:
locus_tag = qualifiers['locus_tag']
else:
locus_tag = []
coral_feature = coral.Feature(feature_name, feature_start,
feature_stop, feature_type,
gene=gene, locus_tag=locus_tag,
qualifiers=qualifiers,
strand=feature_strand,
gaps=feature_gaps)
return coral_feature |
def get_debug():
"""
Utility function providing ``debug()`` function.
"""
try:
import IPython
except ImportError:
debug = None
else:
old_excepthook = sys.excepthook
def debug(frame=None):
if IPython.__version__ >= '0.11':
from IPython.core.debugger import Pdb
try:
ip = get_ipython()
except NameError:
from IPython.frontend.terminal.embed \
import InteractiveShellEmbed
ip = InteractiveShellEmbed()
colors = ip.colors
else:
from IPython.Debugger import Pdb
from IPython.Shell import IPShell
from IPython import ipapi
ip = ipapi.get()
if ip is None:
IPShell(argv=[''])
ip = ipapi.get()
colors = ip.options.colors
sys.excepthook = old_excepthook
if frame is None:
frame = sys._getframe().f_back
Pdb(colors).set_trace(frame)
if debug is None:
import pdb
debug = pdb.set_trace
debug.__doc__ = """
Start debugger on line where it is called, roughly equivalent to::
import pdb; pdb.set_trace()
First, this function tries to start an `IPython`-enabled
debugger using the `IPython` API.
When this fails, the plain old `pdb` is used instead.
"""
return debug | Utility function providing ``debug()`` function. | Below is the instruction that describes the task:
### Input:
Utility function providing ``debug()`` function.
### Response:
def get_debug():
"""
Utility function providing ``debug()`` function.
"""
try:
import IPython
except ImportError:
debug = None
else:
old_excepthook = sys.excepthook
def debug(frame=None):
if IPython.__version__ >= '0.11':
from IPython.core.debugger import Pdb
try:
ip = get_ipython()
except NameError:
from IPython.frontend.terminal.embed \
import InteractiveShellEmbed
ip = InteractiveShellEmbed()
colors = ip.colors
else:
from IPython.Debugger import Pdb
from IPython.Shell import IPShell
from IPython import ipapi
ip = ipapi.get()
if ip is None:
IPShell(argv=[''])
ip = ipapi.get()
colors = ip.options.colors
sys.excepthook = old_excepthook
if frame is None:
frame = sys._getframe().f_back
Pdb(colors).set_trace(frame)
if debug is None:
import pdb
debug = pdb.set_trace
debug.__doc__ = """
Start debugger on line where it is called, roughly equivalent to::
import pdb; pdb.set_trace()
First, this function tries to start an `IPython`-enabled
debugger using the `IPython` API.
When this fails, the plain old `pdb` is used instead.
"""
return debug |
def compact(self):
"""Remove all invalid config entries."""
saved_length = 0
to_remove = []
for i, entry in enumerate(self.entries):
if not entry.valid:
to_remove.append(i)
saved_length += entry.data_space()
for i in reversed(to_remove):
del self.entries[i]
self.data_index -= saved_length | Remove all invalid config entries. | Below is the instruction that describes the task:
### Input:
Remove all invalid config entries.
### Response:
def compact(self):
"""Remove all invalid config entries."""
saved_length = 0
to_remove = []
for i, entry in enumerate(self.entries):
if not entry.valid:
to_remove.append(i)
saved_length += entry.data_space()
for i in reversed(to_remove):
del self.entries[i]
self.data_index -= saved_length |
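The `reversed(to_remove)` loop in `compact` is doing real work: deleting list items front-to-back would shift every later index, so the collected positions would drift. A standalone illustration of the idiom:

```python
entries = ['keep0', 'drop1', 'keep2', 'drop3']
to_remove = [1, 3]
for i in reversed(to_remove):   # delete from the back so earlier indices stay valid
    del entries[i]
print(entries)  # ['keep0', 'keep2']
```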
def numeric_function_clean_dataframe(self, axis):
"""Preprocesses numeric functions to clean dataframe and pick numeric indices.
Args:
axis: '0' if columns and '1' if rows.
Returns:
Tuple with return value(if any), indices to apply func to & cleaned Manager.
"""
result = None
query_compiler = self
# If no numeric columns and over columns, then return empty Series
if not axis and len(self.index) == 0:
result = pandas.Series(dtype=np.int64)
nonnumeric = [
col
for col, dtype in zip(self.columns, self.dtypes)
if not is_numeric_dtype(dtype)
]
if len(nonnumeric) == len(self.columns):
# If over rows and no numeric columns, return this
if axis:
result = pandas.Series([np.nan for _ in self.index])
else:
result = pandas.Series([0 for _ in self.index])
else:
query_compiler = self.drop(columns=nonnumeric)
return result, query_compiler | Preprocesses numeric functions to clean dataframe and pick numeric indices.
Args:
axis: '0' if columns and '1' if rows.
Returns:
Tuple with return value(if any), indices to apply func to & cleaned Manager. | Below is the instruction that describes the task:
### Input:
Preprocesses numeric functions to clean dataframe and pick numeric indices.
Args:
axis: '0' if columns and '1' if rows.
Returns:
Tuple with return value(if any), indices to apply func to & cleaned Manager.
### Response:
def numeric_function_clean_dataframe(self, axis):
"""Preprocesses numeric functions to clean dataframe and pick numeric indices.
Args:
axis: '0' if columns and '1' if rows.
Returns:
Tuple with return value(if any), indices to apply func to & cleaned Manager.
"""
result = None
query_compiler = self
# If no numeric columns and over columns, then return empty Series
if not axis and len(self.index) == 0:
result = pandas.Series(dtype=np.int64)
nonnumeric = [
col
for col, dtype in zip(self.columns, self.dtypes)
if not is_numeric_dtype(dtype)
]
if len(nonnumeric) == len(self.columns):
# If over rows and no numeric columns, return this
if axis:
result = pandas.Series([np.nan for _ in self.index])
else:
result = pandas.Series([0 for _ in self.index])
else:
query_compiler = self.drop(columns=nonnumeric)
return result, query_compiler |
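A standalone sketch of the dtype filter that the method above hinges on, using plain pandas and no query compiler; the tiny DataFrame is invented for illustration:

```python
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype

df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y'], 'c': [0.5, np.nan]})
nonnumeric = [col for col, dtype in zip(df.columns, df.dtypes)
              if not is_numeric_dtype(dtype)]
print(nonnumeric)                                    # ['b']
print(df.drop(columns=nonnumeric).columns.tolist())  # ['a', 'c']
```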
def rm_op(l, name, op):
"""Remove an opcode. This is used when basing a new Python release off
of another one, and there is an opcode that is in the old release
that was removed in the new release.
We are pretty aggressive about removing traces of the op.
"""
# opname is an array, so we need to keep the position in there.
l['opname'][op] = '<%s>' % op
if op in l['hasconst']:
l['hasconst'].remove(op)
if op in l['hascompare']:
l['hascompare'].remove(op)
if op in l['hascondition']:
l['hascondition'].remove(op)
if op in l['hasfree']:
l['hasfree'].remove(op)
if op in l['hasjabs']:
l['hasjabs'].remove(op)
if op in l['hasname']:
l['hasname'].remove(op)
if op in l['hasjrel']:
l['hasjrel'].remove(op)
if op in l['haslocal']:
l['haslocal'].remove(op)
if op in l['hasname']:
l['hasname'].remove(op)
if op in l['hasnargs']:
l['hasnargs'].remove(op)
if op in l['hasvargs']:
l['hasvargs'].remove(op)
if op in l['nofollow']:
l['nofollow'].remove(op)
assert l['opmap'][name] == op
del l['opmap'][name] | Remove an opcode. This is used when basing a new Python release off
of another one, and there is an opcode that is in the old release
that was removed in the new release.
We are pretty aggressive about removing traces of the op. | Below is the instruction that describes the task:
### Input:
Remove an opcode. This is used when basing a new Python release off
of another one, and there is an opcode that is in the old release
that was removed in the new release.
We are pretty aggressive about removing traces of the op.
### Response:
def rm_op(l, name, op):
"""Remove an opcode. This is used when basing a new Python release off
of another one, and there is an opcode that is in the old release
that was removed in the new release.
We are pretty aggressive about removing traces of the op.
"""
# opname is an array, so we need to keep the position in there.
l['opname'][op] = '<%s>' % op
if op in l['hasconst']:
l['hasconst'].remove(op)
if op in l['hascompare']:
l['hascompare'].remove(op)
if op in l['hascondition']:
l['hascondition'].remove(op)
if op in l['hasfree']:
l['hasfree'].remove(op)
if op in l['hasjabs']:
l['hasjabs'].remove(op)
if op in l['hasname']:
l['hasname'].remove(op)
if op in l['hasjrel']:
l['hasjrel'].remove(op)
if op in l['haslocal']:
l['haslocal'].remove(op)
if op in l['hasname']:
l['hasname'].remove(op)
if op in l['hasnargs']:
l['hasnargs'].remove(op)
if op in l['hasvargs']:
l['hasvargs'].remove(op)
if op in l['nofollow']:
l['nofollow'].remove(op)
assert l['opmap'][name] == op
del l['opmap'][name] |
def srbt(peer, pkts, inter=0.1, *args, **kargs):
"""send and receive using a bluetooth socket"""
s = conf.BTsocket(peer=peer)
a,b = sndrcv(s,pkts,inter=inter,*args,**kargs)
s.close()
return a,b | send and receive using a bluetooth socket | Below is the instruction that describes the task:
### Input:
send and receive using a bluetooth socket
### Response:
def srbt(peer, pkts, inter=0.1, *args, **kargs):
"""send and receive using a bluetooth socket"""
s = conf.BTsocket(peer=peer)
a,b = sndrcv(s,pkts,inter=inter,*args,**kargs)
s.close()
return a,b |
def pyoidcMiddleware(func):
"""Common wrapper for the underlying pyoidc library functions.
Reads GET params and POST data before passing it on the library and
converts the response from oic.utils.http_util to wsgi.
:param func: underlying library function
"""
def wrapper(environ, start_response):
data = get_or_post(environ)
cookies = environ.get("HTTP_COOKIE", "")
resp = func(request=data, cookie=cookies)
return resp(environ, start_response)
return wrapper | Common wrapper for the underlying pyoidc library functions.
Reads GET params and POST data before passing it on the library and
converts the response from oic.utils.http_util to wsgi.
:param func: underlying library function | Below is the instruction that describes the task:
### Input:
Common wrapper for the underlying pyoidc library functions.
Reads GET params and POST data before passing it on the library and
converts the response from oic.utils.http_util to wsgi.
:param func: underlying library function
### Response:
def pyoidcMiddleware(func):
"""Common wrapper for the underlying pyoidc library functions.
Reads GET params and POST data before passing it on the library and
converts the response from oic.utils.http_util to wsgi.
:param func: underlying library function
"""
def wrapper(environ, start_response):
data = get_or_post(environ)
cookies = environ.get("HTTP_COOKIE", "")
resp = func(request=data, cookie=cookies)
return resp(environ, start_response)
return wrapper |
def get_indentation(line):
"""Return leading whitespace."""
if line.strip():
non_whitespace_index = len(line) - len(line.lstrip())
return line[:non_whitespace_index]
else:
return '' | Return leading whitespace. | Below is the instruction that describes the task:
### Input:
Return leading whitespace.
### Response:
def get_indentation(line):
"""Return leading whitespace."""
if line.strip():
non_whitespace_index = len(line) - len(line.lstrip())
return line[:non_whitespace_index]
else:
return '' |
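A few illustrative calls to the helper above; note that whitespace-only lines fall into the `else` branch and yield an empty string:

```python
print(repr(get_indentation('    x = 1')))   # '    '
print(repr(get_indentation('\t\treturn')))  # '\t\t'
print(repr(get_indentation('   \n')))       # ''  (whitespace-only line)
```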
def initialize_registry(args: argparse.Namespace, backend: StorageBackend, log: logging.Logger):
"""
Initialize the registry and the index.
:param args: :class:`argparse.Namespace` with "backend", "args", "force" and "log_level".
:param backend: Backend which is responsible for working with model files.
:param log: Logger supplied by supply_backend
:return: None
"""
try:
backend.reset(args.force)
except ExistingBackendError:
return 1
log.info("Resetting the index ...")
backend.index.reset()
try:
backend.index.upload("reset", {})
except ValueError:
return 1
log.info("Successfully initialized") | Initialize the registry and the index.
:param args: :class:`argparse.Namespace` with "backend", "args", "force" and "log_level".
:param backend: Backend which is responsible for working with model files.
:param log: Logger supplied by supply_backend
:return: None | Below is the instruction that describes the task:
### Input:
Initialize the registry and the index.
:param args: :class:`argparse.Namespace` with "backend", "args", "force" and "log_level".
:param backend: Backend which is responsible for working with model files.
:param log: Logger supplied by supply_backend
:return: None
### Response:
def initialize_registry(args: argparse.Namespace, backend: StorageBackend, log: logging.Logger):
"""
Initialize the registry and the index.
:param args: :class:`argparse.Namespace` with "backend", "args", "force" and "log_level".
:param backend: Backend which is responsible for working with model files.
:param log: Logger supplied by supply_backend
:return: None
"""
try:
backend.reset(args.force)
except ExistingBackendError:
return 1
log.info("Resetting the index ...")
backend.index.reset()
try:
backend.index.upload("reset", {})
except ValueError:
return 1
log.info("Successfully initialized") |
def forward_kinematics(self, joints, full_kinematics=False):
"""Returns the transformation matrix of the forward kinematics
Parameters
----------
joints: list
The list of the positions of each joint. Note : Inactive joints must be in the list.
full_kinematics: bool
Return the transformation matrices of each joint
Returns
-------
frame_matrix:
The transformation matrix
"""
frame_matrix = np.eye(4)
if full_kinematics:
frame_matrixes = []
if len(self.links) != len(joints):
raise ValueError("Your joints vector length is {} but you have {} links".format(len(joints), len(self.links)))
for index, (link, joint_angle) in enumerate(zip(self.links, joints)):
# Compute iteratively the position
# NB : Use asarray to avoid old sympy problems
frame_matrix = np.dot(frame_matrix, np.asarray(link.get_transformation_matrix(joint_angle)))
if full_kinematics:
# rotation_axe = np.dot(frame_matrix, link.rotation)
frame_matrixes.append(frame_matrix)
# Return the matrix, or matrixes
if full_kinematics:
return frame_matrixes
else:
return frame_matrix | Returns the transformation matrix of the forward kinematics
Parameters
----------
joints: list
The list of the positions of each joint. Note : Inactive joints must be in the list.
full_kinematics: bool
Return the transformation matrices of each joint
Returns
-------
frame_matrix:
The transformation matrix | Below is the instruction that describes the task:
### Input:
Returns the transformation matrix of the forward kinematics
Parameters
----------
joints: list
The list of the positions of each joint. Note : Inactive joints must be in the list.
full_kinematics: bool
Return the transformation matrices of each joint
Returns
-------
frame_matrix:
The transformation matrix
### Response:
def forward_kinematics(self, joints, full_kinematics=False):
"""Returns the transformation matrix of the forward kinematics
Parameters
----------
joints: list
The list of the positions of each joint. Note : Inactive joints must be in the list.
full_kinematics: bool
Return the transformation matrices of each joint
Returns
-------
frame_matrix:
The transformation matrix
"""
frame_matrix = np.eye(4)
if full_kinematics:
frame_matrixes = []
if len(self.links) != len(joints):
raise ValueError("Your joints vector length is {} but you have {} links".format(len(joints), len(self.links)))
for index, (link, joint_angle) in enumerate(zip(self.links, joints)):
# Compute iteratively the position
# NB : Use asarray to avoid old sympy problems
frame_matrix = np.dot(frame_matrix, np.asarray(link.get_transformation_matrix(joint_angle)))
if full_kinematics:
# rotation_axe = np.dot(frame_matrix, link.rotation)
frame_matrixes.append(frame_matrix)
# Return the matrix, or matrixes
if full_kinematics:
return frame_matrixes
else:
return frame_matrix |
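The loop above is a running product of 4x4 homogeneous transforms: frame = T_1 · T_2 · ... · T_n. A minimal numpy illustration with two made-up link transforms (pure translations, not the actual link model used above):

```python
import numpy as np

def translation(dx, dy, dz):
    t = np.eye(4)
    t[:3, 3] = [dx, dy, dz]
    return t

frame = np.eye(4)
for link_transform in (translation(1.0, 0.0, 0.0), translation(0.0, 2.0, 0.0)):
    frame = np.dot(frame, link_transform)   # same accumulation as in forward_kinematics
print(frame[:3, 3])  # [1. 2. 0.]
```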
def _create_table(self, packet_defn):
''' Creates a database table for the given PacketDefinition
Arguments
packet_defn
The :class:`ait.core.tlm.PacketDefinition` instance for which a table entry
should be made.
'''
cols = ('%s %s' % (defn.name, self._getTypename(defn)) for defn in packet_defn.fields)
sql = 'CREATE TABLE IF NOT EXISTS %s (%s)' % (packet_defn.name, ', '.join(cols))
self._conn.execute(sql)
self._conn.commit() | Creates a database table for the given PacketDefinition
Arguments
packet_defn
The :class:`ait.core.tlm.PacketDefinition` instance for which a table entry
should be made. | Below is the instruction that describes the task:
### Input:
Creates a database table for the given PacketDefinition
Arguments
packet_defn
The :class:`ait.core.tlm.PacketDefinition` instance for which a table entry
should be made.
### Response:
def _create_table(self, packet_defn):
''' Creates a database table for the given PacketDefinition
Arguments
packet_defn
The :class:`ait.core.tlm.PacketDefinition` instance for which a table entry
should be made.
'''
cols = ('%s %s' % (defn.name, self._getTypename(defn)) for defn in packet_defn.fields)
sql = 'CREATE TABLE IF NOT EXISTS %s (%s)' % (packet_defn.name, ', '.join(cols))
self._conn.execute(sql)
self._conn.commit() |
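For illustration only, here is the kind of statement the string formatting above produces; the packet name and field typenames are hypothetical, since they depend on the packet definition being stored:

```python
# Hypothetical field list: (name, typename) pairs as _getTypename might return them.
fields = [('Voltage_A', 'FLOAT'), ('CmdCount', 'INTEGER')]
cols = ('%s %s' % (name, typename) for name, typename in fields)
sql = 'CREATE TABLE IF NOT EXISTS %s (%s)' % ('HS_Packet', ', '.join(cols))
print(sql)  # CREATE TABLE IF NOT EXISTS HS_Packet (Voltage_A FLOAT, CmdCount INTEGER)
```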
def stop(self, fileStore):
"""
Stop spark and hdfs worker containers
:param job: The underlying job.
"""
subprocess.call(["docker", "exec", self.sparkContainerID, "rm", "-r", "/ephemeral/spark"])
subprocess.call(["docker", "stop", self.sparkContainerID])
subprocess.call(["docker", "rm", self.sparkContainerID])
_log.info("Stopped Spark worker.")
subprocess.call(["docker", "exec", self.hdfsContainerID, "rm", "-r", "/ephemeral/hdfs"])
subprocess.call(["docker", "stop", self.hdfsContainerID])
subprocess.call(["docker", "rm", self.hdfsContainerID])
_log.info("Stopped HDFS datanode.")
return | Stop spark and hdfs worker containers
:param job: The underlying job. | Below is the instruction that describes the task:
### Input:
Stop spark and hdfs worker containers
:param job: The underlying job.
### Response:
def stop(self, fileStore):
"""
Stop spark and hdfs worker containers
:param job: The underlying job.
"""
subprocess.call(["docker", "exec", self.sparkContainerID, "rm", "-r", "/ephemeral/spark"])
subprocess.call(["docker", "stop", self.sparkContainerID])
subprocess.call(["docker", "rm", self.sparkContainerID])
_log.info("Stopped Spark worker.")
subprocess.call(["docker", "exec", self.hdfsContainerID, "rm", "-r", "/ephemeral/hdfs"])
subprocess.call(["docker", "stop", self.hdfsContainerID])
subprocess.call(["docker", "rm", self.hdfsContainerID])
_log.info("Stopped HDFS datanode.")
return |
def create_fleet(Name=None, ImageName=None, InstanceType=None, ComputeCapacity=None, VpcConfig=None, MaxUserDurationInSeconds=None, DisconnectTimeoutInSeconds=None, Description=None, DisplayName=None, EnableDefaultInternetAccess=None):
"""
Creates a new fleet.
See also: AWS API Documentation
:example: response = client.create_fleet(
Name='string',
ImageName='string',
InstanceType='string',
ComputeCapacity={
'DesiredInstances': 123
},
VpcConfig={
'SubnetIds': [
'string',
]
},
MaxUserDurationInSeconds=123,
DisconnectTimeoutInSeconds=123,
Description='string',
DisplayName='string',
EnableDefaultInternetAccess=True|False
)
:type Name: string
:param Name: [REQUIRED]
A unique identifier for the fleet.
:type ImageName: string
:param ImageName: [REQUIRED]
Unique name of the image used by the fleet.
:type InstanceType: string
:param InstanceType: [REQUIRED]
The instance type of compute resources for the fleet. Fleet instances are launched from this instance type.
:type ComputeCapacity: dict
:param ComputeCapacity: [REQUIRED]
The parameters for the capacity allocated to the fleet.
DesiredInstances (integer) -- [REQUIRED]The desired number of streaming instances.
:type VpcConfig: dict
:param VpcConfig: The VPC configuration for the fleet.
SubnetIds (list) --The list of subnets to which a network interface is established from the fleet instance.
(string) --
:type MaxUserDurationInSeconds: integer
:param MaxUserDurationInSeconds: The maximum time for which a streaming session can run. The input can be any numeric value in seconds between 600 and 57600.
:type DisconnectTimeoutInSeconds: integer
:param DisconnectTimeoutInSeconds: The time after disconnection when a session is considered to have ended. If a user who got disconnected reconnects within this timeout interval, the user is connected back to their previous session. The input can be any numeric value in seconds between 60 and 57600.
:type Description: string
:param Description: The description of the fleet.
:type DisplayName: string
:param DisplayName: The display name of the fleet.
:type EnableDefaultInternetAccess: boolean
:param EnableDefaultInternetAccess: Enables or disables default Internet access for the fleet.
:rtype: dict
:return: {
'Fleet': {
'Arn': 'string',
'Name': 'string',
'DisplayName': 'string',
'Description': 'string',
'ImageName': 'string',
'InstanceType': 'string',
'ComputeCapacityStatus': {
'Desired': 123,
'Running': 123,
'InUse': 123,
'Available': 123
},
'MaxUserDurationInSeconds': 123,
'DisconnectTimeoutInSeconds': 123,
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED',
'VpcConfig': {
'SubnetIds': [
'string',
]
},
'CreatedTime': datetime(2015, 1, 1),
'FleetErrors': [
{
'ErrorCode': 'IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION'|'NETWORK_INTERFACE_LIMIT_EXCEEDED'|'INTERNAL_SERVICE_ERROR'|'IAM_SERVICE_ROLE_IS_MISSING'|'SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION'|'SUBNET_NOT_FOUND'|'IMAGE_NOT_FOUND'|'INVALID_SUBNET_CONFIGURATION',
'ErrorMessage': 'string'
},
],
'EnableDefaultInternetAccess': True|False
}
}
:returns:
(string) --
"""
pass | Creates a new fleet.
See also: AWS API Documentation
:example: response = client.create_fleet(
Name='string',
ImageName='string',
InstanceType='string',
ComputeCapacity={
'DesiredInstances': 123
},
VpcConfig={
'SubnetIds': [
'string',
]
},
MaxUserDurationInSeconds=123,
DisconnectTimeoutInSeconds=123,
Description='string',
DisplayName='string',
EnableDefaultInternetAccess=True|False
)
:type Name: string
:param Name: [REQUIRED]
A unique identifier for the fleet.
:type ImageName: string
:param ImageName: [REQUIRED]
Unique name of the image used by the fleet.
:type InstanceType: string
:param InstanceType: [REQUIRED]
The instance type of compute resources for the fleet. Fleet instances are launched from this instance type.
:type ComputeCapacity: dict
:param ComputeCapacity: [REQUIRED]
The parameters for the capacity allocated to the fleet.
DesiredInstances (integer) -- [REQUIRED]The desired number of streaming instances.
:type VpcConfig: dict
:param VpcConfig: The VPC configuration for the fleet.
SubnetIds (list) --The list of subnets to which a network interface is established from the fleet instance.
(string) --
:type MaxUserDurationInSeconds: integer
:param MaxUserDurationInSeconds: The maximum time for which a streaming session can run. The input can be any numeric value in seconds between 600 and 57600.
:type DisconnectTimeoutInSeconds: integer
:param DisconnectTimeoutInSeconds: The time after disconnection when a session is considered to have ended. If a user who got disconnected reconnects within this timeout interval, the user is connected back to their previous session. The input can be any numeric value in seconds between 60 and 57600.
:type Description: string
:param Description: The description of the fleet.
:type DisplayName: string
:param DisplayName: The display name of the fleet.
:type EnableDefaultInternetAccess: boolean
:param EnableDefaultInternetAccess: Enables or disables default Internet access for the fleet.
:rtype: dict
:return: {
'Fleet': {
'Arn': 'string',
'Name': 'string',
'DisplayName': 'string',
'Description': 'string',
'ImageName': 'string',
'InstanceType': 'string',
'ComputeCapacityStatus': {
'Desired': 123,
'Running': 123,
'InUse': 123,
'Available': 123
},
'MaxUserDurationInSeconds': 123,
'DisconnectTimeoutInSeconds': 123,
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED',
'VpcConfig': {
'SubnetIds': [
'string',
]
},
'CreatedTime': datetime(2015, 1, 1),
'FleetErrors': [
{
'ErrorCode': 'IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION'|'NETWORK_INTERFACE_LIMIT_EXCEEDED'|'INTERNAL_SERVICE_ERROR'|'IAM_SERVICE_ROLE_IS_MISSING'|'SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION'|'SUBNET_NOT_FOUND'|'IMAGE_NOT_FOUND'|'INVALID_SUBNET_CONFIGURATION',
'ErrorMessage': 'string'
},
],
'EnableDefaultInternetAccess': True|False
}
}
:returns:
(string) -- | Below is the instruction that describes the task:
### Input:
Creates a new fleet.
See also: AWS API Documentation
:example: response = client.create_fleet(
Name='string',
ImageName='string',
InstanceType='string',
ComputeCapacity={
'DesiredInstances': 123
},
VpcConfig={
'SubnetIds': [
'string',
]
},
MaxUserDurationInSeconds=123,
DisconnectTimeoutInSeconds=123,
Description='string',
DisplayName='string',
EnableDefaultInternetAccess=True|False
)
:type Name: string
:param Name: [REQUIRED]
A unique identifier for the fleet.
:type ImageName: string
:param ImageName: [REQUIRED]
Unique name of the image used by the fleet.
:type InstanceType: string
:param InstanceType: [REQUIRED]
The instance type of compute resources for the fleet. Fleet instances are launched from this instance type.
:type ComputeCapacity: dict
:param ComputeCapacity: [REQUIRED]
The parameters for the capacity allocated to the fleet.
DesiredInstances (integer) -- [REQUIRED]The desired number of streaming instances.
:type VpcConfig: dict
:param VpcConfig: The VPC configuration for the fleet.
SubnetIds (list) --The list of subnets to which a network interface is established from the fleet instance.
(string) --
:type MaxUserDurationInSeconds: integer
:param MaxUserDurationInSeconds: The maximum time for which a streaming session can run. The input can be any numeric value in seconds between 600 and 57600.
:type DisconnectTimeoutInSeconds: integer
:param DisconnectTimeoutInSeconds: The time after disconnection when a session is considered to have ended. If a user who got disconnected reconnects within this timeout interval, the user is connected back to their previous session. The input can be any numeric value in seconds between 60 and 57600.
:type Description: string
:param Description: The description of the fleet.
:type DisplayName: string
:param DisplayName: The display name of the fleet.
:type EnableDefaultInternetAccess: boolean
:param EnableDefaultInternetAccess: Enables or disables default Internet access for the fleet.
:rtype: dict
:return: {
'Fleet': {
'Arn': 'string',
'Name': 'string',
'DisplayName': 'string',
'Description': 'string',
'ImageName': 'string',
'InstanceType': 'string',
'ComputeCapacityStatus': {
'Desired': 123,
'Running': 123,
'InUse': 123,
'Available': 123
},
'MaxUserDurationInSeconds': 123,
'DisconnectTimeoutInSeconds': 123,
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED',
'VpcConfig': {
'SubnetIds': [
'string',
]
},
'CreatedTime': datetime(2015, 1, 1),
'FleetErrors': [
{
'ErrorCode': 'IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION'|'NETWORK_INTERFACE_LIMIT_EXCEEDED'|'INTERNAL_SERVICE_ERROR'|'IAM_SERVICE_ROLE_IS_MISSING'|'SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION'|'SUBNET_NOT_FOUND'|'IMAGE_NOT_FOUND'|'INVALID_SUBNET_CONFIGURATION',
'ErrorMessage': 'string'
},
],
'EnableDefaultInternetAccess': True|False
}
}
:returns:
(string) --
### Response:
def create_fleet(Name=None, ImageName=None, InstanceType=None, ComputeCapacity=None, VpcConfig=None, MaxUserDurationInSeconds=None, DisconnectTimeoutInSeconds=None, Description=None, DisplayName=None, EnableDefaultInternetAccess=None):
"""
Creates a new fleet.
See also: AWS API Documentation
:example: response = client.create_fleet(
Name='string',
ImageName='string',
InstanceType='string',
ComputeCapacity={
'DesiredInstances': 123
},
VpcConfig={
'SubnetIds': [
'string',
]
},
MaxUserDurationInSeconds=123,
DisconnectTimeoutInSeconds=123,
Description='string',
DisplayName='string',
EnableDefaultInternetAccess=True|False
)
:type Name: string
:param Name: [REQUIRED]
A unique identifier for the fleet.
:type ImageName: string
:param ImageName: [REQUIRED]
Unique name of the image used by the fleet.
:type InstanceType: string
:param InstanceType: [REQUIRED]
The instance type of compute resources for the fleet. Fleet instances are launched from this instance type.
:type ComputeCapacity: dict
:param ComputeCapacity: [REQUIRED]
The parameters for the capacity allocated to the fleet.
DesiredInstances (integer) -- [REQUIRED]The desired number of streaming instances.
:type VpcConfig: dict
:param VpcConfig: The VPC configuration for the fleet.
SubnetIds (list) --The list of subnets to which a network interface is established from the fleet instance.
(string) --
:type MaxUserDurationInSeconds: integer
:param MaxUserDurationInSeconds: The maximum time for which a streaming session can run. The input can be any numeric value in seconds between 600 and 57600.
:type DisconnectTimeoutInSeconds: integer
:param DisconnectTimeoutInSeconds: The time after disconnection when a session is considered to have ended. If a user who got disconnected reconnects within this timeout interval, the user is connected back to their previous session. The input can be any numeric value in seconds between 60 and 57600.
:type Description: string
:param Description: The description of the fleet.
:type DisplayName: string
:param DisplayName: The display name of the fleet.
:type EnableDefaultInternetAccess: boolean
:param EnableDefaultInternetAccess: Enables or disables default Internet access for the fleet.
:rtype: dict
:return: {
'Fleet': {
'Arn': 'string',
'Name': 'string',
'DisplayName': 'string',
'Description': 'string',
'ImageName': 'string',
'InstanceType': 'string',
'ComputeCapacityStatus': {
'Desired': 123,
'Running': 123,
'InUse': 123,
'Available': 123
},
'MaxUserDurationInSeconds': 123,
'DisconnectTimeoutInSeconds': 123,
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED',
'VpcConfig': {
'SubnetIds': [
'string',
]
},
'CreatedTime': datetime(2015, 1, 1),
'FleetErrors': [
{
'ErrorCode': 'IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION'|'IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION'|'NETWORK_INTERFACE_LIMIT_EXCEEDED'|'INTERNAL_SERVICE_ERROR'|'IAM_SERVICE_ROLE_IS_MISSING'|'SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES'|'IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION'|'SUBNET_NOT_FOUND'|'IMAGE_NOT_FOUND'|'INVALID_SUBNET_CONFIGURATION',
'ErrorMessage': 'string'
},
],
'EnableDefaultInternetAccess': True|False
}
}
:returns:
(string) --
"""
pass |
def check_tool_aux(command):
""" Checks if 'command' can be found either in path
or is a full name to an existing file.
"""
assert isinstance(command, basestring)
dirname = os.path.dirname(command)
if dirname:
if os.path.exists(command):
return command
# Both NT and Cygwin will run .exe files by their unqualified names.
elif on_windows() and os.path.exists(command + '.exe'):
return command
# Only NT will run .bat files by their unqualified names.
elif os_name() == 'NT' and os.path.exists(command + '.bat'):
return command
else:
paths = path.programs_path()
if path.glob(paths, [command]):
return command | Checks if 'command' can be found either in path
or is a full name to an existing file. | Below is the instruction that describes the task:
### Input:
Checks if 'command' can be found either in path
or is a full name to an existing file.
### Response:
def check_tool_aux(command):
""" Checks if 'command' can be found either in path
or is a full name to an existing file.
"""
assert isinstance(command, basestring)
dirname = os.path.dirname(command)
if dirname:
if os.path.exists(command):
return command
# Both NT and Cygwin will run .exe files by their unqualified names.
elif on_windows() and os.path.exists(command + '.exe'):
return command
# Only NT will run .bat files by their unqualified names.
elif os_name() == 'NT' and os.path.exists(command + '.bat'):
return command
else:
paths = path.programs_path()
if path.glob(paths, [command]):
return command |
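For comparison, the standard library covers the same lookup idea (PATH search plus direct paths, with Windows executable-extension handling) in shutil.which; a quick sketch:
import shutil

print(shutil.which("python"))        # absolute path if found on PATH, otherwise None
print(shutil.which("/usr/bin/env"))  # a command with a directory part is checked directly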
def _check_exception(self):
"""if there's a saved exception, raise & clear it"""
if self._saved_exception is not None:
x = self._saved_exception
self._saved_exception = None
raise x | if there's a saved exception, raise & clear it | Below is the instruction that describes the task:
### Input:
if there's a saved exception, raise & clear it
### Response:
def _check_exception(self):
"""if there's a saved exception, raise & clear it"""
if self._saved_exception is not None:
x = self._saved_exception
self._saved_exception = None
raise x |
def _srels_for(phys_reader, source_uri):
"""
Return |_SerializedRelationshipCollection| instance populated with
relationships for source identified by *source_uri*.
"""
rels_xml = phys_reader.rels_xml_for(source_uri)
return _SerializedRelationshipCollection.load_from_xml(
source_uri.baseURI, rels_xml) | Return |_SerializedRelationshipCollection| instance populated with
relationships for source identified by *source_uri*. | Below is the instruction that describes the task:
### Input:
Return |_SerializedRelationshipCollection| instance populated with
relationships for source identified by *source_uri*.
### Response:
def _srels_for(phys_reader, source_uri):
"""
Return |_SerializedRelationshipCollection| instance populated with
relationships for source identified by *source_uri*.
"""
rels_xml = phys_reader.rels_xml_for(source_uri)
return _SerializedRelationshipCollection.load_from_xml(
source_uri.baseURI, rels_xml) |
def _TypecheckDecorator(subject=None, **kwargs):
"""Dispatches type checks based on what the subject is.
Functions or methods are annotated directly. If this method is called
with keyword arguments only, return a decorator.
"""
if subject is None:
return _TypecheckDecoratorFactory(kwargs)
elif inspect.isfunction(subject) or inspect.ismethod(subject):
return _TypecheckFunction(subject, {}, 2, None)
else:
raise TypeError() | Dispatches type checks based on what the subject is.
Functions or methods are annotated directly. If this method is called
with keyword arguments only, return a decorator. | Below is the instruction that describes the task:
### Input:
Dispatches type checks based on what the subject is.
Functions or methods are annotated directly. If this method is called
with keyword arguments only, return a decorator.
### Response:
def _TypecheckDecorator(subject=None, **kwargs):
"""Dispatches type checks based on what the subject is.
Functions or methods are annotated directly. If this method is called
with keyword arguments only, return a decorator.
"""
if subject is None:
return _TypecheckDecoratorFactory(kwargs)
elif inspect.isfunction(subject) or inspect.ismethod(subject):
return _TypecheckFunction(subject, {}, 2, None)
else:
raise TypeError() |
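A compact, self-contained sketch of the same dispatch idea -- a decorator usable both bare and with keyword arguments; the name typecheck and its options are hypothetical and no real checking is performed:
import functools
import inspect

def typecheck(subject=None, **options):
    if subject is None:                      # used as @typecheck(strict=True)
        return lambda fn: typecheck(fn, **options)
    if inspect.isfunction(subject) or inspect.ismethod(subject):
        @functools.wraps(subject)
        def wrapper(*args, **kwargs):
            return subject(*args, **kwargs)  # real argument checks would go here
        return wrapper
    raise TypeError("typecheck only decorates functions or methods")

@typecheck
def add(a, b):
    return a + b

print(add(1, 2))  # 3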
def get_variant_by_name(self, name):
"""Get the genotypes for a given variant (by name).
Args:
name (str): The name of the variant to retrieve the genotypes.
Returns:
list: A list of Genotypes. This is a list in order to keep the same
behaviour as the other functions.
"""
try:
geno = self.df.loc[:, name].values
info = self.map_info.loc[name, :]
except KeyError:
# The variant is not in the data, so we return an empty
# list
logging.variant_name_not_found(name)
return []
else:
return [Genotypes(
Variant(info.name, info.chrom, info.pos, [info.a1, info.a2]),
geno,
reference=info.a2,
coded=info.a1,
multiallelic=False,
)] | Get the genotypes for a given variant (by name).
Args:
name (str): The name of the variant to retrieve the genotypes.
Returns:
list: A list of Genotypes. This is a list in order to keep the same
behaviour as the other functions. | Below is the instruction that describes the task:
### Input:
Get the genotypes for a given variant (by name).
Args:
name (str): The name of the variant to retrieve the genotypes.
Returns:
list: A list of Genotypes. This is a list in order to keep the same
behaviour as the other functions.
### Response:
def get_variant_by_name(self, name):
"""Get the genotypes for a given variant (by name).
Args:
name (str): The name of the variant to retrieve the genotypes.
Returns:
list: A list of Genotypes. This is a list in order to keep the same
behaviour as the other functions.
"""
try:
geno = self.df.loc[:, name].values
info = self.map_info.loc[name, :]
except KeyError:
# The variant is not in the data, so we return an empty
# list
logging.variant_name_not_found(name)
return []
else:
return [Genotypes(
Variant(info.name, info.chrom, info.pos, [info.a1, info.a2]),
geno,
reference=info.a2,
coded=info.a1,
multiallelic=False,
)] |
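The KeyError-to-empty-list lookup above can be exercised with a plain pandas frame; the column name rs123 is made up for the sketch:
import pandas as pd

df = pd.DataFrame({"rs123": [0, 1, 2]})

def lookup(name):
    try:
        return [df.loc[:, name].values]
    except KeyError:
        return []              # unknown variant -> empty list, mirroring the method above

print(lookup("rs123"))         # [array([0, 1, 2])]
print(lookup("rs999"))         # []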
def group_data():
""" Load the reference data, and assign each object
a random integer from 0 to 7. Save the IDs. """
tr_obj = np.load("%s/ref_id.npz" %direc_ref)['arr_0']
groups = np.random.randint(0, 8, size=len(tr_obj))
np.savez("ref_groups.npz", groups) | Load the reference data, and assign each object
a random integer from 0 to 7. Save the IDs. | Below is the instruction that describes the task:
### Input:
Load the reference data, and assign each object
a random integer from 0 to 7. Save the IDs.
### Response:
def group_data():
""" Load the reference data, and assign each object
a random integer from 0 to 7. Save the IDs. """
tr_obj = np.load("%s/ref_id.npz" %direc_ref)['arr_0']
groups = np.random.randint(0, 8, size=len(tr_obj))
np.savez("ref_groups.npz", groups) |
def _generate_trials(self, experiment_spec, output_path=""):
"""Generates trials with configurations from `_suggest`.
Creates a trial_id that is passed into `_suggest`.
Yields:
Trial objects constructed according to `spec`
"""
if "run" not in experiment_spec:
raise TuneError("Must specify `run` in {}".format(experiment_spec))
for _ in range(experiment_spec.get("num_samples", 1)):
trial_id = Trial.generate_id()
while True:
suggested_config = self._suggest(trial_id)
if suggested_config is None:
yield None
else:
break
spec = copy.deepcopy(experiment_spec)
spec["config"] = merge_dicts(spec["config"], suggested_config)
flattened_config = resolve_nested_dict(spec["config"])
self._counter += 1
tag = "{0}_{1}".format(
str(self._counter), format_vars(flattened_config))
yield create_trial_from_spec(
spec,
output_path,
self._parser,
experiment_tag=tag,
trial_id=trial_id) | Generates trials with configurations from `_suggest`.
Creates a trial_id that is passed into `_suggest`.
Yields:
Trial objects constructed according to `spec` | Below is the instruction that describes the task:
### Input:
Generates trials with configurations from `_suggest`.
Creates a trial_id that is passed into `_suggest`.
Yields:
Trial objects constructed according to `spec`
### Response:
def _generate_trials(self, experiment_spec, output_path=""):
"""Generates trials with configurations from `_suggest`.
Creates a trial_id that is passed into `_suggest`.
Yields:
Trial objects constructed according to `spec`
"""
if "run" not in experiment_spec:
raise TuneError("Must specify `run` in {}".format(experiment_spec))
for _ in range(experiment_spec.get("num_samples", 1)):
trial_id = Trial.generate_id()
while True:
suggested_config = self._suggest(trial_id)
if suggested_config is None:
yield None
else:
break
spec = copy.deepcopy(experiment_spec)
spec["config"] = merge_dicts(spec["config"], suggested_config)
flattened_config = resolve_nested_dict(spec["config"])
self._counter += 1
tag = "{0}_{1}".format(
str(self._counter), format_vars(flattened_config))
yield create_trial_from_spec(
spec,
output_path,
self._parser,
experiment_tag=tag,
trial_id=trial_id) |
def getFixedStars(self):
""" Returns a list with all fixed stars. """
IDs = const.LIST_FIXED_STARS
return ephem.getFixedStarList(IDs, self.date) | Returns a list with all fixed stars. | Below is the instruction that describes the task:
### Input:
Returns a list with all fixed stars.
### Response:
def getFixedStars(self):
""" Returns a list with all fixed stars. """
IDs = const.LIST_FIXED_STARS
return ephem.getFixedStarList(IDs, self.date) |
def decompile(
bytecode_version, co, out=None, showasm=None, showast=False,
timestamp=None, showgrammar=False, code_objects={},
source_size=None, is_pypy=None, magic_int=None,
mapstream=None, do_fragments=False):
"""
ingests and deparses a given code block 'co'
if `bytecode_version` is None, use the current Python interpreter
version.
Caller is responsible for closing `out` and `mapstream`
"""
if bytecode_version is None:
bytecode_version = sysinfo2float()
# store final output stream for case of error
real_out = out or sys.stdout
def write(s):
s += '\n'
real_out.write(s)
assert iscode(co)
co_pypy_str = 'PyPy ' if is_pypy else ''
run_pypy_str = 'PyPy ' if IS_PYPY else ''
sys_version_lines = sys.version.split('\n')
write('# uncompyle6 version %s\n'
'# %sPython bytecode %s%s\n# Decompiled from: %sPython %s' %
(VERSION, co_pypy_str, bytecode_version,
" (%s)" % str(magic_int) if magic_int else "",
run_pypy_str, '\n# '.join(sys_version_lines)))
if co.co_filename:
write('# Embedded file name: %s' % co.co_filename,)
if timestamp:
write('# Compiled at: %s' % datetime.datetime.fromtimestamp(timestamp))
if source_size:
write('# Size of source mod 2**32: %d bytes' % source_size)
debug_opts = {
'asm': showasm,
'ast': showast,
'grammar': showgrammar
}
try:
if mapstream:
if isinstance(mapstream, str):
mapstream = _get_outstream(mapstream)
deparsed = deparse_code_with_map(bytecode_version, co, out, showasm, showast,
showgrammar,
code_objects = code_objects,
is_pypy = is_pypy,
)
header_count = 3+len(sys_version_lines)
linemap = [(line_no, deparsed.source_linemap[line_no]+header_count)
for line_no in
sorted(deparsed.source_linemap.keys())]
mapstream.write("\n\n# %s\n" % linemap)
else:
if do_fragments:
deparse_fn = code_deparse_fragments
else:
deparse_fn = code_deparse
deparsed = deparse_fn(co, out, bytecode_version,
debug_opts = debug_opts,
is_pypy=is_pypy)
pass
return deparsed
except pysource.SourceWalkerError as e:
# deparsing failed
raise pysource.SourceWalkerError(str(e)) | ingests and deparses a given code block 'co'
if `bytecode_version` is None, use the current Python interpreter
version.
Caller is responsible for closing `out` and `mapstream` | Below is the instruction that describes the task:
### Input:
ingests and deparses a given code block 'co'
if `bytecode_version` is None, use the current Python interpreter
version.
Caller is responsible for closing `out` and `mapstream`
### Response:
def decompile(
bytecode_version, co, out=None, showasm=None, showast=False,
timestamp=None, showgrammar=False, code_objects={},
source_size=None, is_pypy=None, magic_int=None,
mapstream=None, do_fragments=False):
"""
ingests and deparses a given code block 'co'
if `bytecode_version` is None, use the current Python interpreter
version.
Caller is responsible for closing `out` and `mapstream`
"""
if bytecode_version is None:
bytecode_version = sysinfo2float()
# store final output stream for case of error
real_out = out or sys.stdout
def write(s):
s += '\n'
real_out.write(s)
assert iscode(co)
co_pypy_str = 'PyPy ' if is_pypy else ''
run_pypy_str = 'PyPy ' if IS_PYPY else ''
sys_version_lines = sys.version.split('\n')
write('# uncompyle6 version %s\n'
'# %sPython bytecode %s%s\n# Decompiled from: %sPython %s' %
(VERSION, co_pypy_str, bytecode_version,
" (%s)" % str(magic_int) if magic_int else "",
run_pypy_str, '\n# '.join(sys_version_lines)))
if co.co_filename:
write('# Embedded file name: %s' % co.co_filename,)
if timestamp:
write('# Compiled at: %s' % datetime.datetime.fromtimestamp(timestamp))
if source_size:
write('# Size of source mod 2**32: %d bytes' % source_size)
debug_opts = {
'asm': showasm,
'ast': showast,
'grammar': showgrammar
}
try:
if mapstream:
if isinstance(mapstream, str):
mapstream = _get_outstream(mapstream)
deparsed = deparse_code_with_map(bytecode_version, co, out, showasm, showast,
showgrammar,
code_objects = code_objects,
is_pypy = is_pypy,
)
header_count = 3+len(sys_version_lines)
linemap = [(line_no, deparsed.source_linemap[line_no]+header_count)
for line_no in
sorted(deparsed.source_linemap.keys())]
mapstream.write("\n\n# %s\n" % linemap)
else:
if do_fragments:
deparse_fn = code_deparse_fragments
else:
deparse_fn = code_deparse
deparsed = deparse_fn(co, out, bytecode_version,
debug_opts = debug_opts,
is_pypy=is_pypy)
pass
return deparsed
except pysource.SourceWalkerError as e:
# deparsing failed
raise pysource.SourceWalkerError(str(e)) |
def map_to_resource(self, data_element, resource=None):
"""
Maps the given data element to a new resource or updates the given
resource.
:raises ValueError: If :param:`data_element` does not provide
:class:`everest.representers.interfaces.IDataElement`.
"""
if not IDataElement.providedBy(data_element): # pylint:disable=E1101
raise ValueError('Expected data element, got %s.' % data_element)
if resource is None:
coll = \
create_staging_collection(data_element.mapping.mapped_class)
agg = coll.get_aggregate()
agg.add(data_element)
if IMemberDataElement.providedBy(data_element): # pylint: disable=E1101
ent = next(iter(agg))
resource = \
data_element.mapping.mapped_class.create_from_entity(ent)
else:
resource = coll
else:
resource.update(data_element)
return resource | Maps the given data element to a new resource or updates the given
resource.
:raises ValueError: If :param:`data_element` does not provide
:class:`everest.representers.interfaces.IDataElement`. | Below is the instruction that describes the task:
### Input:
Maps the given data element to a new resource or updates the given
resource.
:raises ValueError: If :param:`data_element` does not provide
:class:`everest.representers.interfaces.IDataElement`.
### Response:
def map_to_resource(self, data_element, resource=None):
"""
Maps the given data element to a new resource or updates the given
resource.
:raises ValueError: If :param:`data_element` does not provide
:class:`everest.representers.interfaces.IDataElement`.
"""
if not IDataElement.providedBy(data_element): # pylint:disable=E1101
raise ValueError('Expected data element, got %s.' % data_element)
if resource is None:
coll = \
create_staging_collection(data_element.mapping.mapped_class)
agg = coll.get_aggregate()
agg.add(data_element)
if IMemberDataElement.providedBy(data_element): # pylint: disable=E1101
ent = next(iter(agg))
resource = \
data_element.mapping.mapped_class.create_from_entity(ent)
else:
resource = coll
else:
resource.update(data_element)
return resource |
def relaxNGValidatePushElement(self, ctxt, elem):
"""Push a new element start on the RelaxNG validation stack. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlRelaxNGValidatePushElement(ctxt__o, self._o, elem__o)
return ret | Push a new element start on the RelaxNG validation stack. | Below is the instruction that describes the task:
### Input:
Push a new element start on the RelaxNG validation stack.
### Response:
def relaxNGValidatePushElement(self, ctxt, elem):
"""Push a new element start on the RelaxNG validation stack. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlRelaxNGValidatePushElement(ctxt__o, self._o, elem__o)
return ret |
def html_escape(s, encoding='utf-8', encoding_errors='strict'):
""" Return the HTML-escaped version of an input. """
return escape(make_unicode(s, encoding, encoding_errors), quote=True) | Return the HTML-escaped version of an input. | Below is the instruction that describes the task:
### Input:
Return the HTML-escaped version of an input.
### Response:
def html_escape(s, encoding='utf-8', encoding_errors='strict'):
""" Return the HTML-escaped version of an input. """
return escape(make_unicode(s, encoding, encoding_errors), quote=True) |
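A rough standard-library equivalent, assuming the input is already text; html.escape also escapes single quotes when quote=True:
import html

print(html.escape('<a href="x">Tom & Jerry</a>', quote=True))
# &lt;a href=&quot;x&quot;&gt;Tom &amp; Jerry&lt;/a&gt;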
def from_file_msg(cls, fp):
"""
Init a new object from an Outlook message file,
mime type: application/vnd.ms-outlook
Args:
fp (string): file path of raw Outlook email
Returns:
Instance of MailParser
"""
log.debug("Parsing email from file Outlook")
f, _ = msgconvert(fp)
return cls.from_file(f, True) | Init a new object from an Outlook message file,
mime type: application/vnd.ms-outlook
Args:
fp (string): file path of raw Outlook email
Returns:
Instance of MailParser | Below is the instruction that describes the task:
### Input:
Init a new object from an Outlook message file,
mime type: application/vnd.ms-outlook
Args:
fp (string): file path of raw Outlook email
Returns:
Instance of MailParser
### Response:
def from_file_msg(cls, fp):
"""
Init a new object from an Outlook message file,
mime type: application/vnd.ms-outlook
Args:
fp (string): file path of raw Outlook email
Returns:
Instance of MailParser
"""
log.debug("Parsing email from file Outlook")
f, _ = msgconvert(fp)
return cls.from_file(f, True) |
def _get_current_object(self):
"""Get current object.
This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
loc = object.__getattribute__(self, '_Proxy__local')
if not hasattr(loc, '__release_local__'):
return loc(*self.__args, **self.__kwargs)
try: # pragma: no cover
# not sure what this is about
return getattr(loc, self.__name__)
except AttributeError: # pragma: no cover
raise RuntimeError('no object bound to {0.__name__}'.format(self)) | Get current object.
This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context. | Below is the instruction that describes the task:
### Input:
Get current object.
This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
### Response:
def _get_current_object(self):
"""Get current object.
This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
loc = object.__getattribute__(self, '_Proxy__local')
if not hasattr(loc, '__release_local__'):
return loc(*self.__args, **self.__kwargs)
try: # pragma: no cover
# not sure what this is about
return getattr(loc, self.__name__)
except AttributeError: # pragma: no cover
raise RuntimeError('no object bound to {0.__name__}'.format(self)) |
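A stripped-down illustration of a proxy resolving its target lazily from a factory; the class and names are invented for the sketch and omit the attribute-forwarding machinery a real Proxy needs:
class LazyProxy:
    def __init__(self, factory, *args, **kwargs):
        self._factory, self._args, self._kwargs = factory, args, kwargs

    def _get_current_object(self):
        # nothing is bound up front; the object is built on demand from the factory
        return self._factory(*self._args, **self._kwargs)

proxy = LazyProxy(dict, a=1)
print(proxy._get_current_object())  # {'a': 1}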
def remove_team_member(self, account_id=None, email_address=None):
''' Remove a user from your Team
Args:
account_id (str): The id of the account of the user to remove from your team.
email_address (str): The email address of the account to remove from your team. The account id prevails if both account_id and email_address are provided.
Returns:
A Team object
'''
return self._add_remove_team_member(self.TEAM_REMOVE_MEMBER_URL, email_address, account_id) | Remove a user from your Team
Args:
account_id (str): The id of the account of the user to remove from your team.
email_address (str): The email address of the account to remove from your team. The account id prevails if both account_id and email_address are provided.
Returns:
A Team object | Below is the instruction that describes the task:
### Input:
Remove a user from your Team
Args:
account_id (str): The id of the account of the user to remove from your team.
email_address (str): The email address of the account to remove from your team. The account id prevails if both account_id and email_address are provided.
Returns:
A Team object
### Response:
def remove_team_member(self, account_id=None, email_address=None):
''' Remove a user from your Team
Args:
account_id (str): The id of the account of the user to remove from your team.
email_address (str): The email address of the account to remove from your team. The account id prevails if both account_id and email_address are provided.
Returns:
A Team object
'''
return self._add_remove_team_member(self.TEAM_REMOVE_MEMBER_URL, email_address, account_id) |
def get_minimal_subgraph(g, nodes):
"""
given a set of nodes, extract a subgraph that excludes non-informative nodes - i.e.
those that are not MRCAs of pairs of existing nodes.
Note: no property chain reasoning is performed. As a result, edge labels are lost.
"""
logging.info("Slimming {} to {}".format(g,nodes))
# maps ancestor nodes to members of the focus node set they subsume
mm = {}
subnodes = set()
for n in nodes:
subnodes.add(n)
ancs = nx.ancestors(g, n)
ancs.add(n)
for a in ancs:
subnodes.add(a)
if a not in mm:
mm[a] = set()
mm[a].add(n)
# merge graph
egraph = nx.MultiDiGraph()
# TODO: ensure edge labels are preserved
for a, aset in mm.items():
for p in g.predecessors(a):
logging.info(" cmp {} -> {} // {} {}".format(len(aset),len(mm[p]), a, p))
if p in mm and len(aset) == len(mm[p]):
egraph.add_edge(p, a)
egraph.add_edge(a, p)
logging.info("will merge {} <-> {} (members identical)".format(p,a))
nmap = {}
leafmap = {}
disposable = set()
for cliq in nx.strongly_connected_components(egraph):
leaders = set()
leafs = set()
for n in cliq:
is_src = False
if n in nodes:
logging.info("Preserving: {} in {}".format(n,cliq))
leaders.add(n)
is_src = True
is_leaf = True
for p in g.successors(n):
if p in cliq:
is_leaf = False
if not(is_leaf or is_src):
disposable.add(n)
if is_leaf:
logging.info("Clique leaf: {} in {}".format(n,cliq))
leafs.add(n)
leader = None
if len(leaders) > 1:
logging.info("UHOH: {}".format(leaders))
if len(leaders) > 0:
leader = list(leaders)[0]
else:
leader = list(leafs)[0]
leafmap[n] = leafs
subg = g.subgraph(subnodes)
fg = remove_nodes(subg, disposable)
return fg | given a set of nodes, extract a subgraph that excludes non-informative nodes - i.e.
those that are not MRCAs of pairs of existing nodes.
Note: no property chain reasoning is performed. As a result, edge labels are lost. | Below is the instruction that describes the task:
### Input:
given a set of nodes, extract a subgraph that excludes non-informative nodes - i.e.
those that are not MRCAs of pairs of existing nodes.
Note: no property chain reasoning is performed. As a result, edge labels are lost.
### Response:
def get_minimal_subgraph(g, nodes):
"""
given a set of nodes, extract a subgraph that excludes non-informative nodes - i.e.
those that are not MRCAs of pairs of existing nodes.
Note: no property chain reasoning is performed. As a result, edge labels are lost.
"""
logging.info("Slimming {} to {}".format(g,nodes))
# maps ancestor nodes to members of the focus node set they subsume
mm = {}
subnodes = set()
for n in nodes:
subnodes.add(n)
ancs = nx.ancestors(g, n)
ancs.add(n)
for a in ancs:
subnodes.add(a)
if a not in mm:
mm[a] = set()
mm[a].add(n)
# merge graph
egraph = nx.MultiDiGraph()
# TODO: ensure edge labels are preserved
for a, aset in mm.items():
for p in g.predecessors(a):
logging.info(" cmp {} -> {} // {} {}".format(len(aset),len(mm[p]), a, p))
if p in mm and len(aset) == len(mm[p]):
egraph.add_edge(p, a)
egraph.add_edge(a, p)
logging.info("will merge {} <-> {} (members identical)".format(p,a))
nmap = {}
leafmap = {}
disposable = set()
for cliq in nx.strongly_connected_components(egraph):
leaders = set()
leafs = set()
for n in cliq:
is_src = False
if n in nodes:
logging.info("Preserving: {} in {}".format(n,cliq))
leaders.add(n)
is_src = True
is_leaf = True
for p in g.successors(n):
if p in cliq:
is_leaf = False
if not(is_leaf or is_src):
disposable.add(n)
if is_leaf:
logging.info("Clique leaf: {} in {}".format(n,cliq))
leafs.add(n)
leader = None
if len(leaders) > 1:
logging.info("UHOH: {}".format(leaders))
if len(leaders) > 0:
leader = list(leaders)[0]
else:
leader = list(leafs)[0]
leafmap[n] = leafs
subg = g.subgraph(subnodes)
fg = remove_nodes(subg, disposable)
return fg |
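The ancestor bookkeeping at the start of the function can be tried on a toy DAG; only networkx is assumed:
import networkx as nx

g = nx.DiGraph([("a", "b"), ("b", "c"), ("b", "d")])
for n in ("c", "d"):
    ancs = nx.ancestors(g, n)
    ancs.add(n)
    print(n, sorted(ancs))   # c ['a', 'b', 'c']  /  d ['a', 'b', 'd']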
def calc_containment(self):
"""Calculate PSF containment."""
hists = self.hists
hists_out = self._hists_eff
quantiles = [0.34, 0.68, 0.90, 0.95]
cth_axis_idx = dict(evclass=2, evtype=3)
for k in ['evclass']: # ,'evtype']:
print(k)
non = hists['%s_psf_on' % k]
noff = hists['%s_psf_off' % k]
alpha = hists['%s_alpha' % k][..., None]
if k == 'evclass':
sep = self._sep_bins[None, :, None, 1:]
else:
sep = self._sep_bins[None, None, :, None, 1:]
qval, qerr = calc_quantiles(sep, non, noff, alpha, quantiles)
for i, q in enumerate(quantiles):
hists_out['%s_cth_q%2i' % (k, q * 100)] = qval[i]
hists_out['%s_cth_q%2i_err' % (k, q * 100)] = qerr[i]
non = np.sum(non, axis=cth_axis_idx[k])
noff = np.sum(noff, axis=cth_axis_idx[k])
alpha = np.squeeze(alpha, axis=cth_axis_idx[k])
sep = np.squeeze(sep, axis=cth_axis_idx[k])
qval, qerr = calc_quantiles(sep, non, noff, alpha, quantiles)
for i, q in enumerate(quantiles):
hists_out['%s_q%2i' % (k, q * 100)] = qval[i]
hists_out['%s_q%2i_err' % (k, q * 100)] = qerr[i] | Calculate PSF containment. | Below is the instruction that describes the task:
### Input:
Calculate PSF containment.
### Response:
def calc_containment(self):
"""Calculate PSF containment."""
hists = self.hists
hists_out = self._hists_eff
quantiles = [0.34, 0.68, 0.90, 0.95]
cth_axis_idx = dict(evclass=2, evtype=3)
for k in ['evclass']: # ,'evtype']:
print(k)
non = hists['%s_psf_on' % k]
noff = hists['%s_psf_off' % k]
alpha = hists['%s_alpha' % k][..., None]
if k == 'evclass':
sep = self._sep_bins[None, :, None, 1:]
else:
sep = self._sep_bins[None, None, :, None, 1:]
qval, qerr = calc_quantiles(sep, non, noff, alpha, quantiles)
for i, q in enumerate(quantiles):
hists_out['%s_cth_q%2i' % (k, q * 100)] = qval[i]
hists_out['%s_cth_q%2i_err' % (k, q * 100)] = qerr[i]
non = np.sum(non, axis=cth_axis_idx[k])
noff = np.sum(noff, axis=cth_axis_idx[k])
alpha = np.squeeze(alpha, axis=cth_axis_idx[k])
sep = np.squeeze(sep, axis=cth_axis_idx[k])
qval, qerr = calc_quantiles(sep, non, noff, alpha, quantiles)
for i, q in enumerate(quantiles):
hists_out['%s_q%2i' % (k, q * 100)] = qval[i]
hists_out['%s_q%2i_err' % (k, q * 100)] = qerr[i] |
def build_update_script(file_name, slot_assignments=None, os_info=None, sensor_graph=None,
app_info=None, use_safeupdate=False):
"""Build a trub script that loads given firmware into the given slots.
slot_assignments should be a list of tuples in the following form:
("slot X" or "controller", firmware_image_name)
The output of this autobuild action will be a trub script in
build/output/<file_name> that assigns the given firmware to each slot in
the order specified in the slot_assignments list.
Args:
file_name (str): The name of the output file that we should create.
This file name should end in .trub
slot_assignments (list of (str, str)): A list of tuples containing
the slot name and the firmware image that we should use to build
our update script. Optional
os_info (tuple(int, str)): A tuple of OS version tag and X.Y version
number that will be set as part of the OTA script if included. Optional.
sensor_graph (str): Name of sgf file. Optional.
app_info (tuple(int, str)): A tuple of App version tag and X.Y version
number that will be set as part of the OTA script if included. Optional.
use_safeupdate (bool): Enables safe firmware update
"""
resolver = ProductResolver.Create()
env = Environment(tools=[])
files = []
if slot_assignments is not None:
slots = [_parse_slot(x[0]) for x in slot_assignments]
files = [ensure_image_is_hex(resolver.find_unique("firmware_image", x[1]).full_path) for x in slot_assignments]
env['SLOTS'] = slots
else:
env['SLOTS'] = None
env['USE_SAFEUPDATE'] = use_safeupdate
env['OS_INFO'] = os_info
env['APP_INFO'] = app_info
env['UPDATE_SENSORGRAPH'] = False
if sensor_graph is not None:
files.append(sensor_graph)
env['UPDATE_SENSORGRAPH'] = True
env.Command([os.path.join('build', 'output', file_name)], files,
action=Action(_build_reflash_script_action, "Building TRUB script at $TARGET")) | Build a trub script that loads given firmware into the given slots.
slot_assignments should be a list of tuples in the following form:
("slot X" or "controller", firmware_image_name)
The output of this autobuild action will be a trub script in
build/output/<file_name> that assigns the given firmware to each slot in
the order specified in the slot_assignments list.
Args:
file_name (str): The name of the output file that we should create.
This file name should end in .trub
slot_assignments (list of (str, str)): A list of tuples containing
the slot name and the firmware image that we should use to build
our update script. Optional
os_info (tuple(int, str)): A tuple of OS version tag and X.Y version
number that will be set as part of the OTA script if included. Optional.
sensor_graph (str): Name of sgf file. Optional.
app_info (tuple(int, str)): A tuple of App version tag and X.Y version
number that will be set as part of the OTA script if included. Optional.
use_safeupdate (bool): Enables safe firmware update | Below is the instruction that describes the task:
### Input:
Build a trub script that loads given firmware into the given slots.
slot_assignments should be a list of tuples in the following form:
("slot X" or "controller", firmware_image_name)
The output of this autobuild action will be a trub script in
build/output/<file_name> that assigns the given firmware to each slot in
the order specified in the slot_assignments list.
Args:
file_name (str): The name of the output file that we should create.
This file name should end in .trub
slot_assignments (list of (str, str)): A list of tuples containing
the slot name and the firmware image that we should use to build
our update script. Optional
os_info (tuple(int, str)): A tuple of OS version tag and X.Y version
number that will be set as part of the OTA script if included. Optional.
sensor_graph (str): Name of sgf file. Optional.
app_info (tuple(int, str)): A tuple of App version tag and X.Y version
number that will be set as part of the OTA script if included. Optional.
use_safeupdate (bool): Enables safe firmware update
### Response:
def build_update_script(file_name, slot_assignments=None, os_info=None, sensor_graph=None,
app_info=None, use_safeupdate=False):
"""Build a trub script that loads given firmware into the given slots.
slot_assignments should be a list of tuples in the following form:
("slot X" or "controller", firmware_image_name)
The output of this autobuild action will be a trub script in
build/output/<file_name> that assigns the given firmware to each slot in
the order specified in the slot_assignments list.
Args:
file_name (str): The name of the output file that we should create.
This file name should end in .trub
slot_assignments (list of (str, str)): A list of tuples containing
the slot name and the firmware image that we should use to build
our update script. Optional
os_info (tuple(int, str)): A tuple of OS version tag and X.Y version
number that will be set as part of the OTA script if included. Optional.
sensor_graph (str): Name of sgf file. Optional.
app_info (tuple(int, str)): A tuple of App version tag and X.Y version
number that will be set as part of the OTA script if included. Optional.
use_safeupdate (bool): Enables safe firmware update
"""
resolver = ProductResolver.Create()
env = Environment(tools=[])
files = []
if slot_assignments is not None:
slots = [_parse_slot(x[0]) for x in slot_assignments]
files = [ensure_image_is_hex(resolver.find_unique("firmware_image", x[1]).full_path) for x in slot_assignments]
env['SLOTS'] = slots
else:
env['SLOTS'] = None
env['USE_SAFEUPDATE'] = use_safeupdate
env['OS_INFO'] = os_info
env['APP_INFO'] = app_info
env['UPDATE_SENSORGRAPH'] = False
if sensor_graph is not None:
files.append(sensor_graph)
env['UPDATE_SENSORGRAPH'] = True
env.Command([os.path.join('build', 'output', file_name)], files,
action=Action(_build_reflash_script_action, "Building TRUB script at $TARGET")) |
def _remove_word(completer):
"""
Used to remove words from the completers
"""
def inner(word: str):
try:
completer.words.remove(word)
except Exception:
pass
return inner | Used to remove words from the completers | Below is the instruction that describes the task:
### Input:
Used to remove words from the completers
### Response:
def _remove_word(completer):
"""
Used to remove words from the completers
"""
def inner(word: str):
try:
completer.words.remove(word)
except Exception:
pass
return inner |
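The closure above only needs an object exposing a mutable words list; a tiny stand-in completer, assuming the _remove_word function above is in scope:
class FakeCompleter:
    def __init__(self, words):
        self.words = list(words)

completer = FakeCompleter(["alpha", "beta"])
remove = _remove_word(completer)
remove("alpha")
remove("missing")          # silently ignored, matching the broad except
print(completer.words)     # ['beta']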
def as_member(entity, parent=None):
"""
Adapts an object to a location aware member resource.
:param entity: a domain object for which a resource adapter has been
registered
:type entity: an object implementing
:class:`everest.entities.interfaces.IEntity`
:param parent: optional parent collection resource to make the new member
a child of
:type parent: an object implementing
:class:`everest.resources.interfaces.ICollectionResource`
:returns: an object implementing
:class:`everest.resources.interfaces.IMemberResource`
"""
reg = get_current_registry()
rc = reg.getAdapter(entity, IMemberResource)
if not parent is None:
rc.__parent__ = parent # interface method pylint: disable=E1121
return rc | Adapts an object to a location aware member resource.
:param entity: a domain object for which a resource adapter has been
registered
:type entity: an object implementing
:class:`everest.entities.interfaces.IEntity`
:param parent: optional parent collection resource to make the new member
a child of
:type parent: an object implementing
:class:`everest.resources.interfaces.ICollectionResource`
:returns: an object implementing
:class:`everest.resources.interfaces.IMemberResource` | Below is the instruction that describes the task:
### Input:
Adapts an object to a location aware member resource.
:param entity: a domain object for which a resource adapter has been
registered
:type entity: an object implementing
:class:`everest.entities.interfaces.IEntity`
:param parent: optional parent collection resource to make the new member
a child of
:type parent: an object implementing
:class:`everest.resources.interfaces.ICollectionResource`
:returns: an object implementing
:class:`everest.resources.interfaces.IMemberResource`
### Response:
def as_member(entity, parent=None):
"""
Adapts an object to a location aware member resource.
:param entity: a domain object for which a resource adapter has been
registered
:type entity: an object implementing
:class:`everest.entities.interfaces.IEntity`
:param parent: optional parent collection resource to make the new member
a child of
:type parent: an object implementing
:class:`everest.resources.interfaces.ICollectionResource`
:returns: an object implementing
:class:`everest.resources.interfaces.IMemberResource`
"""
reg = get_current_registry()
rc = reg.getAdapter(entity, IMemberResource)
if not parent is None:
rc.__parent__ = parent # interface method pylint: disable=E1121
return rc |
def _convert_to_config(self):
"""self.parsed_data->self.config, parse unrecognized extra args via KVLoader."""
# remove subconfigs list from namespace before transforming the Namespace
if '_flags' in self.parsed_data:
subcs = self.parsed_data._flags
del self.parsed_data._flags
else:
subcs = []
for k, v in vars(self.parsed_data).iteritems():
if v is None:
# it was a flag that shares the name of an alias
subcs.append(self.alias_flags[k])
else:
# eval the KV assignment
self._exec_config_str(k, v)
for subc in subcs:
self._load_flag(subc)
if self.extra_args:
sub_parser = KeyValueConfigLoader()
sub_parser.load_config(self.extra_args)
self.config._merge(sub_parser.config)
self.extra_args = sub_parser.extra_args | self.parsed_data->self.config, parse unrecognized extra args via KVLoader. | Below is the instruction that describes the task:
### Input:
self.parsed_data->self.config, parse unrecognized extra args via KVLoader.
### Response:
def _convert_to_config(self):
"""self.parsed_data->self.config, parse unrecognized extra args via KVLoader."""
# remove subconfigs list from namespace before transforming the Namespace
if '_flags' in self.parsed_data:
subcs = self.parsed_data._flags
del self.parsed_data._flags
else:
subcs = []
for k, v in vars(self.parsed_data).iteritems():
if v is None:
# it was a flag that shares the name of an alias
subcs.append(self.alias_flags[k])
else:
# eval the KV assignment
self._exec_config_str(k, v)
for subc in subcs:
self._load_flag(subc)
if self.extra_args:
sub_parser = KeyValueConfigLoader()
sub_parser.load_config(self.extra_args)
self.config._merge(sub_parser.config)
self.extra_args = sub_parser.extra_args |
def decode_keys(store, encoding='utf-8'):
"""
If a dictionary has keys that are bytes decode them to a str.
Parameters
---------
store : dict
Dictionary with data
Returns
---------
result : dict
Values are untouched but keys that were bytes
are converted to ASCII strings.
Example
-----------
In [1]: d
Out[1]: {1020: 'nah', b'hi': 'stuff'}
In [2]: trimesh.util.decode_keys(d)
Out[2]: {1020: 'nah', 'hi': 'stuff'}
"""
keys = store.keys()
for key in keys:
if hasattr(key, 'decode'):
decoded = key.decode(encoding)
if key != decoded:
store[key.decode(encoding)] = store[key]
store.pop(key)
return store | If a dictionary has keys that are bytes decode them to a str.
Parameters
---------
store : dict
Dictionary with data
Returns
---------
result : dict
Values are untouched but keys that were bytes
are converted to ASCII strings.
Example
-----------
In [1]: d
Out[1]: {1020: 'nah', b'hi': 'stuff'}
In [2]: trimesh.util.decode_keys(d)
Out[2]: {1020: 'nah', 'hi': 'stuff'} | Below is the instruction that describes the task:
### Input:
If a dictionary has keys that are bytes decode them to a str.
Parameters
---------
store : dict
Dictionary with data
Returns
---------
result : dict
Values are untouched but keys that were bytes
are converted to ASCII strings.
Example
-----------
In [1]: d
Out[1]: {1020: 'nah', b'hi': 'stuff'}
In [2]: trimesh.util.decode_keys(d)
Out[2]: {1020: 'nah', 'hi': 'stuff'}
### Response:
def decode_keys(store, encoding='utf-8'):
"""
If a dictionary has keys that are bytes decode them to a str.
Parameters
---------
store : dict
Dictionary with data
Returns
---------
result : dict
Values are untouched but keys that were bytes
are converted to ASCII strings.
Example
-----------
In [1]: d
Out[1]: {1020: 'nah', b'hi': 'stuff'}
In [2]: trimesh.util.decode_keys(d)
Out[2]: {1020: 'nah', 'hi': 'stuff'}
"""
keys = store.keys()
for key in keys:
if hasattr(key, 'decode'):
decoded = key.decode(encoding)
if key != decoded:
store[key.decode(encoding)] = store[key]
store.pop(key)
return store |
def _buildTerms(self):
""" Builds a data structure indexing the terms
longitude by sign and object.
"""
termLons = tables.termLons(tables.EGYPTIAN_TERMS)
res = {}
for (ID, sign, lon) in termLons:
try:
res[sign][ID] = lon
except KeyError:
res[sign] = {}
res[sign][ID] = lon
return res | Builds a data structure indexing the terms
longitude by sign and object. | Below is the instruction that describes the task:
### Input:
Builds a data structure indexing the terms
longitude by sign and object.
### Response:
def _buildTerms(self):
""" Builds a data structure indexing the terms
longitude by sign and object.
"""
termLons = tables.termLons(tables.EGYPTIAN_TERMS)
res = {}
for (ID, sign, lon) in termLons:
try:
res[sign][ID] = lon
except KeyError:
res[sign] = {}
res[sign][ID] = lon
return res |
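The try/except nesting is one way to build a nested mapping; collections.defaultdict gives the same shape, shown here with a hypothetical three-tuple term list:
from collections import defaultdict

term_lons = [("Venus", "Aries", 0), ("Mercury", "Aries", 6), ("Jupiter", "Taurus", 0)]
res = defaultdict(dict)
for obj_id, sign, lon in term_lons:
    res[sign][obj_id] = lon
print(dict(res))  # {'Aries': {'Venus': 0, 'Mercury': 6}, 'Taurus': {'Jupiter': 0}}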
async def sign(self, message: bytes, verkey: str = None) -> bytes:
"""
Derive signing key and Sign message; return signature. Raise WalletState if wallet is closed.
Raise AbsentMessage for missing message, or WalletState if wallet is closed.
:param message: Content to sign, as bytes
:param verkey: verification key corresponding to private signing key (default anchor's own)
:return: signature, as bytes
"""
LOGGER.debug('Wallet.sign >>> message: %s, verkey: %s', message, verkey)
if not message:
LOGGER.debug('Wallet.sign <!< No message to sign')
raise AbsentMessage('No message to sign')
if not self.handle:
LOGGER.debug('Wallet.sign <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
rv = await crypto.crypto_sign(self.handle, verkey or self.verkey, message)
LOGGER.debug('Wallet.sign <<< %s', rv)
return rv | Derive signing key and Sign message; return signature. Raise WalletState if wallet is closed.
Raise AbsentMessage for missing message, or WalletState if wallet is closed.
:param message: Content to sign, as bytes
:param verkey: verification key corresponding to private signing key (default anchor's own)
:return: signature, as bytes | Below is the instruction that describes the task:
### Input:
Derive signing key and Sign message; return signature. Raise WalletState if wallet is closed.
Raise AbsentMessage for missing message, or WalletState if wallet is closed.
:param message: Content to sign, as bytes
:param verkey: verification key corresponding to private signing key (default anchor's own)
:return: signature, as bytes
### Response:
async def sign(self, message: bytes, verkey: str = None) -> bytes:
"""
Derive signing key and Sign message; return signature. Raise WalletState if wallet is closed.
Raise AbsentMessage for missing message, or WalletState if wallet is closed.
:param message: Content to sign, as bytes
:param verkey: verification key corresponding to private signing key (default anchor's own)
:return: signature, as bytes
"""
LOGGER.debug('Wallet.sign >>> message: %s, verkey: %s', message, verkey)
if not message:
LOGGER.debug('Wallet.sign <!< No message to sign')
raise AbsentMessage('No message to sign')
if not self.handle:
LOGGER.debug('Wallet.sign <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
rv = await crypto.crypto_sign(self.handle, verkey or self.verkey, message)
LOGGER.debug('Wallet.sign <<< %s', rv)
return rv |
def stdio_as(stdout_fd, stderr_fd, stdin_fd):
"""Redirect sys.{stdout, stderr, stdin} to alternate file descriptors.
As a special case, if a given destination fd is `-1`, we will replace it with an open file handle
to `/dev/null`.
NB: If the filehandles for sys.{stdout, stderr, stdin} have previously been closed, it's
possible that the OS has repurposed fds `0, 1, 2` to represent other files or sockets. It's
impossible for this method to locate all python objects which refer to those fds, so it's up
to the caller to guarantee that `0, 1, 2` are safe to replace.
In Python3, the streams expect unicode. To write and read bytes, access their buffer, e.g. `stdin.buffer.read()`.
"""
with _stdio_stream_as(stdin_fd, 0, 'stdin', 'r'),\
_stdio_stream_as(stdout_fd, 1, 'stdout', 'w'),\
_stdio_stream_as(stderr_fd, 2, 'stderr', 'w'):
yield | Redirect sys.{stdout, stderr, stdin} to alternate file descriptors.
As a special case, if a given destination fd is `-1`, we will replace it with an open file handle
to `/dev/null`.
NB: If the filehandles for sys.{stdout, stderr, stdin} have previously been closed, it's
possible that the OS has repurposed fds `0, 1, 2` to represent other files or sockets. It's
impossible for this method to locate all python objects which refer to those fds, so it's up
to the caller to guarantee that `0, 1, 2` are safe to replace.
In Python3, the streams expect unicode. To write and read bytes, access their buffer, e.g. `stdin.buffer.read()`. | Below is the instruction that describes the task:
### Input:
Redirect sys.{stdout, stderr, stdin} to alternate file descriptors.
As a special case, if a given destination fd is `-1`, we will replace it with an open file handle
to `/dev/null`.
NB: If the filehandles for sys.{stdout, stderr, stdin} have previously been closed, it's
possible that the OS has repurposed fds `0, 1, 2` to represent other files or sockets. It's
impossible for this method to locate all python objects which refer to those fds, so it's up
to the caller to guarantee that `0, 1, 2` are safe to replace.
In Python3, the streams expect unicode. To write and read bytes, access their buffer, e.g. `stdin.buffer.read()`.
### Response:
def stdio_as(stdout_fd, stderr_fd, stdin_fd):
"""Redirect sys.{stdout, stderr, stdin} to alternate file descriptors.
As a special case, if a given destination fd is `-1`, we will replace it with an open file handle
to `/dev/null`.
NB: If the filehandles for sys.{stdout, stderr, stdin} have previously been closed, it's
possible that the OS has repurposed fds `0, 1, 2` to represent other files or sockets. It's
impossible for this method to locate all python objects which refer to those fds, so it's up
to the caller to guarantee that `0, 1, 2` are safe to replace.
In Python3, the streams expect unicode. To write and read bytes, access their buffer, e.g. `stdin.buffer.read()`.
"""
with _stdio_stream_as(stdin_fd, 0, 'stdin', 'r'),\
_stdio_stream_as(stdout_fd, 1, 'stdout', 'w'),\
_stdio_stream_as(stderr_fd, 2, 'stderr', 'w'):
yield |
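A minimal, self-contained sketch of the same fd-swapping idea using only the standard library; the helper name redirect_fd and the explicit /dev/null fallback are illustrative, not part of the original _stdio_stream_as implementation.
import contextlib
import os

@contextlib.contextmanager
def redirect_fd(dest_fd, std_fd):
    # Mirror the special case above: -1 means "send this stream to /dev/null".
    opened = None
    if dest_fd == -1:
        opened = dest_fd = os.open(os.devnull, os.O_RDWR)
    saved_fd = os.dup(std_fd)       # remember where fd 0/1/2 pointed
    os.dup2(dest_fd, std_fd)        # repoint it at the destination
    try:
        yield
    finally:
        os.dup2(saved_fd, std_fd)   # restore the original stream
        os.close(saved_fd)
        if opened is not None:
            os.close(opened)

# Silence stdout (fd 1) for the duration of the block.
with redirect_fd(-1, 1):
    print("this line goes to /dev/null", flush=True)
print("back on the real stdout")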
def iter_neurites(obj, mapfun=None, filt=None, neurite_order=NeuriteIter.FileOrder):
'''Iterator to a neurite, neuron or neuron population
Applies optional neurite filter and mapping functions.
Parameters:
obj: a neurite, neuron or neuron population.
mapfun: optional neurite mapping function.
filt: optional neurite filter function.
neurite_order (NeuriteIter): order upon which neurites should be iterated
- NeuriteIter.FileOrder: order of appearance in the file
- NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical
Examples:
Get the number of points in each neurite in a neuron population
>>> from neurom.core import iter_neurites
>>> n_points = [n for n in iter_neurites(pop, lambda x : len(x.points))]
Get the number of points in each axon in a neuron population
>>> import neurom as nm
>>> from neurom.core import iter_neurites
>>> filter = lambda n : n.type == nm.AXON
>>> mapping = lambda n : len(n.points)
>>> n_points = [n for n in iter_neurites(pop, mapping, filter)]
'''
neurites = ((obj,) if isinstance(obj, Neurite) else
obj.neurites if hasattr(obj, 'neurites') else obj)
if neurite_order == NeuriteIter.NRN:
last_position = max(NRN_ORDER.values()) + 1
neurites = sorted(neurites, key=lambda neurite: NRN_ORDER.get(neurite.type, last_position))
neurite_iter = iter(neurites) if filt is None else filter(filt, neurites)
return neurite_iter if mapfun is None else map(mapfun, neurite_iter) | Iterator to a neurite, neuron or neuron population
Applies optional neurite filter and mapping functions.
Parameters:
obj: a neurite, neuron or neuron population.
mapfun: optional neurite mapping function.
filt: optional neurite filter function.
neurite_order (NeuriteIter): order upon which neurites should be iterated
- NeuriteIter.FileOrder: order of appearance in the file
- NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical
Examples:
Get the number of points in each neurite in a neuron population
>>> from neurom.core import iter_neurites
>>> n_points = [n for n in iter_neurites(pop, lambda x : len(x.points))]
Get the number of points in each axon in a neuron population
>>> import neurom as nm
>>> from neurom.core import iter_neurites
>>> filter = lambda n : n.type == nm.AXON
>>> mapping = lambda n : len(n.points)
>>> n_points = [n for n in iter_neurites(pop, mapping, filter)] | Below is the instruction that describes the task:
### Input:
Iterator to a neurite, neuron or neuron population
Applies optional neurite filter and mapping functions.
Parameters:
obj: a neurite, neuron or neuron population.
mapfun: optional neurite mapping function.
filt: optional neurite filter function.
neurite_order (NeuriteIter): order upon which neurites should be iterated
- NeuriteIter.FileOrder: order of appearance in the file
- NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical
Examples:
Get the number of points in each neurite in a neuron population
>>> from neurom.core import iter_neurites
>>> n_points = [n for n in iter_neurites(pop, lambda x : len(x.points))]
Get the number of points in each axon in a neuron population
>>> import neurom as nm
>>> from neurom.core import iter_neurites
>>> filter = lambda n : n.type == nm.AXON
>>> mapping = lambda n : len(n.points)
>>> n_points = [n for n in iter_neurites(pop, mapping, filter)]
### Response:
def iter_neurites(obj, mapfun=None, filt=None, neurite_order=NeuriteIter.FileOrder):
'''Iterator to a neurite, neuron or neuron population
Applies optional neurite filter and mapping functions.
Parameters:
obj: a neurite, neuron or neuron population.
mapfun: optional neurite mapping function.
filt: optional neurite filter function.
neurite_order (NeuriteIter): order upon which neurites should be iterated
- NeuriteIter.FileOrder: order of appearance in the file
- NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical
Examples:
Get the number of points in each neurite in a neuron population
>>> from neurom.core import iter_neurites
>>> n_points = [n for n in iter_neurites(pop, lambda x : len(x.points))]
Get the number of points in each axon in a neuron population
>>> import neurom as nm
>>> from neurom.core import iter_neurites
>>> filter = lambda n : n.type == nm.AXON
>>> mapping = lambda n : len(n.points)
>>> n_points = [n for n in iter_neurites(pop, mapping, filter)]
'''
neurites = ((obj,) if isinstance(obj, Neurite) else
obj.neurites if hasattr(obj, 'neurites') else obj)
if neurite_order == NeuriteIter.NRN:
last_position = max(NRN_ORDER.values()) + 1
neurites = sorted(neurites, key=lambda neurite: NRN_ORDER.get(neurite.type, last_position))
neurite_iter = iter(neurites) if filt is None else filter(filt, neurites)
return neurite_iter if mapfun is None else map(mapfun, neurite_iter) |
def get_xml(self, fp, format=FORMAT_NATIVE):
"""
Returns the XML metadata for this source, converted to the requested format.
Converted metadata may not contain all the same information as the native format.
:param file fp: A path, or an open file-like object which the content should be written to.
:param str format: desired format for the output. This should be one of the available
formats from :py:meth:`.get_formats`, or :py:attr:`.FORMAT_NATIVE` for the native format.
If you pass this function an open file-like object as the fp parameter, the function will
not close that file for you.
"""
r = self._client.request('GET', getattr(self, format), stream=True)
filename = stream.stream_response_to_file(r, path=fp)
return filename | Returns the XML metadata for this source, converted to the requested format.
Converted metadata may not contain all the same information as the native format.
:param file fp: A path, or an open file-like object which the content should be written to.
:param str format: desired format for the output. This should be one of the available
formats from :py:meth:`.get_formats`, or :py:attr:`.FORMAT_NATIVE` for the native format.
If you pass this function an open file-like object as the fp parameter, the function will
not close that file for you. | Below is the instruction that describes the task:
### Input:
Returns the XML metadata for this source, converted to the requested format.
Converted metadata may not contain all the same information as the native format.
:param file fp: A path, or an open file-like object which the content should be written to.
:param str format: desired format for the output. This should be one of the available
formats from :py:meth:`.get_formats`, or :py:attr:`.FORMAT_NATIVE` for the native format.
If you pass this function an open file-like object as the fp parameter, the function will
not close that file for you.
### Response:
def get_xml(self, fp, format=FORMAT_NATIVE):
"""
Returns the XML metadata for this source, converted to the requested format.
Converted metadata may not contain all the same information as the native format.
:param file fp: A path, or an open file-like object which the content should be written to.
:param str format: desired format for the output. This should be one of the available
formats from :py:meth:`.get_formats`, or :py:attr:`.FORMAT_NATIVE` for the native format.
If you pass this function an open file-like object as the fp parameter, the function will
not close that file for you.
"""
r = self._client.request('GET', getattr(self, format), stream=True)
filename = stream.stream_response_to_file(r, path=fp)
return filename |
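The heavy lifting above is done by stream.stream_response_to_file; a hedged stand-in showing the usual pattern for writing a streamed requests response to either a path or an open binary file object (the _FakeResponse class exists only to keep the demo network-free).
class _FakeResponse:
    # Stand-in for a streamed requests.Response.
    def iter_content(self, chunk_size):
        yield b"<mets>"
        yield b"</mets>"

def stream_to_file(response, fp, chunk_size=64 * 1024):
    # Accept a filesystem path or an already-open binary file object and,
    # as the docstring above promises, only close what we opened ourselves.
    opened_here = not hasattr(fp, 'write')
    handle = open(fp, 'wb') if opened_here else fp
    try:
        for chunk in response.iter_content(chunk_size):
            if chunk:
                handle.write(chunk)
    finally:
        if opened_here:
            handle.close()
    return fp if opened_here else getattr(fp, 'name', None)

print(stream_to_file(_FakeResponse(), "metadata.xml"))  # metadata.xml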
def on_config_value_changed(self, config_m, prop_name, info):
"""Callback when a config value has been changed
:param ConfigModel config_m: The config model that has been changed
:param str prop_name: Should always be 'config'
:param dict info: Information e.g. about the changed config key
"""
config_key = info['args'][1]
if config_key in ["EXECUTION_TICKER_ENABLED"]:
self.check_configuration() | Callback when a config value has been changed
:param ConfigModel config_m: The config model that has been changed
:param str prop_name: Should always be 'config'
:param dict info: Information e.g. about the changed config key | Below is the instruction that describes the task:
### Input:
Callback when a config value has been changed
:param ConfigModel config_m: The config model that has been changed
:param str prop_name: Should always be 'config'
:param dict info: Information e.g. about the changed config key
### Response:
def on_config_value_changed(self, config_m, prop_name, info):
"""Callback when a config value has been changed
:param ConfigModel config_m: The config model that has been changed
:param str prop_name: Should always be 'config'
:param dict info: Information e.g. about the changed config key
"""
config_key = info['args'][1]
if config_key in ["EXECUTION_TICKER_ENABLED"]:
self.check_configuration() |
def get_pattern(self, field_name):
"""
Get a regular expression to match a formatting directive that references the given field name.
:param field_name: The name of the field to match (a string).
:returns: A compiled regular expression object.
"""
return re.compile(self.raw_pattern.replace(r'\w+', field_name), re.VERBOSE) | Get a regular expression to match a formatting directive that references the given field name.
:param field_name: The name of the field to match (a string).
:returns: A compiled regular expression object. | Below is the instruction that describes the task:
### Input:
Get a regular expression to match a formatting directive that references the given field name.
:param field_name: The name of the field to match (a string).
:returns: A compiled regular expression object.
### Response:
def get_pattern(self, field_name):
"""
Get a regular expression to match a formatting directive that references the given field name.
:param field_name: The name of the field to match (a string).
:returns: A compiled regular expression object.
"""
return re.compile(self.raw_pattern.replace(r'\w+', field_name), re.VERBOSE) |
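The trick above is to keep one re.VERBOSE template with \w+ where the field name goes and specialize it per field; a self-contained sketch with a made-up raw_pattern (the real template lives on the class).
import re

# Hypothetical template matching logging-style directives such as %(levelname)s.
raw_pattern = r'''
    %\(          # literal "%("
    \w+          # field name placeholder, swapped out below
    \)           # literal ")"
    [-\d.]*      # optional width / precision
    [a-zA-Z]     # conversion character
'''

def get_pattern(field_name):
    # Narrow the generic template down to one specific field name.
    return re.compile(raw_pattern.replace(r'\w+', field_name), re.VERBOSE)

print(bool(get_pattern('levelname').search('%(levelname)s %(message)s')))  # True
print(bool(get_pattern('asctime').search('%(levelname)s %(message)s')))    # False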
def populate_current_fields(abbr):
"""
Set/update _current_term and _current_session fields on all bills
for a given location.
"""
meta = db.metadata.find_one({'_id': abbr})
current_term = meta['terms'][-1]
current_session = current_term['sessions'][-1]
for bill in db.bills.find({settings.LEVEL_FIELD: abbr}):
if bill['session'] == current_session:
bill['_current_session'] = True
else:
bill['_current_session'] = False
if bill['session'] in current_term['sessions']:
bill['_current_term'] = True
else:
bill['_current_term'] = False
db.bills.save(bill, safe=True) | Set/update _current_term and _current_session fields on all bills
for a given location. | Below is the instruction that describes the task:
### Input:
Set/update _current_term and _current_session fields on all bills
for a given location.
### Response:
def populate_current_fields(abbr):
"""
Set/update _current_term and _current_session fields on all bills
for a given location.
"""
meta = db.metadata.find_one({'_id': abbr})
current_term = meta['terms'][-1]
current_session = current_term['sessions'][-1]
for bill in db.bills.find({settings.LEVEL_FIELD: abbr}):
if bill['session'] == current_session:
bill['_current_session'] = True
else:
bill['_current_session'] = False
if bill['session'] in current_term['sessions']:
bill['_current_term'] = True
else:
bill['_current_term'] = False
db.bills.save(bill, safe=True) |
def apply_numpy_specials(self, copy=True):
"""Convert isis special pixel values to numpy special pixel values.
======= =======
Isis Numpy
======= =======
Null nan
Lrs -inf
Lis -inf
His inf
Hrs inf
======= =======
Parameters
----------
copy : bool [True]
Whether to apply the new special values to a copy of the
pixel data and leave the original unaffected
Returns
-------
Numpy Array
A numpy array with special values converted to numpy's nan, inf,
and -inf
"""
if copy:
data = self.data.astype(numpy.float64)
elif self.data.dtype != numpy.float64:
data = self.data = self.data.astype(numpy.float64)
else:
data = self.data
data[data == self.specials['Null']] = numpy.nan
data[data < self.specials['Min']] = numpy.NINF
data[data > self.specials['Max']] = numpy.inf
return data | Convert isis special pixel values to numpy special pixel values.
======= =======
Isis Numpy
======= =======
Null nan
Lrs -inf
Lis -inf
His inf
Hrs inf
======= =======
Parameters
----------
copy : bool [True]
Whether to apply the new special values to a copy of the
pixel data and leave the original unaffected
Returns
-------
Numpy Array
A numpy array with special values converted to numpy's nan, inf,
and -inf | Below is the instruction that describes the task:
### Input:
Convert isis special pixel values to numpy special pixel values.
======= =======
Isis Numpy
======= =======
Null nan
Lrs -inf
Lis -inf
His inf
Hrs inf
======= =======
Parameters
----------
copy : bool [True]
Whether to apply the new special values to a copy of the
pixel data and leave the original unaffected
Returns
-------
Numpy Array
A numpy array with special values converted to numpy's nan, inf,
and -inf
### Response:
def apply_numpy_specials(self, copy=True):
"""Convert isis special pixel values to numpy special pixel values.
======= =======
Isis Numpy
======= =======
Null nan
Lrs -inf
Lis -inf
His inf
Hrs inf
======= =======
Parameters
----------
copy : bool [True]
Whether to apply the new special values to a copy of the
pixel data and leave the original unaffected
Returns
-------
Numpy Array
A numpy array with special values converted to numpy's nan, inf,
and -inf
"""
if copy:
data = self.data.astype(numpy.float64)
elif self.data.dtype != numpy.float64:
data = self.data = self.data.astype(numpy.float64)
else:
data = self.data
data[data == self.specials['Null']] = numpy.nan
data[data < self.specials['Min']] = numpy.NINF
data[data > self.specials['Max']] = numpy.inf
return data |
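The mapping itself is plain NumPy masking; a standalone sketch with placeholder sentinel values (the real Null/Min/Max come from the ISIS cube label, so the numbers below are assumptions).
import numpy as np

specials = {'Null': -3.4028227e38, 'Min': -3.4028223e38, 'Max': 3.4028223e38}

data = np.array([1.5, specials['Null'], 2.0, -1e39, 1e39], dtype=np.float64)
data[data == specials['Null']] = np.nan     # Null -> nan
data[data < specials['Min']] = -np.inf      # Lrs/Lis -> -inf
data[data > specials['Max']] = np.inf       # Hrs/His -> inf
print(data)  # 1.5, nan, 2.0, -inf, inf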
def prefixes(self):
"""
list all prefixes used
"""
pset = set()
for n in self.nodes():
pfx = self.prefix(n)
if pfx is not None:
pset.add(pfx)
return list(pset) | list all prefixes used | Below is the instruction that describes the task:
### Input:
list all prefixes used
### Response:
def prefixes(self):
"""
list all prefixes used
"""
pset = set()
for n in self.nodes():
pfx = self.prefix(n)
if pfx is not None:
pset.add(pfx)
return list(pset) |
def AddEvent(self, event):
"""Adds an event.
Args:
event (EventObject): event.
Raises:
IOError: when the storage writer is closed or
if the event data identifier type is not supported.
OSError: when the storage writer is closed or
if the event data identifier type is not supported.
"""
self._RaiseIfNotWritable()
# TODO: change to no longer allow event_data_identifier is None
# after refactoring every parser to generate event data.
event_data_identifier = event.GetEventDataIdentifier()
if event_data_identifier:
if not isinstance(event_data_identifier, identifiers.FakeIdentifier):
raise IOError('Unsupported event data identifier type: {0:s}'.format(
type(event_data_identifier)))
event = self._PrepareAttributeContainer(event)
self._events.append(event)
self.number_of_events += 1 | Adds an event.
Args:
event (EventObject): event.
Raises:
IOError: when the storage writer is closed or
if the event data identifier type is not supported.
OSError: when the storage writer is closed or
if the event data identifier type is not supported. | Below is the instruction that describes the task:
### Input:
Adds an event.
Args:
event (EventObject): event.
Raises:
IOError: when the storage writer is closed or
if the event data identifier type is not supported.
OSError: when the storage writer is closed or
if the event data identifier type is not supported.
### Response:
def AddEvent(self, event):
"""Adds an event.
Args:
event (EventObject): event.
Raises:
IOError: when the storage writer is closed or
if the event data identifier type is not supported.
OSError: when the storage writer is closed or
if the event data identifier type is not supported.
"""
self._RaiseIfNotWritable()
# TODO: change to no longer allow event_data_identifier is None
# after refactoring every parser to generate event data.
event_data_identifier = event.GetEventDataIdentifier()
if event_data_identifier:
if not isinstance(event_data_identifier, identifiers.FakeIdentifier):
raise IOError('Unsupported event data identifier type: {0:s}'.format(
type(event_data_identifier)))
event = self._PrepareAttributeContainer(event)
self._events.append(event)
self.number_of_events += 1 |
def get_items_of_delivery_note_per_page(self, delivery_note_id, per_page=1000, page=1):
"""
Get items of delivery note per page
:param delivery_note_id: the delivery note id
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list
"""
return self._get_resource_per_page(
resource=DELIVERY_NOTE_ITEMS,
per_page=per_page,
page=page,
params={'delivery_note_id': delivery_note_id},
) | Get items of delivery note per page
:param delivery_note_id: the delivery note id
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list | Below is the instruction that describes the task:
### Input:
Get items of delivery note per page
:param delivery_note_id: the delivery note id
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list
### Response:
def get_items_of_delivery_note_per_page(self, delivery_note_id, per_page=1000, page=1):
"""
Get items of delivery note per page
:param delivery_note_id: the delivery note id
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list
"""
return self._get_resource_per_page(
resource=DELIVERY_NOTE_ITEMS,
per_page=per_page,
page=page,
params={'delivery_note_id': delivery_note_id},
) |
def list(self, **kwargs):
""" https://api.slack.com/methods/groups.list
"""
if kwargs:
self.params.update(kwargs)
return FromUrl('https://slack.com/api/groups.list', self._requests)(data=self.params).get() | https://api.slack.com/methods/groups.list | Below is the instruction that describes the task:
### Input:
https://api.slack.com/methods/groups.list
### Response:
def list(self, **kwargs):
""" https://api.slack.com/methods/groups.list
"""
if kwargs:
self.params.update(kwargs)
return FromUrl('https://slack.com/api/groups.list', self._requests)(data=self.params).get() |
def start(ctx, file): # pylint:disable=redefined-builtin
"""Start a tensorboard deployment for project/experiment/experiment group.
Project tensorboard will aggregate all experiments under the project.
Experiment group tensorboard will aggregate all experiments under the group.
Experiment tensorboard will show all metrics for an experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Example: using the default tensorflow image 1.4.1.
\b
```bash
$ polyaxon tensorboard start
```
Example: with custom image and resources
\b
```bash
$ polyaxon tensorboard start -f file -f file_override ...
```
Example: starting a tensorboard for an experiment group
\b
```bash
$ polyaxon tensorboard -g 1 start -f file
```
Example: starting a tensorboard for an experiment
\b
```bash
$ polyaxon tensorboard -xp 112 start -f file
```
"""
specification = None
job_config = None
if file:
specification = check_polyaxonfile(file, log=False).specification
if specification:
# pylint:disable=protected-access
check_polyaxonfile_kind(specification=specification, kind=specification._TENSORBOARD)
job_config = specification.parsed_data
user, project_name = get_project_or_local(ctx.obj.get('project'))
group = ctx.obj.get('group')
experiment = ctx.obj.get('experiment')
if experiment:
try:
response = PolyaxonClient().experiment.start_tensorboard(
username=user,
project_name=project_name,
experiment_id=experiment,
job_config=job_config)
obj = 'experiment `{}`'.format(experiment)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not start tensorboard experiment `{}`.'.format(experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
elif group:
try:
response = PolyaxonClient().experiment_group.start_tensorboard(
username=user,
project_name=project_name,
group_id=group,
job_config=job_config)
obj = 'group `{}`'.format(group)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not start tensorboard group `{}`.'.format(group))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
else:
try:
response = PolyaxonClient().project.start_tensorboard(
username=user,
project_name=project_name,
job_config=job_config)
obj = 'project `{}`'.format(project_name)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not start tensorboard project `{}`.'.format(project_name))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
if response.status_code == 200:
Printer.print_header("A tensorboard for this {} is already running on:".format(obj))
click.echo(get_tensorboard_url(user=user,
project_name=project_name,
experiment=experiment,
group=group))
sys.exit(0)
if response.status_code != 201:
Printer.print_error('Something went wrong, Tensorboard was not created.')
sys.exit(1)
Printer.print_success('Tensorboard is being deployed for {}'.format(obj))
clint.textui.puts("It may take some time before you can access tensorboard.\n")
clint.textui.puts("Your tensorboard will be available on:\n")
with clint.textui.indent(4):
clint.textui.puts(get_tensorboard_url(user, project_name, experiment, group)) | Start a tensorboard deployment for project/experiment/experiment group.
Project tensorboard will aggregate all experiments under the project.
Experiment group tensorboard will aggregate all experiments under the group.
Experiment tensorboard will show all metrics for an experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Example: using the default tensorflow image 1.4.1.
\b
```bash
$ polyaxon tensorboard start
```
Example: with custom image and resources
\b
```bash
$ polyaxon tensorboard start -f file -f file_override ...
```
Example: starting a tensorboard for an experiment group
\b
```bash
$ polyaxon tensorboard -g 1 start -f file
```
Example: starting a tensorboard for an experiment
\b
```bash
$ polyaxon tensorboard -xp 112 start -f file
``` | Below is the instruction that describes the task:
### Input:
Start a tensorboard deployment for project/experiment/experiment group.
Project tensorboard will aggregate all experiments under the project.
Experiment group tensorboard will aggregate all experiments under the group.
Experiment tensorboard will show all metrics for an experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Example: using the default tensorflow image 1.4.1.
\b
```bash
$ polyaxon tensorboard start
```
Example: with custom image and resources
\b
```bash
$ polyaxon tensorboard start -f file -f file_override ...
```
Example: starting a tensorboard for an experiment group
\b
```bash
$ polyaxon tensorboard -g 1 start -f file
```
Example: starting a tensorboard for an experiment
\b
```bash
$ polyaxon tensorboard -xp 112 start -f file
```
### Response:
def start(ctx, file): # pylint:disable=redefined-builtin
"""Start a tensorboard deployment for project/experiment/experiment group.
Project tensorboard will aggregate all experiments under the project.
Experiment group tensorboard will aggregate all experiments under the group.
Experiment tensorboard will show all metrics for an experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Example: using the default tensorflow image 1.4.1.
\b
```bash
$ polyaxon tensorboard start
```
Example: with custom image and resources
\b
```bash
$ polyaxon tensorboard start -f file -f file_override ...
```
Example: starting a tensorboard for an experiment group
\b
```bash
$ polyaxon tensorboard -g 1 start -f file
```
Example: starting a tensorboard for an experiment
\b
```bash
$ polyaxon tensorboard -xp 112 start -f file
```
"""
specification = None
job_config = None
if file:
specification = check_polyaxonfile(file, log=False).specification
if specification:
# pylint:disable=protected-access
check_polyaxonfile_kind(specification=specification, kind=specification._TENSORBOARD)
job_config = specification.parsed_data
user, project_name = get_project_or_local(ctx.obj.get('project'))
group = ctx.obj.get('group')
experiment = ctx.obj.get('experiment')
if experiment:
try:
response = PolyaxonClient().experiment.start_tensorboard(
username=user,
project_name=project_name,
experiment_id=experiment,
job_config=job_config)
obj = 'experiment `{}`'.format(experiment)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not start tensorboard experiment `{}`.'.format(experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
elif group:
try:
response = PolyaxonClient().experiment_group.start_tensorboard(
username=user,
project_name=project_name,
group_id=group,
job_config=job_config)
obj = 'group `{}`'.format(group)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not start tensorboard group `{}`.'.format(group))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
else:
try:
response = PolyaxonClient().project.start_tensorboard(
username=user,
project_name=project_name,
job_config=job_config)
obj = 'project `{}`'.format(project_name)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not start tensorboard project `{}`.'.format(project_name))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
if response.status_code == 200:
Printer.print_header("A tensorboard for this {} is already running on:".format(obj))
click.echo(get_tensorboard_url(user=user,
project_name=project_name,
experiment=experiment,
group=group))
sys.exit(0)
if response.status_code != 201:
Printer.print_error('Something went wrong, Tensorboard was not created.')
sys.exit(1)
Printer.print_success('Tensorboard is being deployed for {}'.format(obj))
clint.textui.puts("It may take some time before you can access tensorboard.\n")
clint.textui.puts("Your tensorboard will be available on:\n")
with clint.textui.indent(4):
clint.textui.puts(get_tensorboard_url(user, project_name, experiment, group)) |
def read_channel_list_file(*source):
"""Read a `~gwpy.detector.ChannelList` from a Channel List File
"""
# read file(s)
config = configparser.ConfigParser(dict_type=OrderedDict)
source = file_list(source)
success_ = config.read(*source)
if len(success_) != len(source):
raise IOError("Failed to read one or more CLF files")
# create channel list
out = ChannelList()
out.source = source
append = out.append
# loop over all groups and channels
for group in config.sections():
params = OrderedDict(config.items(group))
channels = params.pop('channels').strip('\n').split('\n')
if 'flow' in params or 'fhigh' in params:
low = params.pop('flow', 0)
high = params.pop('fhigh', inf)
if isinstance(high, string_types) and high.lower() == 'nyquist':
high = inf
frange = float(low), float(high)
else:
frange = None
for channel in channels:
try:
match = CHANNEL_DEFINITION.match(channel).groupdict()
except AttributeError as exc:
exc.args = ('Cannot parse %r as channel list entry' % channel,)
raise
# remove Nones from match
match = dict((k, v) for k, v in match.items() if v is not None)
match.setdefault('safe', 'safe')
match.setdefault('fidelity', 'clean')
# create channel and copy group params
safe = match.get('safe', 'safe').lower() != 'unsafe'
channel = Channel(match.pop('name'), frequency_range=frange,
safe=safe, sample_rate=match.pop('sample_rate'))
channel.params = params.copy()
channel.params.update(match)
channel.group = group
# extract those params for which the Channel has an attribute
for key in ['frametype']:
setattr(channel, key, channel.params.pop(key, None))
append(channel)
return out | Read a `~gwpy.detector.ChannelList` from a Channel List File | Below is the instruction that describes the task:
### Input:
Read a `~gwpy.detector.ChannelList` from a Channel List File
### Response:
def read_channel_list_file(*source):
"""Read a `~gwpy.detector.ChannelList` from a Channel List File
"""
# read file(s)
config = configparser.ConfigParser(dict_type=OrderedDict)
source = file_list(source)
success_ = config.read(*source)
if len(success_) != len(source):
raise IOError("Failed to read one or more CLF files")
# create channel list
out = ChannelList()
out.source = source
append = out.append
# loop over all groups and channels
for group in config.sections():
params = OrderedDict(config.items(group))
channels = params.pop('channels').strip('\n').split('\n')
if 'flow' in params or 'fhigh' in params:
low = params.pop('flow', 0)
high = params.pop('fhigh', inf)
if isinstance(high, string_types) and high.lower() == 'nyquist':
high = inf
frange = float(low), float(high)
else:
frange = None
for channel in channels:
try:
match = CHANNEL_DEFINITION.match(channel).groupdict()
except AttributeError as exc:
exc.args = ('Cannot parse %r as channel list entry' % channel,)
raise
# remove Nones from match
match = dict((k, v) for k, v in match.items() if v is not None)
match.setdefault('safe', 'safe')
match.setdefault('fidelity', 'clean')
# create channel and copy group params
safe = match.get('safe', 'safe').lower() != 'unsafe'
channel = Channel(match.pop('name'), frequency_range=frange,
safe=safe, sample_rate=match.pop('sample_rate'))
channel.params = params.copy()
channel.params.update(match)
channel.group = group
# extract those params for which the Channel has an attribute
for key in ['frametype']:
setattr(channel, key, channel.params.pop(key, None))
append(channel)
return out |
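A small configparser sketch of the CLF layout this reader expects: a section per group, with a multi-line channels option split on newlines; the section and channel entries below are invented examples, not a real channel list.
import configparser
from collections import OrderedDict

clf_text = """
[ALS-calibration]
flow = 10
fhigh = Nyquist
channels =
    L1:GDS-CALIB_STRAIN 16384 safe clean
    L1:ODC-MASTER_CHANNEL_OUT_DQ 16384 unsafe clean
"""

config = configparser.ConfigParser(dict_type=OrderedDict)
config.read_string(clf_text)
for group in config.sections():
    params = OrderedDict(config.items(group))
    channels = params.pop('channels').strip('\n').split('\n')
    print(group, len(channels), dict(params))  # ALS-calibration 2 {'flow': '10', 'fhigh': 'Nyquist'}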
async def connect(self, host, port=DEFAULT_PORT):
"""
:py:func:`asyncio.coroutine`
Connect to server.
:param host: host name for connection
:type host: :py:class:`str`
:param port: port number for connection
:type port: :py:class:`int`
"""
await super().connect(host, port)
code, info = await self.command(None, "220", "120")
return info | :py:func:`asyncio.coroutine`
Connect to server.
:param host: host name for connection
:type host: :py:class:`str`
:param port: port number for connection
:type port: :py:class:`int` | Below is the instruction that describes the task:
### Input:
:py:func:`asyncio.coroutine`
Connect to server.
:param host: host name for connection
:type host: :py:class:`str`
:param port: port number for connection
:type port: :py:class:`int`
### Response:
async def connect(self, host, port=DEFAULT_PORT):
"""
:py:func:`asyncio.coroutine`
Connect to server.
:param host: host name for connection
:type host: :py:class:`str`
:param port: port number for connection
:type port: :py:class:`int`
"""
await super().connect(host, port)
code, info = await self.command(None, "220", "120")
return info |
def values(self):
"""A :class:`werkzeug.datastructures.CombinedMultiDict` that combines
:attr:`args` and :attr:`form`."""
args = []
for d in self.args, self.form:
if not isinstance(d, MultiDict):
d = MultiDict(d)
args.append(d)
return CombinedMultiDict(args) | A :class:`werkzeug.datastructures.CombinedMultiDict` that combines
:attr:`args` and :attr:`form`. | Below is the instruction that describes the task:
### Input:
A :class:`werkzeug.datastructures.CombinedMultiDict` that combines
:attr:`args` and :attr:`form`.
### Response:
def values(self):
"""A :class:`werkzeug.datastructures.CombinedMultiDict` that combines
:attr:`args` and :attr:`form`."""
args = []
for d in self.args, self.form:
if not isinstance(d, MultiDict):
d = MultiDict(d)
args.append(d)
return CombinedMultiDict(args) |
def cat_hist(val, shade, ax, **kwargs_shade):
"""Auxiliary function to plot discrete-violinplots."""
bins = get_bins(val)
binned_d, _ = np.histogram(val, bins=bins, normed=True)
bin_edges = np.linspace(np.min(val), np.max(val), len(bins))
centers = 0.5 * (bin_edges + np.roll(bin_edges, 1))[:-1]
heights = np.diff(bin_edges)
lefts = -0.5 * binned_d
ax.barh(centers, binned_d, height=heights, left=lefts, alpha=shade, **kwargs_shade) | Auxiliary function to plot discrete-violinplots. | Below is the instruction that describes the task:
### Input:
Auxiliary function to plot discrete-violinplots.
### Response:
def cat_hist(val, shade, ax, **kwargs_shade):
"""Auxiliary function to plot discrete-violinplots."""
bins = get_bins(val)
binned_d, _ = np.histogram(val, bins=bins, normed=True)
bin_edges = np.linspace(np.min(val), np.max(val), len(bins))
centers = 0.5 * (bin_edges + np.roll(bin_edges, 1))[:-1]
heights = np.diff(bin_edges)
lefts = -0.5 * binned_d
ax.barh(centers, binned_d, height=heights, left=lefts, alpha=shade, **kwargs_shade) |
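A simplified, self-contained variant of the same horizontal-bar histogram; note that np.histogram's normed= keyword used above has since been removed from NumPy, so this sketch passes density=True, and the bin choice merely stands in for get_bins().
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
val = rng.integers(0, 5, size=200)                 # discrete sample
bins = np.arange(val.min(), val.max() + 2) - 0.5   # stand-in for get_bins()

binned_d, bin_edges = np.histogram(val, bins=bins, density=True)
centers = 0.5 * (bin_edges[:-1] + bin_edges[1:])
heights = np.diff(bin_edges)
lefts = -0.5 * binned_d                            # centre the bars, violin style

fig, ax = plt.subplots()
ax.barh(centers, binned_d, height=heights, left=lefts, alpha=0.5)
plt.show()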
def term_all_jobs():
'''
Sends a termination signal (SIGTERM 15) to all currently running jobs
CLI Example:
.. code-block:: bash
salt '*' saltutil.term_all_jobs
'''
ret = []
for data in running():
ret.append(signal_job(data['jid'], signal.SIGTERM))
return ret | Sends a termination signal (SIGTERM 15) to all currently running jobs
CLI Example:
.. code-block:: bash
salt '*' saltutil.term_all_jobs | Below is the instruction that describes the task:
### Input:
Sends a termination signal (SIGTERM 15) to all currently running jobs
CLI Example:
.. code-block:: bash
salt '*' saltutil.term_all_jobs
### Response:
def term_all_jobs():
'''
Sends a termination signal (SIGTERM 15) to all currently running jobs
CLI Example:
.. code-block:: bash
salt '*' saltutil.term_all_jobs
'''
ret = []
for data in running():
ret.append(signal_job(data['jid'], signal.SIGTERM))
return ret |
def convert_ensembl_to_entrez(self, ensembl):
"""Convert Ensembl Id to Entrez Gene Id"""
if 'ENST' in ensembl:
pass
else:
raise (IndexError)
# Submit request to NCBI eutils/Gene database
server = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?" + self.options + "&db=gene&term={0}".format(
ensembl)
r = requests.get(server, headers={"Content-Type": "text/xml"})
if not r.ok:
r.raise_for_status()
sys.exit()
# Process Request
response = r.text
info = xmltodict.parse(response)
try:
geneId = info['eSearchResult']['IdList']['Id']
except TypeError:
raise (TypeError)
return geneId | Convert Ensembl Id to Entrez Gene Id | Below is the instruction that describes the task:
### Input:
Convert Ensembl Id to Entrez Gene Id
### Response:
def convert_ensembl_to_entrez(self, ensembl):
"""Convert Ensembl Id to Entrez Gene Id"""
if 'ENST' in ensembl:
pass
else:
raise (IndexError)
# Submit request to NCBI eutils/Gene database
server = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?" + self.options + "&db=gene&term={0}".format(
ensembl)
r = requests.get(server, headers={"Content-Type": "text/xml"})
if not r.ok:
r.raise_for_status()
sys.exit()
# Process Request
response = r.text
info = xmltodict.parse(response)
try:
geneId = info['eSearchResult']['IdList']['Id']
except TypeError:
raise (TypeError)
return geneId |
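A network-free sketch of the same esearch call: build the query with the standard library and pull the Id out of an eutils-shaped reply (the canned XML and the transcript id below are only illustrative).
from urllib.parse import urlencode
from xml.etree import ElementTree

ensembl = "ENST00000288602"
query = urlencode({"db": "gene", "term": ensembl})
url = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?" + query
print(url)

# Canned reply showing where the Gene Id sits in the esearch XML.
canned = "<eSearchResult><Count>1</Count><IdList><Id>673</Id></IdList></eSearchResult>"
gene_id = ElementTree.fromstring(canned).findtext("IdList/Id")
print(gene_id)  # 673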
def get_metadata(self, refresh=True):
"""
return cached metadata by default
:param refresh: bool, returns up to date metadata if set to True
:return: dict
"""
if refresh or not self._metadata:
ident = self._id or self.name
if not ident:
raise ConuException(
"This container does not have a valid identifier.")
out = run_cmd(["machinectl", "--no-pager", "show", ident], return_output=True, ignore_status=True)
if "Could not get path to machine" in out:
self._metadata = {}
else:
self._metadata = convert_kv_to_dict(out)
return self._metadata | return cached metadata by default
:param refresh: bool, returns up to date metadata if set to True
:return: dict | Below is the instruction that describes the task:
### Input:
return cached metadata by default
:param refresh: bool, returns up to date metadata if set to True
:return: dict
### Response:
def get_metadata(self, refresh=True):
"""
return cached metadata by default
:param refresh: bool, returns up to date metadata if set to True
:return: dict
"""
if refresh or not self._metadata:
ident = self._id or self.name
if not ident:
raise ConuException(
"This container does not have a valid identifier.")
out = run_cmd(["machinectl", "--no-pager", "show", ident], return_output=True, ignore_status=True)
if "Could not get path to machine" in out:
self._metadata = {}
else:
self._metadata = convert_kv_to_dict(out)
return self._metadata |
def threshold(self, front_thresh=0.0, rear_thresh=100.0):
"""Creates a new DepthImage by setting all depths less than
front_thresh and greater than rear_thresh to 0.
Parameters
----------
front_thresh : float
The lower-bound threshold.
rear_thresh : float
The upper bound threshold.
Returns
-------
:obj:`DepthImage`
A new DepthImage created from the thresholding operation.
"""
data = np.copy(self._data)
data[data < front_thresh] = 0.0
data[data > rear_thresh] = 0.0
return DepthImage(data, self._frame) | Creates a new DepthImage by setting all depths less than
front_thresh and greater than rear_thresh to 0.
Parameters
----------
front_thresh : float
The lower-bound threshold.
rear_thresh : float
The upper bound threshold.
Returns
-------
:obj:`DepthImage`
A new DepthImage created from the thresholding operation. | Below is the instruction that describes the task:
### Input:
Creates a new DepthImage by setting all depths less than
front_thresh and greater than rear_thresh to 0.
Parameters
----------
front_thresh : float
The lower-bound threshold.
rear_thresh : float
The upper bound threshold.
Returns
-------
:obj:`DepthImage`
A new DepthImage created from the thresholding operation.
### Response:
def threshold(self, front_thresh=0.0, rear_thresh=100.0):
"""Creates a new DepthImage by setting all depths less than
front_thresh and greater than rear_thresh to 0.
Parameters
----------
front_thresh : float
The lower-bound threshold.
rear_thresh : float
The upper bound threshold.
Returns
-------
:obj:`DepthImage`
A new DepthImage created from the thresholding operation.
"""
data = np.copy(self._data)
data[data < front_thresh] = 0.0
data[data > rear_thresh] = 0.0
return DepthImage(data, self._frame) |
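Stripped of the DepthImage wrapper, the thresholding is two NumPy masks; a minimal sketch with made-up depths.
import numpy as np

def threshold_depths(depths, front_thresh=0.0, rear_thresh=100.0):
    # Zero out anything closer than front_thresh or farther than rear_thresh.
    out = np.copy(depths)
    out[out < front_thresh] = 0.0
    out[out > rear_thresh] = 0.0
    return out

depths = np.array([[0.2, 0.8], [1.5, 3.0]])
print(threshold_depths(depths, front_thresh=0.5, rear_thresh=2.0))
# [[0.  0.8]
#  [1.5 0. ]]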
def options(self, parser, env=os.environ):
"Add options to nosetests."
parser.add_option("--%s-record" % self.name,
action="store",
metavar="FILE",
dest="record_filename",
help="Record actions to this file.")
parser.add_option("--%s-playback" % self.name,
action="store",
metavar="FILE",
dest="playback_filename",
help="Playback actions from this file.") | Add options to nosetests. | Below is the the instruction that describes the task:
### Input:
Add options to nosetests.
### Response:
def options(self, parser, env=os.environ):
"Add options to nosetests."
parser.add_option("--%s-record" % self.name,
action="store",
metavar="FILE",
dest="record_filename",
help="Record actions to this file.")
parser.add_option("--%s-playback" % self.name,
action="store",
metavar="FILE",
dest="playback_filename",
help="Playback actions from this file.") |
def getStrips(self, scraperobj):
"""Download comic strips."""
with lock:
host_lock = get_host_lock(scraperobj.url)
with host_lock:
self._getStrips(scraperobj) | Download comic strips. | Below is the instruction that describes the task:
### Input:
Download comic strips.
### Response:
def getStrips(self, scraperobj):
"""Download comic strips."""
with lock:
host_lock = get_host_lock(scraperobj.url)
with host_lock:
self._getStrips(scraperobj) |
def request(self, method, suffix, data):
"""
:param method: str, http method ["GET","POST","PUT"]
:param suffix: the url suffix
:param data:
:return:
"""
url = self.site_url + suffix
response = self.session.request(method, url, data=data)
if response.status_code == 200:
json_obj = response.json()
if isinstance(json_obj, dict) and json_obj.get("error_code"):
raise WeiboOauth2Error(
json_obj.get("error_code"),
json_obj.get("error"),
json_obj.get('error_description')
)
else:
return json_obj
else:
raise WeiboRequestError(
"Weibo API request error: status code: {code} url:{url} ->"
" method:{method}: data={data}".format(
code=response.status_code,
url=response.url,
method=method,
data=data
)
) | :param method: str, http method ["GET","POST","PUT"]
:param suffix: the url suffix
:param data:
:return: | Below is the instruction that describes the task:
### Input:
:param method: str, http method ["GET","POST","PUT"]
:param suffix: the url suffix
:param data:
:return:
### Response:
def request(self, method, suffix, data):
"""
:param method: str, http method ["GET","POST","PUT"]
:param suffix: the url suffix
:param data:
:return:
"""
url = self.site_url + suffix
response = self.session.request(method, url, data=data)
if response.status_code == 200:
json_obj = response.json()
if isinstance(json_obj, dict) and json_obj.get("error_code"):
raise WeiboOauth2Error(
json_obj.get("error_code"),
json_obj.get("error"),
json_obj.get('error_description')
)
else:
return json_obj
else:
raise WeiboRequestError(
"Weibo API request error: status code: {code} url:{url} ->"
" method:{method}: data={data}".format(
code=response.status_code,
url=response.url,
method=method,
data=data
)
) |
def lat_from_pole(ref_loc_lon, ref_loc_lat, pole_plon, pole_plat):
"""
Calculate paleolatitude for a reference location based on a paleomagnetic pole
Required Parameters
----------
ref_loc_lon: longitude of reference location in degrees
ref_loc_lat: latitude of reference location
pole_plon: paleopole longitude in degrees
pole_plat: paleopole latitude in degrees
"""
ref_loc = (ref_loc_lon, ref_loc_lat)
pole = (pole_plon, pole_plat)
paleo_lat = 90 - pmag.angle(pole, ref_loc)
return float(paleo_lat) | Calculate paleolatitude for a reference location based on a paleomagnetic pole
Required Parameters
----------
ref_loc_lon: longitude of reference location in degrees
ref_loc_lat: latitude of reference location
pole_plon: paleopole longitude in degrees
pole_plat: paleopole latitude in degrees | Below is the instruction that describes the task:
### Input:
Calculate paleolatitude for a reference location based on a paleomagnetic pole
Required Parameters
----------
ref_loc_lon: longitude of reference location in degrees
ref_loc_lat: latitude of reference location
pole_plon: paleopole longitude in degrees
pole_plat: paleopole latitude in degrees
### Response:
def lat_from_pole(ref_loc_lon, ref_loc_lat, pole_plon, pole_plat):
"""
Calculate paleolatitude for a reference location based on a paleomagnetic pole
Required Parameters
----------
ref_loc_lon: longitude of reference location in degrees
ref_loc_lat: latitude of reference location
pole_plon: paleopole longitude in degrees
pole_plat: paleopole latitude in degrees
"""
ref_loc = (ref_loc_lon, ref_loc_lat)
pole = (pole_plon, pole_plat)
paleo_lat = 90 - pmag.angle(pole, ref_loc)
return float(paleo_lat) |
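Assuming pmag.angle returns the great-circle separation in degrees between the pole and the site, the whole computation can be sketched with the spherical law of cosines; the coordinates below are arbitrary test values.
import numpy as np

def angular_distance(lon1, lat1, lon2, lat2):
    # Great-circle separation in degrees between two (lon, lat) points.
    lon1, lat1, lon2, lat2 = map(np.radians, (lon1, lat1, lon2, lat2))
    cos_d = (np.sin(lat1) * np.sin(lat2)
             + np.cos(lat1) * np.cos(lat2) * np.cos(lon2 - lon1))
    return np.degrees(np.arccos(np.clip(cos_d, -1.0, 1.0)))

def paleolatitude(ref_loc_lon, ref_loc_lat, pole_plon, pole_plat):
    return 90.0 - angular_distance(pole_plon, pole_plat, ref_loc_lon, ref_loc_lat)

print(paleolatitude(30.0, 60.0, 30.0, 60.0))         # 90.0 (site directly beneath the pole)
print(round(paleolatitude(0.0, 0.0, 0.0, 90.0), 1))  # 0.0  (pole at the geographic pole, site on the equator)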
def parse_file(self, sourcepath):
"""Parse an object-per-line JSON file into a log data dict"""
# Open input file and read JSON array:
with open(sourcepath, 'r') as logfile:
jsonlist = logfile.readlines()
# Set our attributes for this entry and add it to data.entries:
data = {}
data['entries'] = []
for line in jsonlist:
entry = self.parse_line(line)
data['entries'].append(entry)
if self.tzone:
for e in data['entries']:
e['tzone'] = self.tzone
# Return the parsed data
return data | Parse an object-per-line JSON file into a log data dict | Below is the instruction that describes the task:
### Input:
Parse an object-per-line JSON file into a log data dict
### Response:
def parse_file(self, sourcepath):
"""Parse an object-per-line JSON file into a log data dict"""
# Open input file and read JSON array:
with open(sourcepath, 'r') as logfile:
jsonlist = logfile.readlines()
# Set our attributes for this entry and add it to data.entries:
data = {}
data['entries'] = []
for line in jsonlist:
entry = self.parse_line(line)
data['entries'].append(entry)
if self.tzone:
for e in data['entries']:
e['tzone'] = self.tzone
# Return the parsed data
return data |
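The file format is plain JSON Lines, so the core of parse_file/parse_line reduces to a few standard-library calls; the field names in the sample are invented.
import json

sample = (
    '{"time": "2019-03-01T12:00:00", "level": "INFO", "msg": "started"}\n'
    '{"time": "2019-03-01T12:00:05", "level": "WARN", "msg": "slow query"}\n'
)

entries = [json.loads(line) for line in sample.splitlines() if line.strip()]
print(len(entries), entries[1]["level"])  # 2 WARN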
def _read_mode_route(self, size, kind):
"""Read options with route data.
Positional arguments:
* size - int, length of option
* kind - int, 7/131/137 (RR/LSR/SSR)
Returns:
* dict -- extracted option with route data
Structure of these options:
* [RFC 791] Loose Source Route
+--------+--------+--------+---------//--------+
|10000011| length | pointer| route data |
+--------+--------+--------+---------//--------+
* [RFC 791] Strict Source Route
+--------+--------+--------+---------//--------+
|10001001| length | pointer| route data |
+--------+--------+--------+---------//--------+
* [RFC 791] Record Route
+--------+--------+--------+---------//--------+
|00000111| length | pointer| route data |
+--------+--------+--------+---------//--------+
Octets Bits Name Description
0 0 ip.opt.kind Kind (7/131/137)
0 0 ip.opt.type.copy Copied Flag (0)
0 1 ip.opt.type.class Option Class (0/1)
0 3 ip.opt.type.number Option Number (3/7/9)
1 8 ip.opt.length Length
2 16 ip.opt.pointer Pointer (≥4)
3 24 ip.opt.data Route Data
"""
if size < 3 or (size - 3) % 4 != 0:
raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')
_rptr = self._read_unpack(1)
if _rptr < 4:
raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')
data = dict(
kind=kind,
type=self._read_opt_type(kind),
length=size,
pointer=_rptr,
)
counter = 4
address = list()
endpoint = min(_rptr, size)
while counter < endpoint:
counter += 4
address.append(self._read_ipv4_addr())
data['ip'] = address or None
return data | Read options with route data.
Positional arguments:
* size - int, length of option
* kind - int, 7/131/137 (RR/LSR/SSR)
Returns:
* dict -- extracted option with route data
Structure of these options:
* [RFC 791] Loose Source Route
+--------+--------+--------+---------//--------+
|10000011| length | pointer| route data |
+--------+--------+--------+---------//--------+
* [RFC 791] Strict Source Route
+--------+--------+--------+---------//--------+
|10001001| length | pointer| route data |
+--------+--------+--------+---------//--------+
* [RFC 791] Record Route
+--------+--------+--------+---------//--------+
|00000111| length | pointer| route data |
+--------+--------+--------+---------//--------+
Octets Bits Name Description
0 0 ip.opt.kind Kind (7/131/137)
0 0 ip.opt.type.copy Copied Flag (0)
0 1 ip.opt.type.class Option Class (0/1)
0 3 ip.opt.type.number Option Number (3/7/9)
1 8 ip.opt.length Length
2 16 ip.opt.pointer Pointer (≥4)
3 24 ip.opt.data Route Data | Below is the instruction that describes the task:
### Input:
Read options with route data.
Positional arguments:
* size - int, length of option
* kind - int, 7/131/137 (RR/LSR/SSR)
Returns:
* dict -- extracted option with route data
Structure of these options:
* [RFC 791] Loose Source Route
+--------+--------+--------+---------//--------+
|10000011| length | pointer| route data |
+--------+--------+--------+---------//--------+
* [RFC 791] Strict Source Route
+--------+--------+--------+---------//--------+
|10001001| length | pointer| route data |
+--------+--------+--------+---------//--------+
* [RFC 791] Record Route
+--------+--------+--------+---------//--------+
|00000111| length | pointer| route data |
+--------+--------+--------+---------//--------+
Octets Bits Name Description
0 0 ip.opt.kind Kind (7/131/137)
0 0 ip.opt.type.copy Copied Flag (0)
0 1 ip.opt.type.class Option Class (0/1)
0 3 ip.opt.type.number Option Number (3/7/9)
1 8 ip.opt.length Length
2 16 ip.opt.pointer Pointer (≥4)
3 24 ip.opt.data Route Data
### Response:
def _read_mode_route(self, size, kind):
"""Read options with route data.
Positional arguments:
* size - int, length of option
* kind - int, 7/131/137 (RR/LSR/SSR)
Returns:
* dict -- extracted option with route data
Structure of these options:
* [RFC 791] Loose Source Route
+--------+--------+--------+---------//--------+
|10000011| length | pointer| route data |
+--------+--------+--------+---------//--------+
* [RFC 791] Strict Source Route
+--------+--------+--------+---------//--------+
|10001001| length | pointer| route data |
+--------+--------+--------+---------//--------+
* [RFC 791] Record Route
+--------+--------+--------+---------//--------+
|00000111| length | pointer| route data |
+--------+--------+--------+---------//--------+
Octets Bits Name Description
0 0 ip.opt.kind Kind (7/131/137)
0 0 ip.opt.type.copy Copied Flag (0)
0 1 ip.opt.type.class Option Class (0/1)
0 3 ip.opt.type.number Option Number (3/7/9)
1 8 ip.opt.length Length
2 16 ip.opt.pointer Pointer (≥4)
3 24 ip.opt.data Route Data
"""
if size < 3 or (size - 3) % 4 != 0:
raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')
_rptr = self._read_unpack(1)
if _rptr < 4:
raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')
data = dict(
kind=kind,
type=self._read_opt_type(kind),
length=size,
pointer=_rptr,
)
counter = 4
address = list()
endpoint = min(_rptr, size)
while counter < endpoint:
counter += 4
address.append(self._read_ipv4_addr())
data['ip'] = address or None
return data |
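A standalone sketch of the pointer/length arithmetic for a Record Route option, following RFC 791 semantics (the pointer is 1-based and marks the next free slot); the option bytes are hand-built, not captured traffic, and this is not the pcapkit internal reader.
# kind=7 (RR), length=11, pointer=8, one recorded address plus one empty slot.
option = bytes([7, 11, 8]) + bytes([10, 0, 0, 1]) + bytes([0, 0, 0, 0])

kind, length, pointer = option[0], option[1], option[2]
addresses = []
offset = 3                                   # route data starts after kind/length/pointer
while offset + 4 <= min(pointer - 1, length):
    addresses.append('.'.join(str(b) for b in option[offset:offset + 4]))
    offset += 4
print(kind, length, pointer, addresses)      # 7 11 8 ['10.0.0.1']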
def watershed_delineation(np, dem, outlet_file=None, thresh=0, singlebasin=False,
workingdir=None, mpi_bin=None, bin_dir=None,
logfile=None, runtime_file=None, hostfile=None):
"""Watershed Delineation."""
# 1. Check directories
if not os.path.exists(dem):
TauDEM.error('DEM: %s is not existed!' % dem)
dem = os.path.abspath(dem)
if workingdir is None:
workingdir = os.path.dirname(dem)
namecfg = TauDEMFilesUtils(workingdir)
workingdir = namecfg.workspace
UtilClass.mkdir(workingdir)
# 2. Check log file
if logfile is not None and FileClass.is_file_exists(logfile):
os.remove(logfile)
# 3. Get predefined intermediate file names
filled_dem = namecfg.filldem
flow_dir = namecfg.d8flow
slope = namecfg.slp
flow_dir_dinf = namecfg.dinf
slope_dinf = namecfg.dinf_slp
dir_code_dinf = namecfg.dinf_d8dir
weight_dinf = namecfg.dinf_weight
acc = namecfg.d8acc
stream_raster = namecfg.stream_raster
default_outlet = namecfg.outlet_pre
modified_outlet = namecfg.outlet_m
stream_skeleton = namecfg.stream_pd
acc_with_weight = namecfg.d8acc_weight
stream_order = namecfg.stream_order
ch_network = namecfg.channel_net
ch_coord = namecfg.channel_coord
stream_net = namecfg.streamnet_shp
subbasin = namecfg.subbsn
dist2_stream_d8 = namecfg.dist2stream_d8
# 4. perform calculation
UtilClass.writelog(logfile, '[Output] %d..., %s' % (10, 'pitremove DEM...'), 'a')
TauDEM.pitremove(np, dem, filled_dem, workingdir, mpi_bin, bin_dir,
log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d..., %s' %
(20, 'Calculating D8 and Dinf flow direction...'), 'a')
TauDEM.d8flowdir(np, filled_dem, flow_dir, slope, workingdir,
mpi_bin, bin_dir, log_file=logfile,
runtime_file=runtime_file, hostfile=hostfile)
TauDEM.dinfflowdir(np, filled_dem, flow_dir_dinf, slope_dinf, workingdir,
mpi_bin, bin_dir, log_file=logfile,
runtime_file=runtime_file, hostfile=hostfile)
DinfUtil.output_compressed_dinf(flow_dir_dinf, dir_code_dinf, weight_dinf)
UtilClass.writelog(logfile, '[Output] %d..., %s' % (30, 'D8 flow accumulation...'), 'a')
TauDEM.aread8(np, flow_dir, acc, None, None, False, workingdir, mpi_bin, bin_dir,
log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d..., %s' %
(40, 'Generating stream raster initially...'), 'a')
min_accum, max_accum, mean_accum, std_accum = RasterUtilClass.raster_statistics(acc)
TauDEM.threshold(np, acc, stream_raster, mean_accum, workingdir,
mpi_bin, bin_dir, log_file=logfile,
runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d..., %s' % (50, 'Moving outlet to stream...'), 'a')
if outlet_file is None:
outlet_file = default_outlet
TauDEM.connectdown(np, flow_dir, acc, outlet_file, wtsd=None,
workingdir=workingdir, mpiexedir=mpi_bin, exedir=bin_dir,
log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
TauDEM.moveoutletstostrm(np, flow_dir, stream_raster, outlet_file,
modified_outlet, workingdir, mpi_bin, bin_dir,
log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d..., %s' %
(60, 'Generating stream skeleton...'), 'a')
TauDEM.peukerdouglas(np, filled_dem, stream_skeleton, workingdir,
mpi_bin, bin_dir, log_file=logfile,
runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d..., %s' %
(70, 'Flow accumulation with outlet...'), 'a')
tmp_outlet = None
if singlebasin:
tmp_outlet = modified_outlet
TauDEM.aread8(np, flow_dir, acc_with_weight, tmp_outlet, stream_skeleton, False,
workingdir, mpi_bin, bin_dir, log_file=logfile,
runtime_file=runtime_file, hostfile=hostfile)
if thresh <= 0: # find the optimal threshold using dropanalysis function
UtilClass.writelog(logfile, '[Output] %d..., %s' %
(75, 'Drop analysis to select optimal threshold...'), 'a')
min_accum, max_accum, mean_accum, std_accum = \
RasterUtilClass.raster_statistics(acc_with_weight)
if mean_accum - std_accum < 0:
minthresh = mean_accum
else:
minthresh = mean_accum - std_accum
maxthresh = mean_accum + std_accum
numthresh = 20
logspace = 'true'
drp_file = namecfg.drptxt
TauDEM.dropanalysis(np, filled_dem, flow_dir, acc_with_weight,
acc_with_weight, modified_outlet, minthresh, maxthresh,
numthresh, logspace, drp_file, workingdir, mpi_bin, bin_dir,
log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
if not FileClass.is_file_exists(drp_file):
raise RuntimeError('Dropanalysis failed and drp.txt was not created!')
with open(drp_file, 'r', encoding='utf-8') as drpf:
temp_contents = drpf.read()
(beg, thresh) = temp_contents.rsplit(' ', 1)
print(thresh)
UtilClass.writelog(logfile, '[Output] %d..., %s' % (80, 'Generating stream raster...'), 'a')
TauDEM.threshold(np, acc_with_weight, stream_raster, float(thresh),
workingdir, mpi_bin, bin_dir, log_file=logfile,
runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d..., %s' % (90, 'Generating stream net...'), 'a')
TauDEM.streamnet(np, filled_dem, flow_dir, acc_with_weight, stream_raster,
modified_outlet, stream_order, ch_network,
ch_coord, stream_net, subbasin, workingdir, mpi_bin, bin_dir,
log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d..., %s' %
(95, 'Calculating distance to stream (D8)...'), 'a')
TauDEM.d8hdisttostrm(np, flow_dir, stream_raster, dist2_stream_d8, 1,
workingdir, mpi_bin, bin_dir, log_file=logfile,
runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d.., %s' %
(100, 'Original subbasin delineation is finished!'), 'a') | Watershed Delineation. | Below is the instruction that describes the task:
### Input:
Watershed Delineation.
### Response:
def watershed_delineation(np, dem, outlet_file=None, thresh=0, singlebasin=False,
workingdir=None, mpi_bin=None, bin_dir=None,
logfile=None, runtime_file=None, hostfile=None):
"""Watershed Delineation."""
# 1. Check directories
if not os.path.exists(dem):
TauDEM.error('DEM: %s is not existed!' % dem)
dem = os.path.abspath(dem)
if workingdir is None:
workingdir = os.path.dirname(dem)
namecfg = TauDEMFilesUtils(workingdir)
workingdir = namecfg.workspace
UtilClass.mkdir(workingdir)
# 2. Check log file
if logfile is not None and FileClass.is_file_exists(logfile):
os.remove(logfile)
# 3. Get predefined intermediate file names
filled_dem = namecfg.filldem
flow_dir = namecfg.d8flow
slope = namecfg.slp
flow_dir_dinf = namecfg.dinf
slope_dinf = namecfg.dinf_slp
dir_code_dinf = namecfg.dinf_d8dir
weight_dinf = namecfg.dinf_weight
acc = namecfg.d8acc
stream_raster = namecfg.stream_raster
default_outlet = namecfg.outlet_pre
modified_outlet = namecfg.outlet_m
stream_skeleton = namecfg.stream_pd
acc_with_weight = namecfg.d8acc_weight
stream_order = namecfg.stream_order
ch_network = namecfg.channel_net
ch_coord = namecfg.channel_coord
stream_net = namecfg.streamnet_shp
subbasin = namecfg.subbsn
dist2_stream_d8 = namecfg.dist2stream_d8
# 4. perform calculation
UtilClass.writelog(logfile, '[Output] %d..., %s' % (10, 'pitremove DEM...'), 'a')
TauDEM.pitremove(np, dem, filled_dem, workingdir, mpi_bin, bin_dir,
log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d..., %s' %
(20, 'Calculating D8 and Dinf flow direction...'), 'a')
TauDEM.d8flowdir(np, filled_dem, flow_dir, slope, workingdir,
mpi_bin, bin_dir, log_file=logfile,
runtime_file=runtime_file, hostfile=hostfile)
TauDEM.dinfflowdir(np, filled_dem, flow_dir_dinf, slope_dinf, workingdir,
mpi_bin, bin_dir, log_file=logfile,
runtime_file=runtime_file, hostfile=hostfile)
DinfUtil.output_compressed_dinf(flow_dir_dinf, dir_code_dinf, weight_dinf)
UtilClass.writelog(logfile, '[Output] %d..., %s' % (30, 'D8 flow accumulation...'), 'a')
TauDEM.aread8(np, flow_dir, acc, None, None, False, workingdir, mpi_bin, bin_dir,
log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d..., %s' %
(40, 'Generating stream raster initially...'), 'a')
min_accum, max_accum, mean_accum, std_accum = RasterUtilClass.raster_statistics(acc)
TauDEM.threshold(np, acc, stream_raster, mean_accum, workingdir,
mpi_bin, bin_dir, log_file=logfile,
runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d..., %s' % (50, 'Moving outlet to stream...'), 'a')
if outlet_file is None:
outlet_file = default_outlet
TauDEM.connectdown(np, flow_dir, acc, outlet_file, wtsd=None,
workingdir=workingdir, mpiexedir=mpi_bin, exedir=bin_dir,
log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
TauDEM.moveoutletstostrm(np, flow_dir, stream_raster, outlet_file,
modified_outlet, workingdir, mpi_bin, bin_dir,
log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d..., %s' %
(60, 'Generating stream skeleton...'), 'a')
TauDEM.peukerdouglas(np, filled_dem, stream_skeleton, workingdir,
mpi_bin, bin_dir, log_file=logfile,
runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d..., %s' %
(70, 'Flow accumulation with outlet...'), 'a')
tmp_outlet = None
if singlebasin:
tmp_outlet = modified_outlet
TauDEM.aread8(np, flow_dir, acc_with_weight, tmp_outlet, stream_skeleton, False,
workingdir, mpi_bin, bin_dir, log_file=logfile,
runtime_file=runtime_file, hostfile=hostfile)
if thresh <= 0: # find the optimal threshold using dropanalysis function
UtilClass.writelog(logfile, '[Output] %d..., %s' %
(75, 'Drop analysis to select optimal threshold...'), 'a')
min_accum, max_accum, mean_accum, std_accum = \
RasterUtilClass.raster_statistics(acc_with_weight)
if mean_accum - std_accum < 0:
minthresh = mean_accum
else:
minthresh = mean_accum - std_accum
maxthresh = mean_accum + std_accum
numthresh = 20
logspace = 'true'
drp_file = namecfg.drptxt
TauDEM.dropanalysis(np, filled_dem, flow_dir, acc_with_weight,
acc_with_weight, modified_outlet, minthresh, maxthresh,
numthresh, logspace, drp_file, workingdir, mpi_bin, bin_dir,
log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
if not FileClass.is_file_exists(drp_file):
raise RuntimeError('Dropanalysis failed and drp.txt was not created!')
with open(drp_file, 'r', encoding='utf-8') as drpf:
temp_contents = drpf.read()
(beg, thresh) = temp_contents.rsplit(' ', 1)
print(thresh)
UtilClass.writelog(logfile, '[Output] %d..., %s' % (80, 'Generating stream raster...'), 'a')
TauDEM.threshold(np, acc_with_weight, stream_raster, float(thresh),
workingdir, mpi_bin, bin_dir, log_file=logfile,
runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d..., %s' % (90, 'Generating stream net...'), 'a')
TauDEM.streamnet(np, filled_dem, flow_dir, acc_with_weight, stream_raster,
modified_outlet, stream_order, ch_network,
ch_coord, stream_net, subbasin, workingdir, mpi_bin, bin_dir,
log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d..., %s' %
(95, 'Calculating distance to stream (D8)...'), 'a')
TauDEM.d8hdisttostrm(np, flow_dir, stream_raster, dist2_stream_d8, 1,
workingdir, mpi_bin, bin_dir, log_file=logfile,
runtime_file=runtime_file, hostfile=hostfile)
UtilClass.writelog(logfile, '[Output] %d.., %s' %
(100, 'Original subbasin delineation is finished!'), 'a') |
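The drop-analysis step above searches for an optimal stream threshold between the mean and one standard deviation around the mean of the weighted flow accumulation. A minimal standalone sketch of how that search range is derived (hypothetical accumulation values; TauDEM itself is not needed for this part):

import numpy as np

acc = np.array([1.0, 3.0, 7.0, 120.0, 450.0, 2600.0])  # hypothetical D8 accumulation values
mean_accum = float(acc.mean())
std_accum = float(acc.std())
# The lower bound is clamped to the mean whenever mean - std would go negative.
minthresh = mean_accum if mean_accum - std_accum < 0 else mean_accum - std_accum
maxthresh = mean_accum + std_accum
print(minthresh, maxthresh)  # dropanalysis then tests 20 log-spaced thresholds in this range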
def _set_enabled_zone(self, v, load=False):
"""
Setter method for enabled_zone, mapped from YANG variable /brocade_zone_rpc/show_zoning_enabled_configuration/output/enabled_configuration/enabled_zone (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled_zone is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled_zone() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("zone_name",enabled_zone.enabled_zone, yang_name="enabled-zone", rest_name="enabled-zone", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='zone-name', extensions={u'tailf-common': {u'info': u'List of enabled Zones'}}), is_container='list', yang_name="enabled-zone", rest_name="enabled-zone", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'List of enabled Zones'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """enabled_zone must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("zone_name",enabled_zone.enabled_zone, yang_name="enabled-zone", rest_name="enabled-zone", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='zone-name', extensions={u'tailf-common': {u'info': u'List of enabled Zones'}}), is_container='list', yang_name="enabled-zone", rest_name="enabled-zone", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'List of enabled Zones'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True)""",
})
self.__enabled_zone = t
if hasattr(self, '_set'):
self._set() | Setter method for enabled_zone, mapped from YANG variable /brocade_zone_rpc/show_zoning_enabled_configuration/output/enabled_configuration/enabled_zone (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled_zone is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled_zone() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for enabled_zone, mapped from YANG variable /brocade_zone_rpc/show_zoning_enabled_configuration/output/enabled_configuration/enabled_zone (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled_zone is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled_zone() directly.
### Response:
def _set_enabled_zone(self, v, load=False):
"""
Setter method for enabled_zone, mapped from YANG variable /brocade_zone_rpc/show_zoning_enabled_configuration/output/enabled_configuration/enabled_zone (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled_zone is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled_zone() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("zone_name",enabled_zone.enabled_zone, yang_name="enabled-zone", rest_name="enabled-zone", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='zone-name', extensions={u'tailf-common': {u'info': u'List of enabled Zones'}}), is_container='list', yang_name="enabled-zone", rest_name="enabled-zone", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'List of enabled Zones'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """enabled_zone must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("zone_name",enabled_zone.enabled_zone, yang_name="enabled-zone", rest_name="enabled-zone", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='zone-name', extensions={u'tailf-common': {u'info': u'List of enabled Zones'}}), is_container='list', yang_name="enabled-zone", rest_name="enabled-zone", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'List of enabled Zones'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True)""",
})
self.__enabled_zone = t
if hasattr(self, '_set'):
self._set() |
def apply_config_defaults(parser, args, root):
"""Update the parser's defaults from either the arguments' config_arg or
the config files given in config_files(root)."""
if root is None:
try:
from pep8radius.vcs import VersionControl
root = VersionControl.which().root_dir()
except NotImplementedError:
pass # don't update local, could be using as module
config = SafeConfigParser()
config.read(args.global_config)
if root and not args.ignore_local_config:
config.read(local_config_files(root))
try:
defaults = dict((k.lstrip('-').replace('-', '_'), v)
for k, v in config.items("pep8"))
parser.set_defaults(**defaults)
except NoSectionError:
pass # just do nothing, potentially this could raise ?
return parser | Update the parser's defaults from either the arguments' config_arg or
the config files given in config_files(root). | Below is the the instruction that describes the task:
### Input:
Update the parser's defaults from either the arguments' config_arg or
the config files given in config_files(root).
### Response:
def apply_config_defaults(parser, args, root):
"""Update the parser's defaults from either the arguments' config_arg or
the config files given in config_files(root)."""
if root is None:
try:
from pep8radius.vcs import VersionControl
root = VersionControl.which().root_dir()
except NotImplementedError:
pass # don't update local, could be using as module
config = SafeConfigParser()
config.read(args.global_config)
if root and not args.ignore_local_config:
config.read(local_config_files(root))
try:
defaults = dict((k.lstrip('-').replace('-', '_'), v)
for k, v in config.items("pep8"))
parser.set_defaults(**defaults)
except NoSectionError:
pass # just do nothing, potentially this could raise ?
return parser |
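A simplified, self-contained sketch of the same defaults-merging idea using only the standard library (the option name here is hypothetical; SafeConfigParser corresponds to configparser.ConfigParser on Python 3):

import argparse
from configparser import ConfigParser, NoSectionError

parser = argparse.ArgumentParser()
parser.add_argument('--max-line-length', default='79')

config = ConfigParser()
config.read_string("[pep8]\nmax-line-length = 100\n")  # stands in for the global/local config files
try:
    defaults = {k.lstrip('-').replace('-', '_'): v for k, v in config.items('pep8')}
    parser.set_defaults(**defaults)
except NoSectionError:
    pass

print(parser.parse_args([]).max_line_length)  # '100', overridden by the config section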
def atIndices(indexable, indices, default=__unique):
r"""Return a list of items in `indexable` at positions `indices`.
Examples:
>>> atIndices([1,2,3], [1,1,0])
[2, 2, 1]
>>> atIndices([1,2,3], [1,1,0,4], 'default')
[2, 2, 1, 'default']
>>> atIndices({'a':3, 'b':0}, ['a'])
[3]
"""
if default is __unique:
return [indexable[i] for i in indices]
else:
res = []
for i in indices:
try:
res.append(indexable[i])
except (IndexError, KeyError):
res.append(default)
return res | r"""Return a list of items in `indexable` at positions `indices`.
Examples:
>>> atIndices([1,2,3], [1,1,0])
[2, 2, 1]
>>> atIndices([1,2,3], [1,1,0,4], 'default')
[2, 2, 1, 'default']
>>> atIndices({'a':3, 'b':0}, ['a'])
[3] | Below is the the instruction that describes the task:
### Input:
r"""Return a list of items in `indexable` at positions `indices`.
Examples:
>>> atIndices([1,2,3], [1,1,0])
[2, 2, 1]
>>> atIndices([1,2,3], [1,1,0,4], 'default')
[2, 2, 1, 'default']
>>> atIndices({'a':3, 'b':0}, ['a'])
[3]
### Response:
def atIndices(indexable, indices, default=__unique):
r"""Return a list of items in `indexable` at positions `indices`.
Examples:
>>> atIndices([1,2,3], [1,1,0])
[2, 2, 1]
>>> atIndices([1,2,3], [1,1,0,4], 'default')
[2, 2, 1, 'default']
>>> atIndices({'a':3, 'b':0}, ['a'])
[3]
"""
if default is __unique:
return [indexable[i] for i in indices]
else:
res = []
for i in indices:
try:
res.append(indexable[i])
except (IndexError, KeyError):
res.append(default)
return res |
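The __unique default above is a module-level sentinel object, a common trick for telling "no default was supplied" apart from "default=None". A self-contained sketch of the pattern (the names _MISSING and at_or are illustrative, not part of the original module):

_MISSING = object()  # sentinel: distinct from any value a caller could pass

def at_or(indexable, key, default=_MISSING):
    try:
        return indexable[key]
    except (IndexError, KeyError):
        if default is _MISSING:
            raise
        return default

print(at_or({'a': 3}, 'a'))        # 3
print(at_or({'a': 3}, 'b', None))  # None is a perfectly valid default here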
def transform(self, pyobject):
"""Transform a `PyObject` to textual form"""
if pyobject is None:
return ('none',)
object_type = type(pyobject)
try:
method = getattr(self, object_type.__name__ + '_to_textual')
return method(pyobject)
except AttributeError:
return ('unknown',) | Transform a `PyObject` to textual form | Below is the the instruction that describes the task:
### Input:
Transform a `PyObject` to textual form
### Response:
def transform(self, pyobject):
"""Transform a `PyObject` to textual form"""
if pyobject is None:
return ('none',)
object_type = type(pyobject)
try:
method = getattr(self, object_type.__name__ + '_to_textual')
return method(pyobject)
except AttributeError:
return ('unknown',) |
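A self-contained sketch of the same getattr-based dispatch-on-type-name pattern (class and method names below are illustrative, not rope's actual API):

class Transformer:
    def transform(self, obj):
        if obj is None:
            return ('none',)
        try:
            # Look up a handler named after the object's type, e.g. int -> int_to_textual.
            method = getattr(self, type(obj).__name__ + '_to_textual')
        except AttributeError:
            return ('unknown',)
        return method(obj)

    def int_to_textual(self, obj):
        return ('int', str(obj))

print(Transformer().transform(42))        # ('int', '42')
print(Transformer().transform(object())) # ('unknown',)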
def create_statement(self, connection_id):
"""Creates a new statement.
:param connection_id:
ID of the current connection.
:returns:
New statement ID.
"""
request = requests_pb2.CreateStatementRequest()
request.connection_id = connection_id
response_data = self._apply(request)
response = responses_pb2.CreateStatementResponse()
response.ParseFromString(response_data)
return response.statement_id | Creates a new statement.
:param connection_id:
ID of the current connection.
:returns:
New statement ID. | Below is the the instruction that describes the task:
### Input:
Creates a new statement.
:param connection_id:
ID of the current connection.
:returns:
New statement ID.
### Response:
def create_statement(self, connection_id):
"""Creates a new statement.
:param connection_id:
ID of the current connection.
:returns:
New statement ID.
"""
request = requests_pb2.CreateStatementRequest()
request.connection_id = connection_id
response_data = self._apply(request)
response = responses_pb2.CreateStatementResponse()
response.ParseFromString(response_data)
return response.statement_id |
def ckw05(handle, subtype, degree, begtim, endtim, inst, ref, avflag, segid,
sclkdp, packts, rate, nints, starts):
"""
Write a type 5 segment to a CK file.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckw05_c.html
:param handle: Handle of an open CK file.
:type handle: int
:param subtype: CK type 5 subtype code. Can be: 0, 1, 2, 3 see naif docs via link above.
:type subtype: int
:param degree: Degree of interpolating polynomials.
:type degree: int
:param begtim: The beginning encoded SCLK of the segment.
:type begtim: float
:param endtim: The ending encoded SCLK of the segment.
:type endtim: float
:param inst: The NAIF instrument ID code.
:type inst: int
:param ref: The reference frame of the segment.
:type ref: str
:param avflag: True if the segment will contain angular velocity.
:type avflag: bool
:param segid: Segment identifier.
:type segid: str
:param sclkdp: Encoded SCLK times.
:type sclkdp: Array of floats
:param packts: Array of packets.
:type packts: Some NxM vector of floats
:param rate: Nominal SCLK rate in seconds per tick.
:type rate: float
:param nints: Number of intervals.
:type nints: int
:param starts: Encoded SCLK interval start times.
:type starts: Array of floats
"""
handle = ctypes.c_int(handle)
subtype = ctypes.c_int(subtype)
degree = ctypes.c_int(degree)
begtim = ctypes.c_double(begtim)
endtim = ctypes.c_double(endtim)
inst = ctypes.c_int(inst)
ref = stypes.stringToCharP(ref)
avflag = ctypes.c_int(avflag)
segid = stypes.stringToCharP(segid)
n = ctypes.c_int(len(packts))
sclkdp = stypes.toDoubleVector(sclkdp)
packts = stypes.toDoubleMatrix(packts)
rate = ctypes.c_double(rate)
nints = ctypes.c_int(nints)
starts = stypes.toDoubleVector(starts)
libspice.ckw05_c(handle, subtype, degree, begtim, endtim, inst, ref, avflag,
segid, n, sclkdp, packts, rate, nints, starts) | Write a type 5 segment to a CK file.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckw05_c.html
:param handle: Handle of an open CK file.
:type handle: int
:param subtype: CK type 5 subtype code. Can be: 0, 1, 2, 3 see naif docs via link above.
:type subtype: int
:param degree: Degree of interpolating polynomials.
:type degree: int
:param begtim: The beginning encoded SCLK of the segment.
:type begtim: float
:param endtim: The ending encoded SCLK of the segment.
:type endtim: float
:param inst: The NAIF instrument ID code.
:type inst: int
:param ref: The reference frame of the segment.
:type ref: str
:param avflag: True if the segment will contain angular velocity.
:type avflag: bool
:param segid: Segment identifier.
:type segid: str
:param sclkdp: Encoded SCLK times.
:type sclkdp: Array of floats
:param packts: Array of packets.
:type packts: Some NxM vector of floats
:param rate: Nominal SCLK rate in seconds per tick.
:type rate: float
:param nints: Number of intervals.
:type nints: int
:param starts: Encoded SCLK interval start times.
:type starts: Array of floats | Below is the the instruction that describes the task:
### Input:
Write a type 5 segment to a CK file.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckw05_c.html
:param handle: Handle of an open CK file.
:type handle: int
:param subtype: CK type 5 subtype code. Can be: 0, 1, 2, 3 see naif docs via link above.
:type subtype: int
:param degree: Degree of interpolating polynomials.
:type degree: int
:param begtim: The beginning encoded SCLK of the segment.
:type begtim: float
:param endtim: The ending encoded SCLK of the segment.
:type endtim: float
:param inst: The NAIF instrument ID code.
:type inst: int
:param ref: The reference frame of the segment.
:type ref: str
:param avflag: True if the segment will contain angular velocity.
:type avflag: bool
:param segid: Segment identifier.
:type segid: str
:param sclkdp: Encoded SCLK times.
:type sclkdp: Array of floats
:param packts: Array of packets.
:type packts: Some NxM vector of floats
:param rate: Nominal SCLK rate in seconds per tick.
:type rate: float
:param nints: Number of intervals.
:type nints: int
:param starts: Encoded SCLK interval start times.
:type starts: Array of floats
### Response:
def ckw05(handle, subtype, degree, begtim, endtim, inst, ref, avflag, segid,
sclkdp, packts, rate, nints, starts):
"""
Write a type 5 segment to a CK file.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckw05_c.html
:param handle: Handle of an open CK file.
:type handle: int
:param subtype: CK type 5 subtype code. Can be: 0, 1, 2, 3 see naif docs via link above.
:type subtype: int
:param degree: Degree of interpolating polynomials.
:type degree: int
:param begtim: The beginning encoded SCLK of the segment.
:type begtim: float
:param endtim: The ending encoded SCLK of the segment.
:type endtim: float
:param inst: The NAIF instrument ID code.
:type inst: int
:param ref: The reference frame of the segment.
:type ref: str
:param avflag: True if the segment will contain angular velocity.
:type avflag: bool
:param segid: Segment identifier.
:type segid: str
:param sclkdp: Encoded SCLK times.
:type sclkdp: Array of floats
:param packts: Array of packets.
:type packts: Some NxM vector of floats
:param rate: Nominal SCLK rate in seconds per tick.
:type rate: float
:param nints: Number of intervals.
:type nints: int
:param starts: Encoded SCLK interval start times.
:type starts: Array of floats
"""
handle = ctypes.c_int(handle)
subtype = ctypes.c_int(subtype)
degree = ctypes.c_int(degree)
begtim = ctypes.c_double(begtim)
endtim = ctypes.c_double(endtim)
inst = ctypes.c_int(inst)
ref = stypes.stringToCharP(ref)
avflag = ctypes.c_int(avflag)
segid = stypes.stringToCharP(segid)
n = ctypes.c_int(len(packts))
sclkdp = stypes.toDoubleVector(sclkdp)
packts = stypes.toDoubleMatrix(packts)
rate = ctypes.c_double(rate)
nints = ctypes.c_int(nints)
starts = stypes.toDoubleVector(starts)
libspice.ckw05_c(handle, subtype, degree, begtim, endtim, inst, ref, avflag,
segid, n, sclkdp, packts, rate, nints, starts) |
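The stypes helpers above marshal Python values into C types before the libspice call. A minimal standalone illustration of that kind of conversion with plain ctypes (no SPICE required; toDoubleVector is assumed to do something along these lines):

import ctypes

values = [1.0, 2.5, 4.0]
c_array = (ctypes.c_double * len(values))(*values)  # contiguous C double[3]
n = ctypes.c_int(len(values))
print(n.value, list(c_array))  # 3 [1.0, 2.5, 4.0]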
def validate(self, r):
'''
Called automatically by self.result.
'''
if self.show_invalid:
r.valid = True
elif r.valid:
if not r.description:
r.valid = False
if r.size and (r.size + r.offset) > r.file.size:
r.valid = False
if r.jump and (r.jump + r.offset) > r.file.size:
r.valid = False
if hasattr(r, "location") and (r.location != r.offset):
r.valid = False
if r.valid:
# Don't keep displaying signatures that repeat a bunch of times
# (e.g., JFFS2 nodes)
if r.id == self.one_of_many:
r.display = False
elif r.many:
self.one_of_many = r.id
else:
self.one_of_many = None | Called automatically by self.result. | Below is the the instruction that describes the task:
### Input:
Called automatically by self.result.
### Response:
def validate(self, r):
'''
Called automatically by self.result.
'''
if self.show_invalid:
r.valid = True
elif r.valid:
if not r.description:
r.valid = False
if r.size and (r.size + r.offset) > r.file.size:
r.valid = False
if r.jump and (r.jump + r.offset) > r.file.size:
r.valid = False
if hasattr(r, "location") and (r.location != r.offset):
r.valid = False
if r.valid:
# Don't keep displaying signatures that repeat a bunch of times
# (e.g., JFFS2 nodes)
if r.id == self.one_of_many:
r.display = False
elif r.many:
self.one_of_many = r.id
else:
self.one_of_many = None |
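A standalone sketch of the bounds checks above, with types.SimpleNamespace standing in for a binwalk result object (field names mirror those used in validate(), but the helper itself is illustrative):

from types import SimpleNamespace

def in_bounds(r):
    # A claimed size or jump that runs past the end of the file invalidates the result.
    if r.size and (r.size + r.offset) > r.file.size:
        return False
    if r.jump and (r.jump + r.offset) > r.file.size:
        return False
    return True

r = SimpleNamespace(offset=900, size=200, jump=0, file=SimpleNamespace(size=1024))
print(in_bounds(r))  # False: the claimed size runs past the end of the file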
def call(function, *args, **kwargs):
"""
    Call a function or constructor with given args and kwargs after removing args and kwargs that don't match
function or constructor signature
:param function: Function or constructor to call
:type function: callable
:param args:
:type args:
:param kwargs:
:type kwargs:
    :return: same value as default function call
:rtype: object
"""
func = constructor_args if inspect.isclass(function) else function_args
call_args, call_kwargs = func(function, *args, **kwargs)
    return function(*call_args, **call_kwargs) | Call a function or constructor with given args and kwargs after removing args and kwargs that don't match
function or constructor signature
:param function: Function or constructor to call
:type function: callable
:param args:
:type args:
:param kwargs:
:type kwargs:
:return: same value as default function call
:rtype: object | Below is the the instruction that describes the task:
### Input:
Call a function or constructor with given args and kwargs after removing args and kwargs that don't match
function or constructor signature
:param function: Function or constructor to call
:type function: callable
:param args:
:type args:
:param kwargs:
:type kwargs:
:return: same value as default function call
:rtype: object
### Response:
def call(function, *args, **kwargs):
"""
    Call a function or constructor with given args and kwargs after removing args and kwargs that don't match
function or constructor signature
:param function: Function or constructor to call
:type function: callable
:param args:
:type args:
:param kwargs:
:type kwargs:
    :return: same value as default function call
:rtype: object
"""
func = constructor_args if inspect.isclass(function) else function_args
call_args, call_kwargs = func(function, *args, **kwargs)
return function(*call_args, **call_kwargs) |
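The helpers constructor_args/function_args are not shown in this entry; a simplified, self-contained version of the same idea (keyword filtering only) can be built on inspect.signature:

import inspect

def call_compatible(func, *args, **kwargs):
    params = inspect.signature(func).parameters
    accepts_var_kw = any(p.kind is inspect.Parameter.VAR_KEYWORD for p in params.values())
    if not accepts_var_kw:
        # Drop keyword arguments the callable does not accept.
        kwargs = {k: v for k, v in kwargs.items() if k in params}
    return func(*args, **kwargs)

def greet(name, punctuation='!'):
    return 'Hello ' + name + punctuation

print(call_compatible(greet, 'Ada', punctuation='?', unused='dropped'))  # Hello Ada?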
def generate_py(module_name, code, optimizations=None, module_dir=None):
'''python + pythran spec -> py code
Prints and returns the optimized python code.
'''
pm, ir, _, _ = front_middle_end(module_name, code, optimizations,
module_dir)
return pm.dump(Python, ir) | python + pythran spec -> py code
Prints and returns the optimized python code. | Below is the the instruction that describes the task:
### Input:
python + pythran spec -> py code
Prints and returns the optimized python code.
### Response:
def generate_py(module_name, code, optimizations=None, module_dir=None):
'''python + pythran spec -> py code
Prints and returns the optimized python code.
'''
pm, ir, _, _ = front_middle_end(module_name, code, optimizations,
module_dir)
return pm.dump(Python, ir) |
def _set_priv(self, v, load=False):
"""
Setter method for priv, mapped from YANG variable /rbridge_id/snmp_server/user/priv (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_priv is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priv() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'AES128': {'value': 2}, u'DES': {'value': 0}, u'nopriv': {'value': 1}},), default=unicode("nopriv"), is_leaf=True, yang_name="priv", rest_name="priv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Privacy protocol for username (Default=nopriv)'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """priv must be of a type compatible with enumeration""",
'defined-type': "brocade-snmp:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'AES128': {'value': 2}, u'DES': {'value': 0}, u'nopriv': {'value': 1}},), default=unicode("nopriv"), is_leaf=True, yang_name="priv", rest_name="priv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Privacy protocol for username (Default=nopriv)'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='enumeration', is_config=True)""",
})
self.__priv = t
if hasattr(self, '_set'):
self._set() | Setter method for priv, mapped from YANG variable /rbridge_id/snmp_server/user/priv (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_priv is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priv() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for priv, mapped from YANG variable /rbridge_id/snmp_server/user/priv (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_priv is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priv() directly.
### Response:
def _set_priv(self, v, load=False):
"""
Setter method for priv, mapped from YANG variable /rbridge_id/snmp_server/user/priv (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_priv is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priv() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'AES128': {'value': 2}, u'DES': {'value': 0}, u'nopriv': {'value': 1}},), default=unicode("nopriv"), is_leaf=True, yang_name="priv", rest_name="priv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Privacy protocol for username (Default=nopriv)'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """priv must be of a type compatible with enumeration""",
'defined-type': "brocade-snmp:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'AES128': {'value': 2}, u'DES': {'value': 0}, u'nopriv': {'value': 1}},), default=unicode("nopriv"), is_leaf=True, yang_name="priv", rest_name="priv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Privacy protocol for username (Default=nopriv)'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='enumeration', is_config=True)""",
})
self.__priv = t
if hasattr(self, '_set'):
self._set() |
def query(self, *args):
""" Send a query to the watchman service and return the response
This call will block until the response is returned.
If any unilateral responses are sent by the service in between
the request-response they will be buffered up in the client object
and NOT returned via this method.
"""
log("calling client.query")
self._connect()
try:
self.sendConn.send(args)
res = self.receive()
while self.isUnilateralResponse(res):
res = self.receive()
return res
except EnvironmentError as ee:
# When we can depend on Python 3, we can use PEP 3134
# exception chaining here.
raise WatchmanEnvironmentError(
"I/O error communicating with watchman daemon",
ee.errno,
ee.strerror,
args,
)
except WatchmanError as ex:
ex.setCommand(args)
raise | Send a query to the watchman service and return the response
This call will block until the response is returned.
If any unilateral responses are sent by the service in between
the request-response they will be buffered up in the client object
and NOT returned via this method. | Below is the the instruction that describes the task:
### Input:
Send a query to the watchman service and return the response
This call will block until the response is returned.
If any unilateral responses are sent by the service in between
the request-response they will be buffered up in the client object
and NOT returned via this method.
### Response:
def query(self, *args):
""" Send a query to the watchman service and return the response
This call will block until the response is returned.
If any unilateral responses are sent by the service in between
the request-response they will be buffered up in the client object
and NOT returned via this method.
"""
log("calling client.query")
self._connect()
try:
self.sendConn.send(args)
res = self.receive()
while self.isUnilateralResponse(res):
res = self.receive()
return res
except EnvironmentError as ee:
# When we can depend on Python 3, we can use PEP 3134
# exception chaining here.
raise WatchmanEnvironmentError(
"I/O error communicating with watchman daemon",
ee.errno,
ee.strerror,
args,
)
except WatchmanError as ex:
ex.setCommand(args)
raise |
def items(sanitize=False):
'''
Return all of the minion's grains
CLI Example:
.. code-block:: bash
salt '*' grains.items
Sanitized CLI Example:
.. code-block:: bash
salt '*' grains.items sanitize=True
'''
if salt.utils.data.is_true(sanitize):
out = dict(__grains__)
for key, func in six.iteritems(_SANITIZERS):
if key in out:
out[key] = func(out[key])
return out
else:
return __grains__ | Return all of the minion's grains
CLI Example:
.. code-block:: bash
salt '*' grains.items
Sanitized CLI Example:
.. code-block:: bash
salt '*' grains.items sanitize=True | Below is the the instruction that describes the task:
### Input:
Return all of the minion's grains
CLI Example:
.. code-block:: bash
salt '*' grains.items
Sanitized CLI Example:
.. code-block:: bash
salt '*' grains.items sanitize=True
### Response:
def items(sanitize=False):
'''
Return all of the minion's grains
CLI Example:
.. code-block:: bash
salt '*' grains.items
Sanitized CLI Example:
.. code-block:: bash
salt '*' grains.items sanitize=True
'''
if salt.utils.data.is_true(sanitize):
out = dict(__grains__)
for key, func in six.iteritems(_SANITIZERS):
if key in out:
out[key] = func(out[key])
return out
else:
return __grains__ |
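A standalone sketch of the sanitize pass: selected keys are run through scrubbing functions before the mapping is returned (the sanitizer below is illustrative, not Salt's actual one):

_SANITIZERS = {'serialnumber': lambda v: v[:4] + '*' * (len(v) - 4)}  # illustrative scrubber

def sanitized(grains):
    out = dict(grains)
    for key, func in _SANITIZERS.items():
        if key in out:
            out[key] = func(out[key])
    return out

print(sanitized({'os': 'Linux', 'serialnumber': 'ABC123456'}))
# {'os': 'Linux', 'serialnumber': 'ABC1*****'}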
def get_proxy_url(self, pgt):
"""Returns proxy url, given the proxy granting ticket"""
params = urllib_parse.urlencode({'pgt': pgt, 'targetService': self.service_url})
return "%s/proxy?%s" % (self.server_url, params) | Returns proxy url, given the proxy granting ticket | Below is the the instruction that describes the task:
### Input:
Returns proxy url, given the proxy granting ticket
### Response:
def get_proxy_url(self, pgt):
"""Returns proxy url, given the proxy granting ticket"""
params = urllib_parse.urlencode({'pgt': pgt, 'targetService': self.service_url})
return "%s/proxy?%s" % (self.server_url, params) |
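A self-contained illustration of the URL this builds, with hypothetical server, ticket, and service values:

from urllib import parse as urllib_parse

server_url = 'https://cas.example.com/cas'        # hypothetical CAS server
params = urllib_parse.urlencode({'pgt': 'PGT-123', 'targetService': 'https://app.example.com/'})
print('%s/proxy?%s' % (server_url, params))
# https://cas.example.com/cas/proxy?pgt=PGT-123&targetService=https%3A%2F%2Fapp.example.com%2F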
def forwards(self, orm):
"Write your forwards methods here."
for category in orm['document_library.DocumentCategory'].objects.all():
category.is_published = True
category.save() | Write your forwards methods here. | Below is the the instruction that describes the task:
### Input:
Write your forwards methods here.
### Response:
def forwards(self, orm):
"Write your forwards methods here."
for category in orm['document_library.DocumentCategory'].objects.all():
category.is_published = True
category.save() |
def isCommaList(inputFilelist):
"""Return True if the input is a comma separated list of names."""
if isinstance(inputFilelist, int) or isinstance(inputFilelist, np.int32):
ilist = str(inputFilelist)
else:
ilist = inputFilelist
if "," in ilist:
return True
return False | Return True if the input is a comma separated list of names. | Below is the the instruction that describes the task:
### Input:
Return True if the input is a comma separated list of names.
### Response:
def isCommaList(inputFilelist):
"""Return True if the input is a comma separated list of names."""
if isinstance(inputFilelist, int) or isinstance(inputFilelist, np.int32):
ilist = str(inputFilelist)
else:
ilist = inputFilelist
if "," in ilist:
return True
return False |
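A quick standalone check of the same membership test (numpy is not needed for this sketch, and the filenames are hypothetical):

def is_comma_list(value):
    return ',' in str(value)

print(is_comma_list('j8cw01u2q_flt.fits,j8cw01u3q_flt.fits'))  # True
print(is_comma_list('j8cw01u2q_flt.fits'))                     # False
print(is_comma_list(42))                                       # False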
def derivative(f, t):
"""Fourth-order finite-differencing with non-uniform time steps
The formula for this finite difference comes from Eq. (A 5b) of "Derivative formulas and errors for non-uniformly
spaced points" by M. K. Bowen and Ronald Smith. As explained in their Eqs. (B 9b) and (B 10b), this is a
fourth-order formula -- though that's a squishy concept with non-uniform time steps.
TODO: If there are fewer than five points, the function should revert to simpler (lower-order) formulas.
"""
dfdt = np.empty_like(f)
if (f.ndim == 1):
_derivative(f, t, dfdt)
elif (f.ndim == 2):
_derivative_2d(f, t, dfdt)
elif (f.ndim == 3):
_derivative_3d(f, t, dfdt)
else:
raise NotImplementedError("Taking derivatives of {0}-dimensional arrays is not yet implemented".format(f.ndim))
return dfdt | Fourth-order finite-differencing with non-uniform time steps
The formula for this finite difference comes from Eq. (A 5b) of "Derivative formulas and errors for non-uniformly
spaced points" by M. K. Bowen and Ronald Smith. As explained in their Eqs. (B 9b) and (B 10b), this is a
fourth-order formula -- though that's a squishy concept with non-uniform time steps.
TODO: If there are fewer than five points, the function should revert to simpler (lower-order) formulas. | Below is the the instruction that describes the task:
### Input:
Fourth-order finite-differencing with non-uniform time steps
The formula for this finite difference comes from Eq. (A 5b) of "Derivative formulas and errors for non-uniformly
spaced points" by M. K. Bowen and Ronald Smith. As explained in their Eqs. (B 9b) and (B 10b), this is a
fourth-order formula -- though that's a squishy concept with non-uniform time steps.
TODO: If there are fewer than five points, the function should revert to simpler (lower-order) formulas.
### Response:
def derivative(f, t):
"""Fourth-order finite-differencing with non-uniform time steps
The formula for this finite difference comes from Eq. (A 5b) of "Derivative formulas and errors for non-uniformly
spaced points" by M. K. Bowen and Ronald Smith. As explained in their Eqs. (B 9b) and (B 10b), this is a
fourth-order formula -- though that's a squishy concept with non-uniform time steps.
TODO: If there are fewer than five points, the function should revert to simpler (lower-order) formulas.
"""
dfdt = np.empty_like(f)
if (f.ndim == 1):
_derivative(f, t, dfdt)
elif (f.ndim == 2):
_derivative_2d(f, t, dfdt)
elif (f.ndim == 3):
_derivative_3d(f, t, dfdt)
else:
raise NotImplementedError("Taking derivatives of {0}-dimensional arrays is not yet implemented".format(f.ndim))
return dfdt |
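For a rough cross-check, numpy's gradient also accepts non-uniform sample points, though only at second order rather than the fourth-order scheme above; a small runnable comparison on an analytic case (sin -> cos):

import numpy as np

t = np.linspace(0.0, 1.0, 101) ** 2 * 2.0 * np.pi   # non-uniformly spaced sample times
f = np.sin(t)
dfdt = np.gradient(f, t)                             # second-order differences on non-uniform t
print(bool(np.max(np.abs(dfdt - np.cos(t))) < 1e-2))  # True for this grid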
def convergent_round(value, ndigits=0):
"""Convergent rounding.
    Round to nearest even, similar to Python 3's round() method.
"""
if sys.version_info[0] < 3:
if value < 0.0:
return -convergent_round(-value)
epsilon = 0.0000001
integral_part, _ = divmod(value, 1)
if abs(value - (integral_part + 0.5)) < epsilon:
if integral_part % 2.0 < epsilon:
return integral_part
else:
nearest_even = integral_part + 0.5
return math.ceil(nearest_even)
return round(value, ndigits) | Convergent rounding.
Round to nearest even, similar to Python 3's round() method. | Below is the the instruction that describes the task:
### Input:
Convergent rounding.
Round to nearest even, similar to Python 3's round() method.
### Response:
def convergent_round(value, ndigits=0):
"""Convergent rounding.
    Round to nearest even, similar to Python 3's round() method.
"""
if sys.version_info[0] < 3:
if value < 0.0:
return -convergent_round(-value)
epsilon = 0.0000001
integral_part, _ = divmod(value, 1)
if abs(value - (integral_part + 0.5)) < epsilon:
if integral_part % 2.0 < epsilon:
return integral_part
else:
nearest_even = integral_part + 0.5
return math.ceil(nearest_even)
return round(value, ndigits) |
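On Python 3 the built-in round() already does this half-to-even (banker's) rounding, which is the behaviour the Python 2 branch above emulates; a quick check:

print(round(0.5), round(1.5), round(2.5), round(3.5))  # 0 2 2 4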
def callback(self, sources):
"""When a source is selected, enable widgets that depend on that condition
and do done_callback"""
enable = bool(sources)
if not enable:
self.plot_widget.value = False
enable_widget(self.plot_widget, enable)
if self.done_callback:
self.done_callback(sources) | When a source is selected, enable widgets that depend on that condition
and do done_callback | Below is the the instruction that describes the task:
### Input:
When a source is selected, enable widgets that depend on that condition
and do done_callback
### Response:
def callback(self, sources):
"""When a source is selected, enable widgets that depend on that condition
and do done_callback"""
enable = bool(sources)
if not enable:
self.plot_widget.value = False
enable_widget(self.plot_widget, enable)
if self.done_callback:
self.done_callback(sources) |
def unpause(self):
"""Unpauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 0)
if res < 1:
raise RTMPError("Failed to unpause") | Unpauses the stream. | Below is the the instruction that describes the task:
### Input:
Unpauses the stream.
### Response:
def unpause(self):
"""Unpauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 0)
if res < 1:
raise RTMPError("Failed to unpause") |
def add_extra_dim(self, name, type, description=""):
""" Adds a new extra dimension to the point record
Parameters
----------
name: str
the name of the dimension
type: str
type of the dimension (eg 'uint8')
description: str, optional
a small description of the dimension
"""
name = name.replace(" ", "_")
type_id = extradims.get_id_for_extra_dim_type(type)
extra_byte = ExtraBytesStruct(
data_type=type_id, name=name.encode(), description=description.encode()
)
try:
extra_bytes_vlr = self.vlrs.get("ExtraBytesVlr")[0]
except IndexError:
extra_bytes_vlr = ExtraBytesVlr()
self.vlrs.append(extra_bytes_vlr)
finally:
extra_bytes_vlr.extra_bytes_structs.append(extra_byte)
self.points_data.add_extra_dims([(name, type)]) | Adds a new extra dimension to the point record
Parameters
----------
name: str
the name of the dimension
type: str
type of the dimension (eg 'uint8')
description: str, optional
a small description of the dimension | Below is the the instruction that describes the task:
### Input:
Adds a new extra dimension to the point record
Parameters
----------
name: str
the name of the dimension
type: str
type of the dimension (eg 'uint8')
description: str, optional
a small description of the dimension
### Response:
def add_extra_dim(self, name, type, description=""):
""" Adds a new extra dimension to the point record
Parameters
----------
name: str
the name of the dimension
type: str
type of the dimension (eg 'uint8')
description: str, optional
a small description of the dimension
"""
name = name.replace(" ", "_")
type_id = extradims.get_id_for_extra_dim_type(type)
extra_byte = ExtraBytesStruct(
data_type=type_id, name=name.encode(), description=description.encode()
)
try:
extra_bytes_vlr = self.vlrs.get("ExtraBytesVlr")[0]
except IndexError:
extra_bytes_vlr = ExtraBytesVlr()
self.vlrs.append(extra_bytes_vlr)
finally:
extra_bytes_vlr.extra_bytes_structs.append(extra_byte)
self.points_data.add_extra_dims([(name, type)]) |
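Loosely speaking, registering an extra dimension amounts to adding a named field to the underlying point structured array (an assumption about the library's internals). A standalone sketch of that mechanic with plain numpy, no LAS library required and with hypothetical field names:

import numpy as np
from numpy.lib import recfunctions as rfn

points = np.zeros(3, dtype=[('X', 'i4'), ('Y', 'i4')])
points = rfn.append_fields(points, 'heat', np.zeros(3, dtype='u1'), usemask=False)
print(points.dtype.names)  # ('X', 'Y', 'heat')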