code (string, 75-104k chars) | docstring (string, 1-46.9k chars) | text (string, 164-112k chars)
---|---|---|
def translate_to_zipkin(self, span_datas):
"""Translate the opencensus spans to zipkin spans.
:type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param span_datas:
SpanData tuples to emit
:rtype: list
:returns: List of zipkin format spans.
"""
local_endpoint = {
'serviceName': self.service_name,
'port': self.port,
}
if self.ipv4 is not None:
local_endpoint['ipv4'] = self.ipv4
if self.ipv6 is not None:
local_endpoint['ipv6'] = self.ipv6
zipkin_spans = []
for span in span_datas:
# Timestamp in zipkin spans is int of microseconds.
start_timestamp_mus = timestamp_to_microseconds(span.start_time)
end_timestamp_mus = timestamp_to_microseconds(span.end_time)
duration_mus = end_timestamp_mus - start_timestamp_mus
zipkin_span = {
'traceId': span.context.trace_id,
'id': str(span.span_id),
'name': span.name,
'timestamp': int(round(start_timestamp_mus)),
'duration': int(round(duration_mus)),
'localEndpoint': local_endpoint,
'tags': _extract_tags_from_span(span.attributes),
'annotations': _extract_annotations_from_span(span),
}
span_kind = span.span_kind
parent_span_id = span.parent_span_id
if span_kind is not None:
kind = SPAN_KIND_MAP.get(span_kind)
# Zipkin API for span kind only accept
# enum(CLIENT|SERVER|PRODUCER|CONSUMER|Absent)
if kind is not None:
zipkin_span['kind'] = kind
if parent_span_id is not None:
zipkin_span['parentId'] = str(parent_span_id)
zipkin_spans.append(zipkin_span)
return zipkin_spans | Translate the opencensus spans to zipkin spans.
:type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param span_datas:
SpanData tuples to emit
:rtype: list
:returns: List of zipkin format spans. | Below is the instruction that describes the task:
### Input:
Translate the opencensus spans to zipkin spans.
:type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param span_datas:
SpanData tuples to emit
:rtype: list
:returns: List of zipkin format spans.
### Response:
def translate_to_zipkin(self, span_datas):
"""Translate the opencensus spans to zipkin spans.
:type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param span_datas:
SpanData tuples to emit
:rtype: list
:returns: List of zipkin format spans.
"""
local_endpoint = {
'serviceName': self.service_name,
'port': self.port,
}
if self.ipv4 is not None:
local_endpoint['ipv4'] = self.ipv4
if self.ipv6 is not None:
local_endpoint['ipv6'] = self.ipv6
zipkin_spans = []
for span in span_datas:
# Timestamp in zipkin spans is int of microseconds.
start_timestamp_mus = timestamp_to_microseconds(span.start_time)
end_timestamp_mus = timestamp_to_microseconds(span.end_time)
duration_mus = end_timestamp_mus - start_timestamp_mus
zipkin_span = {
'traceId': span.context.trace_id,
'id': str(span.span_id),
'name': span.name,
'timestamp': int(round(start_timestamp_mus)),
'duration': int(round(duration_mus)),
'localEndpoint': local_endpoint,
'tags': _extract_tags_from_span(span.attributes),
'annotations': _extract_annotations_from_span(span),
}
span_kind = span.span_kind
parent_span_id = span.parent_span_id
if span_kind is not None:
kind = SPAN_KIND_MAP.get(span_kind)
# Zipkin API for span kind only accept
# enum(CLIENT|SERVER|PRODUCER|CONSUMER|Absent)
if kind is not None:
zipkin_span['kind'] = kind
if parent_span_id is not None:
zipkin_span['parentId'] = str(parent_span_id)
zipkin_spans.append(zipkin_span)
return zipkin_spans |
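A minimal, self-contained sketch of the Zipkin v2 span shape the translator above produces, using a plain-datetime stand-in for timestamp_to_microseconds and hypothetical trace/span IDs (the opencensus helpers and SPAN_KIND_MAP are not imported here):

```python
from datetime import datetime, timezone

def to_microseconds(dt):
    # Zipkin v2 timestamps and durations are integer epoch microseconds.
    return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1_000_000)

start = datetime(2015, 6, 22, 12, 0, 0)
end = datetime(2015, 6, 22, 12, 0, 1, 500000)

zipkin_span = {
    'traceId': '6e0c63257de34c92bf9efcd03927272e',  # hypothetical IDs
    'id': '6e0c63257de34c92',
    'name': 'get_user',
    'timestamp': to_microseconds(start),
    'duration': to_microseconds(end) - to_microseconds(start),
    'localEndpoint': {'serviceName': 'my-service', 'port': 9411},
    'kind': 'SERVER',
}
print(zipkin_span['duration'])  # 1500000 (1.5 s)
```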
def _fixPe(self):
"""
Fixes the necessary fields in the PE file instance in order to create a valid PE32. i.e. SizeOfImage.
"""
sizeOfImage = 0
for sh in self.sectionHeaders:
sizeOfImage += sh.misc
self.ntHeaders.optionaHeader.sizeoOfImage.value = self._sectionAlignment(sizeOfImage + 0x1000) | Fixes the necessary fields in the PE file instance in order to create a valid PE32. i.e. SizeOfImage. | Below is the instruction that describes the task:
### Input:
Fixes the necessary fields in the PE file instance in order to create a valid PE32. i.e. SizeOfImage.
### Response:
def _fixPe(self):
"""
Fixes the necessary fields in the PE file instance in order to create a valid PE32. i.e. SizeOfImage.
"""
sizeOfImage = 0
for sh in self.sectionHeaders:
sizeOfImage += sh.misc
self.ntHeaders.optionaHeader.sizeoOfImage.value = self._sectionAlignment(sizeOfImage + 0x1000) |
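The arithmetic in _fixPe is easier to see in isolation: sum the section sizes, add 0x1000 as above, and round up to the section alignment. A standalone sketch with hypothetical numbers, not the pype-style object API used by the method:

```python
def align_up(value, alignment=0x1000):
    # Round value up to the next multiple of the alignment.
    return (value + alignment - 1) & ~(alignment - 1)

section_sizes = [0x1a00, 0x600, 0x200]  # hypothetical per-section sizes
size_of_image = align_up(sum(section_sizes) + 0x1000)
print(hex(size_of_image))  # 0x4000
```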
def to_html(self, classes=None, notebook=False, border=None):
"""
Render a DataFrame to a html table.
Parameters
----------
classes : str or list-like
classes to include in the `class` attribute of the opening
``<table>`` tag, in addition to the default "dataframe".
notebook : {True, False}, optional, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
``<table>`` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
"""
from pandas.io.formats.html import HTMLFormatter, NotebookFormatter
Klass = NotebookFormatter if notebook else HTMLFormatter
html = Klass(self, classes=classes, border=border).render()
if hasattr(self.buf, 'write'):
buffer_put_lines(self.buf, html)
elif isinstance(self.buf, str):
with open(self.buf, 'w') as f:
buffer_put_lines(f, html)
else:
raise TypeError('buf is not a file name and it has no write '
' method') | Render a DataFrame to a html table.
Parameters
----------
classes : str or list-like
classes to include in the `class` attribute of the opening
``<table>`` tag, in addition to the default "dataframe".
notebook : {True, False}, optional, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
``<table>`` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0 | Below is the instruction that describes the task:
### Input:
Render a DataFrame to a html table.
Parameters
----------
classes : str or list-like
classes to include in the `class` attribute of the opening
``<table>`` tag, in addition to the default "dataframe".
notebook : {True, False}, optional, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
``<table>`` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
### Response:
def to_html(self, classes=None, notebook=False, border=None):
"""
Render a DataFrame to a html table.
Parameters
----------
classes : str or list-like
classes to include in the `class` attribute of the opening
``<table>`` tag, in addition to the default "dataframe".
notebook : {True, False}, optional, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
``<table>`` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
"""
from pandas.io.formats.html import HTMLFormatter, NotebookFormatter
Klass = NotebookFormatter if notebook else HTMLFormatter
html = Klass(self, classes=classes, border=border).render()
if hasattr(self.buf, 'write'):
buffer_put_lines(self.buf, html)
elif isinstance(self.buf, str):
with open(self.buf, 'w') as f:
buffer_put_lines(f, html)
else:
raise TypeError('buf is not a file name and it has no write '
' method') |
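For comparison, the same rendering is normally reached through the public DataFrame.to_html method, which delegates to the formatter shown above. A small usage sketch, assuming pandas is installed:

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})
html = df.to_html(classes=['table', 'table-striped'], border=0)
print(html.splitlines()[0])  # opening <table> tag carries the border and the extra classes
```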
def parse_FASTA_files(options, fasta_file_contents):
''' This function iterates through each filepath in fasta_file_contents and returns a dict mapping (pdbid, chain, file_name) tuples to sequences:
- options is the OptionParser member;
- fasta_file_contents is a map from input filenames to the associated FASTA file contents.
'''
records = {}
reverse_mapping = {}
original_segment_list = []
key_location = {}
sequenceLine = re.compile("^[A-Z]+\n?$")
sequence_offsets = {}
for fasta_file_name, tagged_fasta in sorted(fasta_file_contents.iteritems()):
# Check the tagged pair
fasta = tagged_fasta[0].strip().split('\n')
file_type = tagged_fasta[1]
assert(file_type == 'PDB' or file_type == 'FASTA')
if not fasta:
raise Exception("Empty FASTA file.")
first_line = [line for line in fasta if line.strip()][0]
if first_line[0] != '>':
raise Exception("The FASTA file %s is not formatted properly - the first non-blank line is not a description line (does not start with '>')." % fasta_file_name)
key = None
line_count = 0
record_count = 0
file_keys = []
unique_keys = {}
for line in fasta:
line_count += 1
line = line.strip()
if line:
if line[0] == '>':
record_count += 1
tokens = [t.strip() for t in line[1:].split('|') if t.strip()]
if len(tokens) < 2:
raise Exception("The description line ('%s') of record %d of %s is invalid. It must contain both a protein description and a chain identifier, separated by a pipe ('|') symbol." % (line, record_count, fasta_file_name))
if len(tokens[0]) < 4:
raise Exception("The protein description in the description line ('%s') of record %d of %s is too short. It must be at least four characters long." % (line, record_count, fasta_file_name))
if len(tokens[1]) != 1:
raise Exception("The chain identifier in the description line ('%s') of record %d of %s is the wrong length. It must be exactky one character long." % (line, record_count, fasta_file_name))
# Note: We store the PDB ID as lower-case so that the user does not have to worry about case-sensitivity here (we convert the user's PDB ID argument to lower-case as well)
key = (tokens[0][0:4].lower(), tokens[1], fasta_file_name)
sub_key = (key[0], key[1]) # this is the part of the key that we expect to be unique (the actual key)
key_location[key] = fasta_file_name
if sub_key in unique_keys:
# todo: we include the fasta_file_name in the key - should we not be checking for uniqueness w.r.t. just tokens[0][0:4].lower() and tokens[1] i.e. omitting the fasta_file_name as part of the check for a more stringent check?
raise Exception("Duplicate protein/chain identifier pair. The key %s was generated from both %s and %s. Remember that the first four characters of the protein description are concatenated with the chain letter to generate a 5-character ID which must be unique." % (key, key_location[key], fasta_file_name))
records[key] = [line]
unique_keys[sub_key] = True
file_keys.append(key)
else:
mtchs = sequenceLine.match(line)
if not mtchs:
raise FastaException("Expected a record header or sequence line at line %d." % line_count)
records[key].append(line)
offset = 0
if file_type == 'PDB':
for key in file_keys:
sequence_length = len(''.join(records[key][1:]))
sequence_offsets[key[0] + key[1]] = (offset, offset + 1, offset + sequence_length) # storing the sequence start and end residue IDs here is redundant but simplifies code later on
offset += sequence_length
# We remove non-protein chains from fragment generation although we did consider them above when determining the offsets
# as we expect them to be used in predictions
non_protein_records = []
set_of_rna_dna_codes = set(('A', 'C', 'G', 'T', 'U', 'X', 'Z'))
for key, content_lines in records.iteritems():
mm_sequence = ''.join(content_lines[1:])
assert(re.match('^[A-Z]+$', mm_sequence)) # Allow X or Z because these may exist (X from the RCSB, Z from our input files)
if set(mm_sequence).union(set_of_rna_dna_codes) == set_of_rna_dna_codes:
non_protein_records.append(key)
for non_protein_record in non_protein_records:
del records[non_protein_record]
# If a loops file was passed in, use that to cut up the sequences and concatenate these subsequences to generate a
# shorter sequence to process. This should save a lot of time when the total length of the subsequences is considerably
# shorter than the length of the total sequence e.g. in cases where the protein has @1000 residues but we only care about
# 100 residues in particular loop regions.
# We need to sample all sequences around a loop i.e. if a sequence segment is 7 residues long at positions 13-19 and we
# require 9-mers, we must consider the segment from positions 5-27 so that all possible 9-mers are considered.
residue_offset = max(options.frag_sizes)
if options.loops_file or options.indices:
loops_definition = None
if options.loops_file:
loops_definition = LoopsFile.from_filepath(options.loops_file, ignore_whitespace = True, ignore_errors = False)
# If the user supplied more ranges of residues, use those as well
if options.indices:
if not loops_definition:
loops_definition = LoopsFile('')
for p in options.indices:
if loops_definition:
loops_definition.add(p[0], p[1])
segment_list = loops_definition.get_distinct_segments(residue_offset, residue_offset)
original_segment_list = loops_definition.get_distinct_segments(1, 1) # We are looking for 1-mers so the offset is 1 rather than 0
# Sanity checks
assert(sorted(segment_list) == segment_list) # sanity check
for x in range(len(segment_list)):
segment = segment_list[x]
if x < len(segment_list) - 1:
assert(segment[1] < segment_list[x+1][0]) # sanity check
# Create the generic reverse_mapping from the indices in the sequences defined by the segment_list to the indices in the original sequences.
# This will be used in FASTA sequences to rewrite the fragments files to make them compatible with the original sequences.
# Note that this mapping ignores the length of the sequences (which may vary in length) so it may be mapping residues indices
# which are outside of the length of some of the sequences.
# Create a sorted list of residues of the chain that we will be including in the sequence for fragment generation
# then turn that into a 1-indexed mapping from the order of the residue in the sequence to the original residue ID in the PDB
residues_for_generation = []
for s in segment_list:
residues_for_generation += range(s[0], s[1] + 1)
reverse_mapping['FASTA'] = dict((key, value) for (key, value) in zip(range(1, len(residues_for_generation) + 1), residues_for_generation))
# Create the reverse_mappings from the indices in the PDB sequences defined by the segment_list to the indices in the original sequences.
# Membership in sequence_offsets implies a PDB sequence.
for k, v in sorted(sequence_offsets.iteritems()):
# For each PDB chain, we consider the set of segments (ignoring extra residues due to nmerage for now so that
# we do not include chains by accident e.g. if the user specified the first residues of chain C but none in chain B,
# they probably do not wish to generate fragments for chain B)
chain_residues = range(v[1], v[2] + 1)
residues_for_generation = []
for s in original_segment_list:
# If the original segment lists lie inside the chain residues then we extend the range w.r.t. the nmers
if (chain_residues[0] <= s[0] <= chain_residues[-1]) or (chain_residues[0] <= s[1] <= chain_residues[-1]):
residues_for_generation += range(s[0] - residue_offset + 1, s[1] + residue_offset - 1 + 1)
# Create a sorted list of residues of the chain that we will be including in the sequence for fragment generation
# then turn that into a 1-indexed mapping from the order of the residue in the sequence to the original residue ID in the PDB
chain_residues_for_generation = sorted(set(chain_residues).intersection(set(residues_for_generation)))
reverse_mapping[k] = dict((key, value) for (key, value) in zip(range(1, len(chain_residues_for_generation) + 1), chain_residues_for_generation))
found_at_least_one_sequence = False
for k, v in sorted(records.iteritems()):
assert(v[0].startswith('>'))
subkey = k[0] + k[1]
sequence = ''.join([s.strip() for s in v[1:]])
assert(sequenceLine.match(sequence) != None) # sanity check
cropped_sequence = None
if sequence_offsets.get(subkey):
# PDB chain case
first_residue_id = sequence_offsets[subkey][1]
cropped_sequence = ''.join([sequence[rmv - first_residue_id] for rmk, rmv in sorted(reverse_mapping[subkey].iteritems())])
# Sanity check - check that the remapping from the cropped sequence to the original sequence will work in postprocessing
for x in range(0, len(cropped_sequence)):
assert(cropped_sequence[x] == sequence[reverse_mapping[subkey][x + 1] - sequence_offsets[subkey][0] - 1])
records[k] = [v[0]] + [cropped_sequence[i:i+60] for i in range(0, len(cropped_sequence), 60)] # update the record to only use the truncated sequence
else:
# FASTA chain case
cropped_sequence = ''.join([sequence[rmv - 1] for rmk, rmv in sorted(reverse_mapping['FASTA'].iteritems()) if rmv <= len(sequence)])
# Sanity check - check that the remapping from the cropped sequence to the original sequence will work in postprocessing
for x in range(0, len(cropped_sequence)):
assert(cropped_sequence[x] == sequence[reverse_mapping['FASTA'][x + 1] - 1])
found_at_least_one_sequence = found_at_least_one_sequence or (not not cropped_sequence)
if cropped_sequence:
records[k] = [v[0]] + [cropped_sequence[i:i+60] for i in range(0, len(cropped_sequence), 60)]
else:
del records[k] # delete the chain. todo: test that this works
if not found_at_least_one_sequence:
raise Exception('No sequences were created from the loops/indices and the input sequences. This may be an input error so the job is being terminated.')
if reverse_mapping:
return records, dict(reverse_mapping = reverse_mapping, segment_list = original_segment_list, sequence_offsets = sequence_offsets)
else:
return records, None | This function iterates through each filepath in fasta_file_contents and returns a dict mapping (pdbid, chain, file_name) tuples to sequences:
- options is the OptionParser member;
- fasta_file_contents is a map from input filenames to the associated FASTA file contents. | Below is the instruction that describes the task:
### Input:
This function iterates through each filepath in fasta_file_contents and returns a dict mapping (pdbid, chain, file_name) tuples to sequences:
- options is the OptionParser member;
- fasta_file_contents is a map from input filenames to the associated FASTA file contents.
### Response:
def parse_FASTA_files(options, fasta_file_contents):
''' This function iterates through each filepath in fasta_file_contents and returns a dict mapping (pdbid, chain, file_name) tuples to sequences:
- options is the OptionParser member;
- fasta_file_contents is a map from input filenames to the associated FASTA file contents.
'''
records = {}
reverse_mapping = {}
original_segment_list = []
key_location = {}
sequenceLine = re.compile("^[A-Z]+\n?$")
sequence_offsets = {}
for fasta_file_name, tagged_fasta in sorted(fasta_file_contents.iteritems()):
# Check the tagged pair
fasta = tagged_fasta[0].strip().split('\n')
file_type = tagged_fasta[1]
assert(file_type == 'PDB' or file_type == 'FASTA')
if not fasta:
raise Exception("Empty FASTA file.")
first_line = [line for line in fasta if line.strip()][0]
if first_line[0] != '>':
raise Exception("The FASTA file %s is not formatted properly - the first non-blank line is not a description line (does not start with '>')." % fasta_file_name)
key = None
line_count = 0
record_count = 0
file_keys = []
unique_keys = {}
for line in fasta:
line_count += 1
line = line.strip()
if line:
if line[0] == '>':
record_count += 1
tokens = [t.strip() for t in line[1:].split('|') if t.strip()]
if len(tokens) < 2:
raise Exception("The description line ('%s') of record %d of %s is invalid. It must contain both a protein description and a chain identifier, separated by a pipe ('|') symbol." % (line, record_count, fasta_file_name))
if len(tokens[0]) < 4:
raise Exception("The protein description in the description line ('%s') of record %d of %s is too short. It must be at least four characters long." % (line, record_count, fasta_file_name))
if len(tokens[1]) != 1:
raise Exception("The chain identifier in the description line ('%s') of record %d of %s is the wrong length. It must be exactky one character long." % (line, record_count, fasta_file_name))
# Note: We store the PDB ID as lower-case so that the user does not have to worry about case-sensitivity here (we convert the user's PDB ID argument to lower-case as well)
key = (tokens[0][0:4].lower(), tokens[1], fasta_file_name)
sub_key = (key[0], key[1]) # this is the part of the key that we expect to be unique (the actual key)
key_location[key] = fasta_file_name
if sub_key in unique_keys:
# todo: we include the fasta_file_name in the key - should we not be checking for uniqueness w.r.t. just tokens[0][0:4].lower() and tokens[1] i.e. omitting the fasta_file_name as part of the check for a more stringent check?
raise Exception("Duplicate protein/chain identifier pair. The key %s was generated from both %s and %s. Remember that the first four characters of the protein description are concatenated with the chain letter to generate a 5-character ID which must be unique." % (key, key_location[key], fasta_file_name))
records[key] = [line]
unique_keys[sub_key] = True
file_keys.append(key)
else:
mtchs = sequenceLine.match(line)
if not mtchs:
raise FastaException("Expected a record header or sequence line at line %d." % line_count)
records[key].append(line)
offset = 0
if file_type == 'PDB':
for key in file_keys:
sequence_length = len(''.join(records[key][1:]))
sequence_offsets[key[0] + key[1]] = (offset, offset + 1, offset + sequence_length) # storing the sequence start and end residue IDs here is redundant but simplifies code later on
offset += sequence_length
# We remove non-protein chains from fragment generation although we did consider them above when determining the offsets
# as we expect them to be used in predictions
non_protein_records = []
set_of_rna_dna_codes = set(('A', 'C', 'G', 'T', 'U', 'X', 'Z'))
for key, content_lines in records.iteritems():
mm_sequence = ''.join(content_lines[1:])
assert(re.match('^[A-Z]+$', mm_sequence)) # Allow X or Z because these may exist (X from the RCSB, Z from our input files)
if set(mm_sequence).union(set_of_rna_dna_codes) == set_of_rna_dna_codes:
non_protein_records.append(key)
for non_protein_record in non_protein_records:
del records[non_protein_record]
# If a loops file was passed in, use that to cut up the sequences and concatenate these subsequences to generate a
# shorter sequence to process. This should save a lot of time when the total length of the subsequences is considerably
# shorter than the length of the total sequence e.g. in cases where the protein has @1000 residues but we only care about
# 100 residues in particular loop regions.
# We need to sample all sequences around a loop i.e. if a sequence segment is 7 residues long at positions 13-19 and we
# require 9-mers, we must consider the segment from positions 5-27 so that all possible 9-mers are considered.
residue_offset = max(options.frag_sizes)
if options.loops_file or options.indices:
loops_definition = None
if options.loops_file:
loops_definition = LoopsFile.from_filepath(options.loops_file, ignore_whitespace = True, ignore_errors = False)
# If the user supplied more ranges of residues, use those as well
if options.indices:
if not loops_definition:
loops_definition = LoopsFile('')
for p in options.indices:
if loops_definition:
loops_definition.add(p[0], p[1])
segment_list = loops_definition.get_distinct_segments(residue_offset, residue_offset)
original_segment_list = loops_definition.get_distinct_segments(1, 1) # We are looking for 1-mers so the offset is 1 rather than 0
# Sanity checks
assert(sorted(segment_list) == segment_list) # sanity check
for x in range(len(segment_list)):
segment = segment_list[x]
if x < len(segment_list) - 1:
assert(segment[1] < segment_list[x+1][0]) # sanity check
# Create the generic reverse_mapping from the indices in the sequences defined by the segment_list to the indices in the original sequences.
# This will be used in FASTA sequences to rewrite the fragments files to make them compatible with the original sequences.
# Note that this mapping ignores the length of the sequences (which may vary in length) so it may be mapping residues indices
# which are outside of the length of some of the sequences.
# Create a sorted list of residues of the chain that we will be including in the sequence for fragment generation
# then turn that into a 1-indexed mapping from the order of the residue in the sequence to the original residue ID in the PDB
residues_for_generation = []
for s in segment_list:
residues_for_generation += range(s[0], s[1] + 1)
reverse_mapping['FASTA'] = dict((key, value) for (key, value) in zip(range(1, len(residues_for_generation) + 1), residues_for_generation))
# Create the reverse_mappings from the indices in the PDB sequences defined by the segment_list to the indices in the original sequences.
# Membership in sequence_offsets implies a PDB sequence.
for k, v in sorted(sequence_offsets.iteritems()):
# For each PDB chain, we consider the set of segments (ignoring extra residues due to nmerage for now so that
# we do not include chains by accident e.g. if the user specified the first residues of chain C but none in chain B,
# they probably do not wish to generate fragments for chain B)
chain_residues = range(v[1], v[2] + 1)
residues_for_generation = []
for s in original_segment_list:
# If the original segment lists lie inside the chain residues then we extend the range w.r.t. the nmers
if (chain_residues[0] <= s[0] <= chain_residues[-1]) or (chain_residues[0] <= s[1] <= chain_residues[-1]):
residues_for_generation += range(s[0] - residue_offset + 1, s[1] + residue_offset - 1 + 1)
# Create a sorted list of residues of the chain that we will be including in the sequence for fragment generation
# then turn that into a 1-indexed mapping from the order of the residue in the sequence to the original residue ID in the PDB
chain_residues_for_generation = sorted(set(chain_residues).intersection(set(residues_for_generation)))
reverse_mapping[k] = dict((key, value) for (key, value) in zip(range(1, len(chain_residues_for_generation) + 1), chain_residues_for_generation))
found_at_least_one_sequence = False
for k, v in sorted(records.iteritems()):
assert(v[0].startswith('>'))
subkey = k[0] + k[1]
sequence = ''.join([s.strip() for s in v[1:]])
assert(sequenceLine.match(sequence) != None) # sanity check
cropped_sequence = None
if sequence_offsets.get(subkey):
# PDB chain case
first_residue_id = sequence_offsets[subkey][1]
cropped_sequence = ''.join([sequence[rmv - first_residue_id] for rmk, rmv in sorted(reverse_mapping[subkey].iteritems())])
# Sanity check - check that the remapping from the cropped sequence to the original sequence will work in postprocessing
for x in range(0, len(cropped_sequence)):
assert(cropped_sequence[x] == sequence[reverse_mapping[subkey][x + 1] - sequence_offsets[subkey][0] - 1])
records[k] = [v[0]] + [cropped_sequence[i:i+60] for i in range(0, len(cropped_sequence), 60)] # update the record to only use the truncated sequence
else:
# FASTA chain case
cropped_sequence = ''.join([sequence[rmv - 1] for rmk, rmv in sorted(reverse_mapping['FASTA'].iteritems()) if rmv <= len(sequence)])
# Sanity check - check that the remapping from the cropped sequence to the original sequence will work in postprocessing
for x in range(0, len(cropped_sequence)):
assert(cropped_sequence[x] == sequence[reverse_mapping['FASTA'][x + 1] - 1])
found_at_least_one_sequence = found_at_least_one_sequence or (not not cropped_sequence)
if cropped_sequence:
records[k] = [v[0]] + [cropped_sequence[i:i+60] for i in range(0, len(cropped_sequence), 60)]
else:
del records[k] # delete the chain. todo: test that this works
if not found_at_least_one_sequence:
raise Exception('No sequences were created from the loops/indices and the input sequences. This may be an input error so the job is being terminated.')
if reverse_mapping:
return records, dict(reverse_mapping = reverse_mapping, segment_list = original_segment_list, sequence_offsets = sequence_offsets)
else:
return records, None |
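The heart of the function is the 1-indexed reverse mapping from positions in the cropped sequence back to original residue IDs. A self-contained sketch with hypothetical loop segments (inclusive start/end pairs), independent of the LoopsFile API:

```python
segments = [(5, 9), (20, 23)]  # hypothetical (start, end) residue ranges, inclusive
residues = []
for start, end in segments:
    residues.extend(range(start, end + 1))
reverse_mapping = {i: residue for i, residue in enumerate(residues, start=1)}
print(reverse_mapping)
# {1: 5, 2: 6, 3: 7, 4: 8, 5: 9, 6: 20, 7: 21, 8: 22, 9: 23}
```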
def dem_url_dia(dt_day='2015-06-22'):
"""Obtiene las urls de descarga de los datos de demanda energética de un día concreto."""
def _url_tipo_dato(str_dia, k):
url = SERVER + '/archives/{}/download_json?locale=es'.format(D_TIPOS_REQ_DEM[k])
if type(str_dia) is str:
return url + '&date=' + str_dia
else:
return url + '&date=' + str_dia.date().isoformat()
urls = [_url_tipo_dato(dt_day, k) for k in D_TIPOS_REQ_DEM.keys()]
return urls | Obtains the download URLs for the energy demand data for a given day. | Below is the instruction that describes the task:
### Input:
Obtains the download URLs for the energy demand data for a given day.
### Response:
def dem_url_dia(dt_day='2015-06-22'):
"""Obtiene las urls de descarga de los datos de demanda energética de un día concreto."""
def _url_tipo_dato(str_dia, k):
url = SERVER + '/archives/{}/download_json?locale=es'.format(D_TIPOS_REQ_DEM[k])
if type(str_dia) is str:
return url + '&date=' + str_dia
else:
return url + '&date=' + str_dia.date().isoformat()
urls = [_url_tipo_dato(dt_day, k) for k in D_TIPOS_REQ_DEM.keys()]
return urls |
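The function leans on module-level SERVER and D_TIPOS_REQ_DEM constants that are not part of this row; the sketch below substitutes placeholder values purely to show the resulting URL shape:

```python
SERVER = 'https://example.ree.es'                  # placeholder base URL
D_TIPOS_REQ_DEM = {'demanda': 1, 'generacion': 2}  # hypothetical archive IDs

def build_url(archive_id, day='2015-06-22'):
    return SERVER + '/archives/{}/download_json?locale=es&date={}'.format(archive_id, day)

print([build_url(v) for v in D_TIPOS_REQ_DEM.values()])
```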
def is_topology(self, layers=None):
'''
Validate the topology
'''
if layers is None:
layers = self.layers
layers_nodle = []
result = []
for i, layer in enumerate(layers):
if layer.is_delete is False:
layers_nodle.append(i)
while True:
flag_break = True
layers_toremove = []
for layer1 in layers_nodle:
flag_arrive = True
for layer2 in layers[layer1].input:
if layer2 in layers_nodle:
flag_arrive = False
if flag_arrive is True:
for layer2 in layers[layer1].output:
# Size is error
if layers[layer2].set_size(layer1, layers[layer1].size) is False:
return False
layers_toremove.append(layer1)
result.append(layer1)
flag_break = False
for layer in layers_toremove:
layers_nodle.remove(layer)
result.append('|')
if flag_break:
break
# There is loop in graph || some layers can't to arrive
if layers_nodle:
return False
return result | Validate the topology | Below is the instruction that describes the task:
### Input:
Validate the topology
### Response:
def is_topology(self, layers=None):
'''
Validate the topology
'''
if layers is None:
layers = self.layers
layers_nodle = []
result = []
for i, layer in enumerate(layers):
if layer.is_delete is False:
layers_nodle.append(i)
while True:
flag_break = True
layers_toremove = []
for layer1 in layers_nodle:
flag_arrive = True
for layer2 in layers[layer1].input:
if layer2 in layers_nodle:
flag_arrive = False
if flag_arrive is True:
for layer2 in layers[layer1].output:
# Size is error
if layers[layer2].set_size(layer1, layers[layer1].size) is False:
return False
layers_toremove.append(layer1)
result.append(layer1)
flag_break = False
for layer in layers_toremove:
layers_nodle.remove(layer)
result.append('|')
if flag_break:
break
# There is loop in graph || some layers can't to arrive
if layers_nodle:
return False
return result |
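is_topology is essentially a Kahn-style topological ordering over the layer graph, returning False when a cycle or unreachable layer prevents every node from being scheduled. A self-contained version on plain adjacency lists, without the Layer objects or size propagation:

```python
def topological_order(inputs):
    # inputs[i] lists the node indices that node i depends on.
    pending = set(range(len(inputs)))
    order = []
    while pending:
        ready = [n for n in sorted(pending) if not any(d in pending for d in inputs[n])]
        if not ready:
            return False  # cycle or unreachable node, mirroring the False return above
        order.extend(ready)
        pending.difference_update(ready)
    return order

print(topological_order([[], [0], [0, 1]]))  # [0, 1, 2]
print(topological_order([[1], [0]]))         # False
```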
def close(self) -> None:
"""
Closes the connection.
"""
if self.transport is not None and not self.transport.is_closing():
self.transport.close()
if self._connect_lock.locked():
self._connect_lock.release()
self.protocol = None
self.transport = None | Closes the connection. | Below is the instruction that describes the task:
### Input:
Closes the connection.
### Response:
def close(self) -> None:
"""
Closes the connection.
"""
if self.transport is not None and not self.transport.is_closing():
self.transport.close()
if self._connect_lock.locked():
self._connect_lock.release()
self.protocol = None
self.transport = None |
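The one subtle step in close() is releasing the connect lock only when it is actually held. A tiny runnable illustration of that guard with a plain asyncio.Lock (hypothetical names, not the original protocol class):

```python
import asyncio

async def demo():
    connect_lock = asyncio.Lock()
    await connect_lock.acquire()   # pretend a connection attempt is in flight
    if connect_lock.locked():      # same guard as in close() above
        connect_lock.release()
    print('lock released:', not connect_lock.locked())

asyncio.run(demo())
```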
def _complex_dtype(dtype):
"""Patched version of :func:`sporco.linalg.complex_dtype`."""
dt = cp.dtype(dtype)
if dt == cp.dtype('float128'):
return cp.dtype('complex256')
elif dt == cp.dtype('float64'):
return cp.dtype('complex128')
else:
return cp.dtype('complex64') | Patched version of :func:`sporco.linalg.complex_dtype`. | Below is the instruction that describes the task:
### Input:
Patched version of :func:`sporco.linalg.complex_dtype`.
### Response:
def _complex_dtype(dtype):
"""Patched version of :func:`sporco.linalg.complex_dtype`."""
dt = cp.dtype(dtype)
if dt == cp.dtype('float128'):
return cp.dtype('complex256')
elif dt == cp.dtype('float64'):
return cp.dtype('complex128')
else:
return cp.dtype('complex64') |
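The float-to-complex widening can be cross-checked against NumPy's promotion rules (CuPy mirrors NumPy's dtype system); note that float128/complex256 exist only on platforms that provide extended precision:

```python
import numpy as np

print(np.promote_types('float32', np.complex64))   # complex64
print(np.promote_types('float64', np.complex64))   # complex128
```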
def tcp_reassembly(packet, *, count=NotImplemented):
"""Store data for TCP reassembly."""
if 'TCP' in packet:
ip = packet['IP'] if 'IP' in packet else packet['IPv6']
tcp = packet['TCP']
data = dict(
bufid=(
ipaddress.ip_address(ip.src), # source IP address
ipaddress.ip_address(ip.dst), # destination IP address
tcp.sport, # source port
tcp.dport, # destination port
),
num=count, # original packet range number
ack=tcp.ack, # acknowledgement
dsn=tcp.seq, # data sequence number
syn=bool(tcp.flags.S), # synchronise flag
fin=bool(tcp.flags.F), # finish flag
rst=bool(tcp.flags.R), # reset connection flag
payload=bytearray(bytes(tcp.payload)), # raw bytearray type payload
)
raw_len = len(tcp.payload) # payload length, header excludes
data['first'] = tcp.seq # this sequence number
data['last'] = tcp.seq + raw_len # next (wanted) sequence number
data['len'] = raw_len # payload length, header excludes
return True, data
return False, None | Store data for TCP reassembly. | Below is the instruction that describes the task:
### Input:
Store data for TCP reassembly.
### Response:
def tcp_reassembly(packet, *, count=NotImplemented):
"""Store data for TCP reassembly."""
if 'TCP' in packet:
ip = packet['IP'] if 'IP' in packet else packet['IPv6']
tcp = packet['TCP']
data = dict(
bufid=(
ipaddress.ip_address(ip.src), # source IP address
ipaddress.ip_address(ip.dst), # destination IP address
tcp.sport, # source port
tcp.dport, # destination port
),
num=count, # original packet range number
ack=tcp.ack, # acknowledgement
dsn=tcp.seq, # data sequence number
syn=bool(tcp.flags.S), # synchronise flag
fin=bool(tcp.flags.F), # finish flag
rst=bool(tcp.flags.R), # reset connection flag
payload=bytearray(bytes(tcp.payload)), # raw bytearray type payload
)
raw_len = len(tcp.payload) # payload length, header excludes
data['first'] = tcp.seq # this sequence number
data['last'] = tcp.seq + raw_len # next (wanted) sequence number
data['len'] = raw_len # payload length, header excludes
return True, data
return False, None |
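The bufid tuple is what groups segments into a single reassembly buffer: packets sharing (src IP, dst IP, src port, dst port) are merged in sequence-number order. A self-contained sketch with hypothetical segments, without scapy:

```python
import ipaddress
from collections import defaultdict

packets = [  # hypothetical (src, dst, sport, dport, seq, payload) tuples
    ('10.0.0.1', '10.0.0.2', 1234, 80, 1000, b'GET / HT'),
    ('10.0.0.1', '10.0.0.2', 1234, 80, 1008, b'TP/1.1\r\n'),
]

buffers = defaultdict(dict)
for src, dst, sport, dport, seq, payload in packets:
    bufid = (ipaddress.ip_address(src), ipaddress.ip_address(dst), sport, dport)
    buffers[bufid][seq] = payload

for bufid, segments in buffers.items():
    data = b''.join(segments[s] for s in sorted(segments))
    print(bufid, data)
```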
def ramp(time, slope, start, finish=0):
"""
Implements vensim's and xmile's RAMP function
Parameters
----------
time: function
The current time of modelling
slope: float
The slope of the ramp starting at zero at time start
start: float
Time at which the ramp begins
finish: float
Optional. Time at which the ramp ends
Returns
-------
response: float
If prior to ramp start, returns zero
If after ramp ends, returns top of ramp
Examples
--------
"""
t = time()
if t < start:
return 0
else:
if finish <= 0:
return slope * (t - start)
elif t > finish:
return slope * (finish - start)
else:
return slope * (t - start) | Implements vensim's and xmile's RAMP function
Parameters
----------
time: function
The current time of modelling
slope: float
The slope of the ramp starting at zero at time start
start: float
Time at which the ramp begins
finish: float
Optional. Time at which the ramp ends
Returns
-------
response: float
If prior to ramp start, returns zero
If after ramp ends, returns top of ramp
Examples
-------- | Below is the instruction that describes the task:
### Input:
Implements vensim's and xmile's RAMP function
Parameters
----------
time: function
The current time of modelling
slope: float
The slope of the ramp starting at zero at time start
start: float
Time at which the ramp begins
finish: float
Optional. Time at which the ramp ends
Returns
-------
response: float
If prior to ramp start, returns zero
If after ramp ends, returns top of ramp
Examples
--------
### Response:
def ramp(time, slope, start, finish=0):
"""
Implements vensim's and xmile's RAMP function
Parameters
----------
time: function
The current time of modelling
slope: float
The slope of the ramp starting at zero at time start
start: float
Time at which the ramp begins
finish: float
Optional. Time at which the ramp ends
Returns
-------
response: float
If prior to ramp start, returns zero
If after ramp ends, returns top of ramp
Examples
--------
"""
t = time()
if t < start:
return 0
else:
if finish <= 0:
return slope * (t - start)
elif t > finish:
return slope * (finish - start)
else:
return slope * (t - start) |
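A quick numeric check of the piecewise behaviour described in the docstring, using a standalone stand-in (a plain float time instead of the model's time() callable; not the pysd implementation itself):

```python
def ramp_value(t, slope, start, finish=0):
    if t < start:
        return 0
    if finish <= 0:
        return slope * (t - start)
    return slope * (min(t, finish) - start)

for t in (0, 5, 10):
    print(t, ramp_value(t, slope=2, start=3, finish=8))
# 0 -> 0 (before start), 5 -> 4 (on the ramp), 10 -> 10 (clamped at finish)
```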
def addbr(name):
''' Create new bridge with the given name '''
fcntl.ioctl(ifconfig.sockfd, SIOCBRADDBR, name)
return Bridge(name) | Create new bridge with the given name | Below is the instruction that describes the task:
### Input:
Create new bridge with the given name
### Response:
def addbr(name):
''' Create new bridge with the given name '''
fcntl.ioctl(ifconfig.sockfd, SIOCBRADDBR, name)
return Bridge(name) |
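For reference, the same bridge device can be created from userspace without the raw SIOCBRADDBR ioctl, via the iproute2 tools (requires root); this is an alternative, not part of the original module:

```python
import subprocess

subprocess.run(['ip', 'link', 'add', 'name', 'br0', 'type', 'bridge'], check=True)
```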
def attach_volume(volume_id, instance_id, device,
region=None, key=None, keyid=None, profile=None):
'''
Attach an EBS volume to an EC2 instance.
..
volume_id
(string) – The ID of the EBS volume to be attached.
instance_id
(string) – The ID of the EC2 instance to attach the volume to.
device
(string) – The device on the instance through which the volume is exposed (e.g. /dev/sdh)
returns
(bool) - True on success, False on failure.
CLI Example:
.. code-block:: bash
salt-call boto_ec2.attach_volume vol-12345678 i-87654321 /dev/sdh
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
return conn.attach_volume(volume_id, instance_id, device)
except boto.exception.BotoServerError as error:
log.error(error)
return False | Attach an EBS volume to an EC2 instance.
..
volume_id
(string) – The ID of the EBS volume to be attached.
instance_id
(string) – The ID of the EC2 instance to attach the volume to.
device
(string) – The device on the instance through which the volume is exposed (e.g. /dev/sdh)
returns
(bool) - True on success, False on failure.
CLI Example:
.. code-block:: bash
salt-call boto_ec2.attach_volume vol-12345678 i-87654321 /dev/sdh | Below is the instruction that describes the task:
### Input:
Attach an EBS volume to an EC2 instance.
..
volume_id
(string) – The ID of the EBS volume to be attached.
instance_id
(string) – The ID of the EC2 instance to attach the volume to.
device
(string) – The device on the instance through which the volume is exposed (e.g. /dev/sdh)
returns
(bool) - True on success, False on failure.
CLI Example:
.. code-block:: bash
salt-call boto_ec2.attach_volume vol-12345678 i-87654321 /dev/sdh
### Response:
def attach_volume(volume_id, instance_id, device,
region=None, key=None, keyid=None, profile=None):
'''
Attach an EBS volume to an EC2 instance.
..
volume_id
(string) – The ID of the EBS volume to be attached.
instance_id
(string) – The ID of the EC2 instance to attach the volume to.
device
(string) – The device on the instance through which the volume is exposed (e.g. /dev/sdh)
returns
(bool) - True on success, False on failure.
CLI Example:
.. code-block:: bash
salt-call boto_ec2.attach_volume vol-12345678 i-87654321 /dev/sdh
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
return conn.attach_volume(volume_id, instance_id, device)
except boto.exception.BotoServerError as error:
log.error(error)
return False |
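A rough boto3 equivalent of the boto2 call wrapped above, assuming boto3 is installed and AWS credentials are configured; parameter names follow the boto3 EC2 client:

```python
import boto3

ec2 = boto3.client('ec2', region_name='us-east-1')
resp = ec2.attach_volume(VolumeId='vol-12345678', InstanceId='i-87654321', Device='/dev/sdh')
print(resp['State'])  # e.g. 'attaching'
```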
def read_html(io, match='.+', flavor=None, header=None, index_col=None,
skiprows=None, attrs=None, parse_dates=False,
tupleize_cols=None, thousands=',', encoding=None,
decimal='.', converters=None, na_values=None,
keep_default_na=True, displayed_only=True):
r"""Read HTML tables into a ``list`` of ``DataFrame`` objects.
Parameters
----------
io : str or file-like
A URL, a file-like object, or a raw string containing HTML. Note that
lxml only accepts the http, ftp and file url protocols. If you have a
URL that starts with ``'https'`` you might try removing the ``'s'``.
match : str or compiled regular expression, optional
The set of tables containing text matching this regex or string will be
returned. Unless the HTML is extremely simple you will probably need to
pass a non-empty string here. Defaults to '.+' (match any non-empty
string). The default value will return all tables contained on a page.
This value is converted to a regular expression so that there is
consistent behavior between Beautiful Soup and lxml.
flavor : str or None, container of strings
The parsing engine to use. 'bs4' and 'html5lib' are synonymous with
each other, they are both there for backwards compatibility. The
default of ``None`` tries to use ``lxml`` to parse and if that fails it
falls back on ``bs4`` + ``html5lib``.
header : int or list-like or None, optional
The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to
make the columns headers.
index_col : int or list-like or None, optional
The column (or list of columns) to use to create the index.
skiprows : int or list-like or slice or None, optional
0-based. Number of rows to skip after parsing the column integer. If a
sequence of integers or a slice is given, will skip the rows indexed by
that sequence. Note that a single element sequence means 'skip the nth
row' whereas an integer means 'skip n rows'.
attrs : dict or None, optional
This is a dictionary of attributes that you can pass to use to identify
the table in the HTML. These are not checked for validity before being
passed to lxml or Beautiful Soup. However, these attributes must be
valid HTML table attributes to work correctly. For example, ::
attrs = {'id': 'table'}
is a valid attribute dictionary because the 'id' HTML tag attribute is
a valid HTML attribute for *any* HTML tag as per `this document
<http://www.w3.org/TR/html-markup/global-attributes.html>`__. ::
attrs = {'asdf': 'table'}
is *not* a valid attribute dictionary because 'asdf' is not a valid
HTML attribute even if it is a valid XML attribute. Valid HTML 4.01
table attributes can be found `here
<http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A
working draft of the HTML 5 spec can be found `here
<http://www.w3.org/TR/html-markup/table.html>`__. It contains the
latest information on table attributes for the modern web.
parse_dates : bool, optional
See :func:`~read_csv` for more details.
tupleize_cols : bool, optional
If ``False`` try to parse multiple header rows into a
:class:`~pandas.MultiIndex`, otherwise return raw tuples. Defaults to
``False``.
.. deprecated:: 0.21.0
This argument will be removed and will always convert to MultiIndex
thousands : str, optional
Separator to use to parse thousands. Defaults to ``','``.
encoding : str or None, optional
The encoding used to decode the web page. Defaults to ``None``.``None``
preserves the previous encoding behavior, which depends on the
underlying parser library (e.g., the parser library will try to use
the encoding provided by the document).
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European
data).
.. versionadded:: 0.19.0
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the cell (not column) content, and return the
transformed content.
.. versionadded:: 0.19.0
na_values : iterable, default None
Custom NA values
.. versionadded:: 0.19.0
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to
.. versionadded:: 0.19.0
displayed_only : bool, default True
Whether elements with "display: none" should be parsed
.. versionadded:: 0.23.0
Returns
-------
dfs : list of DataFrames
See Also
--------
read_csv
Notes
-----
Before using this function you should read the :ref:`gotchas about the
HTML parsing libraries <io.html.gotchas>`.
Expect to do some cleanup after you call this function. For example, you
might need to manually assign column names if the column names are
converted to NaN when you pass the `header=0` argument. We try to assume as
little as possible about the structure of the table and push the
idiosyncrasies of the HTML contained in the table to the user.
This function searches for ``<table>`` elements and only for ``<tr>``
and ``<th>`` rows and ``<td>`` elements within each ``<tr>`` or ``<th>``
element in the table. ``<td>`` stands for "table data". This function
attempts to properly handle ``colspan`` and ``rowspan`` attributes.
If the function has a ``<thead>`` argument, it is used to construct
the header, otherwise the function attempts to find the header within
the body (by putting rows with only ``<th>`` elements into the header).
.. versionadded:: 0.21.0
Similar to :func:`~read_csv` the `header` argument is applied
**after** `skiprows` is applied.
This function will *always* return a list of :class:`DataFrame` *or*
it will fail, e.g., it will *not* return an empty list.
Examples
--------
See the :ref:`read_html documentation in the IO section of the docs
<io.read_html>` for some examples of reading in HTML tables.
"""
_importers()
# Type check here. We don't want to parse only to fail because of an
# invalid value of an integer skiprows.
if isinstance(skiprows, numbers.Integral) and skiprows < 0:
raise ValueError('cannot skip rows starting from the end of the '
'data (you passed a negative value)')
_validate_header_arg(header)
return _parse(flavor=flavor, io=io, match=match, header=header,
index_col=index_col, skiprows=skiprows,
parse_dates=parse_dates, tupleize_cols=tupleize_cols,
thousands=thousands, attrs=attrs, encoding=encoding,
decimal=decimal, converters=converters, na_values=na_values,
keep_default_na=keep_default_na,
displayed_only=displayed_only) | r"""Read HTML tables into a ``list`` of ``DataFrame`` objects.
Parameters
----------
io : str or file-like
A URL, a file-like object, or a raw string containing HTML. Note that
lxml only accepts the http, ftp and file url protocols. If you have a
URL that starts with ``'https'`` you might try removing the ``'s'``.
match : str or compiled regular expression, optional
The set of tables containing text matching this regex or string will be
returned. Unless the HTML is extremely simple you will probably need to
pass a non-empty string here. Defaults to '.+' (match any non-empty
string). The default value will return all tables contained on a page.
This value is converted to a regular expression so that there is
consistent behavior between Beautiful Soup and lxml.
flavor : str or None, container of strings
The parsing engine to use. 'bs4' and 'html5lib' are synonymous with
each other, they are both there for backwards compatibility. The
default of ``None`` tries to use ``lxml`` to parse and if that fails it
falls back on ``bs4`` + ``html5lib``.
header : int or list-like or None, optional
The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to
make the columns headers.
index_col : int or list-like or None, optional
The column (or list of columns) to use to create the index.
skiprows : int or list-like or slice or None, optional
0-based. Number of rows to skip after parsing the column integer. If a
sequence of integers or a slice is given, will skip the rows indexed by
that sequence. Note that a single element sequence means 'skip the nth
row' whereas an integer means 'skip n rows'.
attrs : dict or None, optional
This is a dictionary of attributes that you can pass to use to identify
the table in the HTML. These are not checked for validity before being
passed to lxml or Beautiful Soup. However, these attributes must be
valid HTML table attributes to work correctly. For example, ::
attrs = {'id': 'table'}
is a valid attribute dictionary because the 'id' HTML tag attribute is
a valid HTML attribute for *any* HTML tag as per `this document
<http://www.w3.org/TR/html-markup/global-attributes.html>`__. ::
attrs = {'asdf': 'table'}
is *not* a valid attribute dictionary because 'asdf' is not a valid
HTML attribute even if it is a valid XML attribute. Valid HTML 4.01
table attributes can be found `here
<http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A
working draft of the HTML 5 spec can be found `here
<http://www.w3.org/TR/html-markup/table.html>`__. It contains the
latest information on table attributes for the modern web.
parse_dates : bool, optional
See :func:`~read_csv` for more details.
tupleize_cols : bool, optional
If ``False`` try to parse multiple header rows into a
:class:`~pandas.MultiIndex`, otherwise return raw tuples. Defaults to
``False``.
.. deprecated:: 0.21.0
This argument will be removed and will always convert to MultiIndex
thousands : str, optional
Separator to use to parse thousands. Defaults to ``','``.
encoding : str or None, optional
The encoding used to decode the web page. Defaults to ``None``.``None``
preserves the previous encoding behavior, which depends on the
underlying parser library (e.g., the parser library will try to use
the encoding provided by the document).
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European
data).
.. versionadded:: 0.19.0
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the cell (not column) content, and return the
transformed content.
.. versionadded:: 0.19.0
na_values : iterable, default None
Custom NA values
.. versionadded:: 0.19.0
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to
.. versionadded:: 0.19.0
displayed_only : bool, default True
Whether elements with "display: none" should be parsed
.. versionadded:: 0.23.0
Returns
-------
dfs : list of DataFrames
See Also
--------
read_csv
Notes
-----
Before using this function you should read the :ref:`gotchas about the
HTML parsing libraries <io.html.gotchas>`.
Expect to do some cleanup after you call this function. For example, you
might need to manually assign column names if the column names are
converted to NaN when you pass the `header=0` argument. We try to assume as
little as possible about the structure of the table and push the
idiosyncrasies of the HTML contained in the table to the user.
This function searches for ``<table>`` elements and only for ``<tr>``
and ``<th>`` rows and ``<td>`` elements within each ``<tr>`` or ``<th>``
element in the table. ``<td>`` stands for "table data". This function
attempts to properly handle ``colspan`` and ``rowspan`` attributes.
If the function has a ``<thead>`` argument, it is used to construct
the header, otherwise the function attempts to find the header within
the body (by putting rows with only ``<th>`` elements into the header).
.. versionadded:: 0.21.0
Similar to :func:`~read_csv` the `header` argument is applied
**after** `skiprows` is applied.
This function will *always* return a list of :class:`DataFrame` *or*
it will fail, e.g., it will *not* return an empty list.
Examples
--------
See the :ref:`read_html documentation in the IO section of the docs
<io.read_html>` for some examples of reading in HTML tables. | Below is the instruction that describes the task:
### Input:
r"""Read HTML tables into a ``list`` of ``DataFrame`` objects.
Parameters
----------
io : str or file-like
A URL, a file-like object, or a raw string containing HTML. Note that
lxml only accepts the http, ftp and file url protocols. If you have a
URL that starts with ``'https'`` you might try removing the ``'s'``.
match : str or compiled regular expression, optional
The set of tables containing text matching this regex or string will be
returned. Unless the HTML is extremely simple you will probably need to
pass a non-empty string here. Defaults to '.+' (match any non-empty
string). The default value will return all tables contained on a page.
This value is converted to a regular expression so that there is
consistent behavior between Beautiful Soup and lxml.
flavor : str or None, container of strings
The parsing engine to use. 'bs4' and 'html5lib' are synonymous with
each other, they are both there for backwards compatibility. The
default of ``None`` tries to use ``lxml`` to parse and if that fails it
falls back on ``bs4`` + ``html5lib``.
header : int or list-like or None, optional
The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to
make the columns headers.
index_col : int or list-like or None, optional
The column (or list of columns) to use to create the index.
skiprows : int or list-like or slice or None, optional
0-based. Number of rows to skip after parsing the column integer. If a
sequence of integers or a slice is given, will skip the rows indexed by
that sequence. Note that a single element sequence means 'skip the nth
row' whereas an integer means 'skip n rows'.
attrs : dict or None, optional
This is a dictionary of attributes that you can pass to use to identify
the table in the HTML. These are not checked for validity before being
passed to lxml or Beautiful Soup. However, these attributes must be
valid HTML table attributes to work correctly. For example, ::
attrs = {'id': 'table'}
is a valid attribute dictionary because the 'id' HTML tag attribute is
a valid HTML attribute for *any* HTML tag as per `this document
<http://www.w3.org/TR/html-markup/global-attributes.html>`__. ::
attrs = {'asdf': 'table'}
is *not* a valid attribute dictionary because 'asdf' is not a valid
HTML attribute even if it is a valid XML attribute. Valid HTML 4.01
table attributes can be found `here
<http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A
working draft of the HTML 5 spec can be found `here
<http://www.w3.org/TR/html-markup/table.html>`__. It contains the
latest information on table attributes for the modern web.
parse_dates : bool, optional
See :func:`~read_csv` for more details.
tupleize_cols : bool, optional
If ``False`` try to parse multiple header rows into a
:class:`~pandas.MultiIndex`, otherwise return raw tuples. Defaults to
``False``.
.. deprecated:: 0.21.0
This argument will be removed and will always convert to MultiIndex
thousands : str, optional
Separator to use to parse thousands. Defaults to ``','``.
encoding : str or None, optional
The encoding used to decode the web page. Defaults to ``None``.``None``
preserves the previous encoding behavior, which depends on the
underlying parser library (e.g., the parser library will try to use
the encoding provided by the document).
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European
data).
.. versionadded:: 0.19.0
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the cell (not column) content, and return the
transformed content.
.. versionadded:: 0.19.0
na_values : iterable, default None
Custom NA values
.. versionadded:: 0.19.0
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to
.. versionadded:: 0.19.0
displayed_only : bool, default True
Whether elements with "display: none" should be parsed
.. versionadded:: 0.23.0
Returns
-------
dfs : list of DataFrames
See Also
--------
read_csv
Notes
-----
Before using this function you should read the :ref:`gotchas about the
HTML parsing libraries <io.html.gotchas>`.
Expect to do some cleanup after you call this function. For example, you
might need to manually assign column names if the column names are
converted to NaN when you pass the `header=0` argument. We try to assume as
little as possible about the structure of the table and push the
idiosyncrasies of the HTML contained in the table to the user.
This function searches for ``<table>`` elements and only for ``<tr>``
and ``<th>`` rows and ``<td>`` elements within each ``<tr>`` or ``<th>``
element in the table. ``<td>`` stands for "table data". This function
attempts to properly handle ``colspan`` and ``rowspan`` attributes.
If the function has a ``<thead>`` argument, it is used to construct
the header, otherwise the function attempts to find the header within
the body (by putting rows with only ``<th>`` elements into the header).
.. versionadded:: 0.21.0
Similar to :func:`~read_csv` the `header` argument is applied
**after** `skiprows` is applied.
This function will *always* return a list of :class:`DataFrame` *or*
it will fail, e.g., it will *not* return an empty list.
Examples
--------
See the :ref:`read_html documentation in the IO section of the docs
<io.read_html>` for some examples of reading in HTML tables.
### Response:
def read_html(io, match='.+', flavor=None, header=None, index_col=None,
skiprows=None, attrs=None, parse_dates=False,
tupleize_cols=None, thousands=',', encoding=None,
decimal='.', converters=None, na_values=None,
keep_default_na=True, displayed_only=True):
r"""Read HTML tables into a ``list`` of ``DataFrame`` objects.
Parameters
----------
io : str or file-like
A URL, a file-like object, or a raw string containing HTML. Note that
lxml only accepts the http, ftp and file url protocols. If you have a
URL that starts with ``'https'`` you might try removing the ``'s'``.
match : str or compiled regular expression, optional
The set of tables containing text matching this regex or string will be
returned. Unless the HTML is extremely simple you will probably need to
pass a non-empty string here. Defaults to '.+' (match any non-empty
string). The default value will return all tables contained on a page.
This value is converted to a regular expression so that there is
consistent behavior between Beautiful Soup and lxml.
flavor : str or None, container of strings
The parsing engine to use. 'bs4' and 'html5lib' are synonymous with
each other, they are both there for backwards compatibility. The
default of ``None`` tries to use ``lxml`` to parse and if that fails it
falls back on ``bs4`` + ``html5lib``.
header : int or list-like or None, optional
The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to
make the columns headers.
index_col : int or list-like or None, optional
The column (or list of columns) to use to create the index.
skiprows : int or list-like or slice or None, optional
0-based. Number of rows to skip after parsing the column integer. If a
sequence of integers or a slice is given, will skip the rows indexed by
that sequence. Note that a single element sequence means 'skip the nth
row' whereas an integer means 'skip n rows'.
attrs : dict or None, optional
This is a dictionary of attributes that you can pass to use to identify
the table in the HTML. These are not checked for validity before being
passed to lxml or Beautiful Soup. However, these attributes must be
valid HTML table attributes to work correctly. For example, ::
attrs = {'id': 'table'}
is a valid attribute dictionary because the 'id' HTML tag attribute is
a valid HTML attribute for *any* HTML tag as per `this document
<http://www.w3.org/TR/html-markup/global-attributes.html>`__. ::
attrs = {'asdf': 'table'}
is *not* a valid attribute dictionary because 'asdf' is not a valid
HTML attribute even if it is a valid XML attribute. Valid HTML 4.01
table attributes can be found `here
<http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A
working draft of the HTML 5 spec can be found `here
<http://www.w3.org/TR/html-markup/table.html>`__. It contains the
latest information on table attributes for the modern web.
parse_dates : bool, optional
See :func:`~read_csv` for more details.
tupleize_cols : bool, optional
If ``False`` try to parse multiple header rows into a
:class:`~pandas.MultiIndex`, otherwise return raw tuples. Defaults to
``False``.
.. deprecated:: 0.21.0
This argument will be removed and will always convert to MultiIndex
thousands : str, optional
Separator to use to parse thousands. Defaults to ``','``.
encoding : str or None, optional
    The encoding used to decode the web page. Defaults to ``None``. ``None``
preserves the previous encoding behavior, which depends on the
underlying parser library (e.g., the parser library will try to use
the encoding provided by the document).
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European
data).
.. versionadded:: 0.19.0
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the cell (not column) content, and return the
transformed content.
.. versionadded:: 0.19.0
na_values : iterable, default None
Custom NA values
.. versionadded:: 0.19.0
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to
.. versionadded:: 0.19.0
displayed_only : bool, default True
Whether elements with "display: none" should be parsed
.. versionadded:: 0.23.0
Returns
-------
dfs : list of DataFrames
See Also
--------
read_csv
Notes
-----
Before using this function you should read the :ref:`gotchas about the
HTML parsing libraries <io.html.gotchas>`.
Expect to do some cleanup after you call this function. For example, you
might need to manually assign column names if the column names are
converted to NaN when you pass the `header=0` argument. We try to assume as
little as possible about the structure of the table and push the
idiosyncrasies of the HTML contained in the table to the user.
This function searches for ``<table>`` elements and only for ``<tr>``
and ``<th>`` rows and ``<td>`` elements within each ``<tr>`` or ``<th>``
element in the table. ``<td>`` stands for "table data". This function
attempts to properly handle ``colspan`` and ``rowspan`` attributes.
If the function has a ``<thead>`` argument, it is used to construct
the header, otherwise the function attempts to find the header within
the body (by putting rows with only ``<th>`` elements into the header).
.. versionadded:: 0.21.0
Similar to :func:`~read_csv` the `header` argument is applied
**after** `skiprows` is applied.
This function will *always* return a list of :class:`DataFrame` *or*
it will fail, e.g., it will *not* return an empty list.
Examples
--------
See the :ref:`read_html documentation in the IO section of the docs
<io.read_html>` for some examples of reading in HTML tables.
"""
_importers()
# Type check here. We don't want to parse only to fail because of an
# invalid value of an integer skiprows.
if isinstance(skiprows, numbers.Integral) and skiprows < 0:
raise ValueError('cannot skip rows starting from the end of the '
'data (you passed a negative value)')
_validate_header_arg(header)
return _parse(flavor=flavor, io=io, match=match, header=header,
index_col=index_col, skiprows=skiprows,
parse_dates=parse_dates, tupleize_cols=tupleize_cols,
thousands=thousands, attrs=attrs, encoding=encoding,
decimal=decimal, converters=converters, na_values=na_values,
keep_default_na=keep_default_na,
displayed_only=displayed_only) |
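A minimal usage sketch for read_html, not taken from the original source: it assumes pandas is installed together with lxml or bs4+html5lib, and the inline HTML table is made up for illustration.
import io
import pandas as pd

# A tiny inline table; read_html also accepts URLs and file-like objects.
html = """
<table>
  <tr><th>city</th><th>population</th></tr>
  <tr><td>Oslo</td><td>709,000</td></tr>
  <tr><td>Bergen</td><td>291,000</td></tr>
</table>
"""

# read_html always returns a list of DataFrames, one per matched <table>.
tables = pd.read_html(io.StringIO(html), header=0, thousands=',')
print(tables[0])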
def get_top_referrers(self):
"""
:calls: `GET /repos/:owner/:repo/traffic/popular/referrers <https://developer.github.com/v3/repos/traffic/>`_
:rtype: :class:`list` of :class:`github.Referrer.Referrer`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/traffic/popular/referrers"
)
if isinstance(data, list):
return [
github.Referrer.Referrer(self._requester, headers, item, completed=True)
for item in data
] | :calls: `GET /repos/:owner/:repo/traffic/popular/referrers <https://developer.github.com/v3/repos/traffic/>`_
        :rtype: :class:`list` of :class:`github.Referrer.Referrer` | Below is the instruction that describes the task:
### Input:
:calls: `GET /repos/:owner/:repo/traffic/popular/referrers <https://developer.github.com/v3/repos/traffic/>`_
:rtype: :class:`list` of :class:`github.Referrer.Referrer`
### Response:
def get_top_referrers(self):
"""
:calls: `GET /repos/:owner/:repo/traffic/popular/referrers <https://developer.github.com/v3/repos/traffic/>`_
:rtype: :class:`list` of :class:`github.Referrer.Referrer`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/traffic/popular/referrers"
)
if isinstance(data, list):
return [
github.Referrer.Referrer(self._requester, headers, item, completed=True)
for item in data
] |
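For comparison, the same endpoint can also be called without PyGithub; a hedged sketch using requests, where OWNER, REPO and the token are placeholders and push access to the repository is required.
import requests

# Placeholders: substitute a real repository and a token with push access.
url = 'https://api.github.com/repos/OWNER/REPO/traffic/popular/referrers'
resp = requests.get(url, headers={'Authorization': 'token YOUR_TOKEN',
                                  'Accept': 'application/vnd.github+json'})
resp.raise_for_status()
for referrer in resp.json():
    print(referrer['referrer'], referrer['count'], referrer['uniques'])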
def strip_xml_declaration(file_or_xml):
"""
Removes XML declaration line from file or string passed in.
If file_or_xml is not a file or string, it is returned as is.
"""
xml_content = _xml_content_to_string(file_or_xml)
if not isinstance(xml_content, string_types):
return xml_content
# For Python 2 compliance: replacement string must not specify unicode u''
return _XML_DECLARATION_REGEX.sub(r'', xml_content, 1) | Removes XML declaration line from file or string passed in.
        If file_or_xml is not a file or string, it is returned as is. | Below is the instruction that describes the task:
### Input:
Removes XML declaration line from file or string passed in.
If file_or_xml is not a file or string, it is returned as is.
### Response:
def strip_xml_declaration(file_or_xml):
"""
Removes XML declaration line from file or string passed in.
If file_or_xml is not a file or string, it is returned as is.
"""
xml_content = _xml_content_to_string(file_or_xml)
if not isinstance(xml_content, string_types):
return xml_content
# For Python 2 compliance: replacement string must not specify unicode u''
return _XML_DECLARATION_REGEX.sub(r'', xml_content, 1) |
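The helpers _xml_content_to_string and _XML_DECLARATION_REGEX are not shown above, so here is a self-contained sketch of the same idea; the declaration pattern below is an assumption, not the module's actual regex.
import re

XML_DECLARATION_REGEX = re.compile(r'^\s*<\?xml[^>]*\?>', re.IGNORECASE)

def strip_declaration(xml_text):
    # Remove at most one leading XML declaration.
    return XML_DECLARATION_REGEX.sub('', xml_text, 1)

print(strip_declaration('<?xml version="1.0" encoding="UTF-8"?><root/>'))
# -> '<root/>'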
def process(self, candidates):
"""
:arg list candidates: list of Candidates
:returns: list of Candidates where score is at least min_score,
if and only if one or more Candidates have at least min_score.
Otherwise, returns original list of Candidates.
"""
high_score_candidates = [c for c in candidates if c.score >= self.min_score]
if high_score_candidates != []:
return high_score_candidates
return candidates | :arg list candidates: list of Candidates
:returns: list of Candidates where score is at least min_score,
if and only if one or more Candidates have at least min_score.
        Otherwise, returns original list of Candidates. | Below is the instruction that describes the task:
### Input:
:arg list candidates: list of Candidates
:returns: list of Candidates where score is at least min_score,
if and only if one or more Candidates have at least min_score.
Otherwise, returns original list of Candidates.
### Response:
def process(self, candidates):
"""
:arg list candidates: list of Candidates
:returns: list of Candidates where score is at least min_score,
if and only if one or more Candidates have at least min_score.
Otherwise, returns original list of Candidates.
"""
high_score_candidates = [c for c in candidates if c.score >= self.min_score]
if high_score_candidates != []:
return high_score_candidates
return candidates |
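A quick illustration of the filtering rule with a stand-in Candidate type; the real Candidate class is not shown here, so the namedtuple below is an assumption.
from collections import namedtuple

Candidate = namedtuple('Candidate', ['name', 'score'])

def keep_high_scores(candidates, min_score):
    # Same rule as above: fall back to the full list if nothing qualifies.
    high = [c for c in candidates if c.score >= min_score]
    return high if high else candidates

pool = [Candidate('a', 0.2), Candidate('b', 0.9), Candidate('c', 0.7)]
print(keep_high_scores(pool, 0.5))   # 'b' and 'c' survive
print(keep_high_scores(pool, 0.95))  # nothing qualifies, original list returned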
def save_npz_dict(save_list=None, name='model.npz', sess=None):
"""Input parameters and the file name, save parameters as a dictionary into .npz file.
Use ``tl.files.load_and_assign_npz_dict()`` to restore.
Parameters
----------
save_list : list of parameters
A list of parameters (tensor) to be saved.
name : str
The name of the `.npz` file.
sess : Session
TensorFlow Session.
"""
if sess is None:
raise ValueError("session is None.")
if save_list is None:
save_list = []
save_list_names = [tensor.name for tensor in save_list]
save_list_var = sess.run(save_list)
save_var_dict = {save_list_names[idx]: val for idx, val in enumerate(save_list_var)}
np.savez(name, **save_var_dict)
save_list_var = None
save_var_dict = None
del save_list_var
del save_var_dict
logging.info("[*] Model saved in npz_dict %s" % name) | Input parameters and the file name, save parameters as a dictionary into .npz file.
Use ``tl.files.load_and_assign_npz_dict()`` to restore.
Parameters
----------
save_list : list of parameters
A list of parameters (tensor) to be saved.
name : str
The name of the `.npz` file.
sess : Session
        TensorFlow Session. | Below is the instruction that describes the task:
### Input:
Input parameters and the file name, save parameters as a dictionary into .npz file.
Use ``tl.files.load_and_assign_npz_dict()`` to restore.
Parameters
----------
save_list : list of parameters
A list of parameters (tensor) to be saved.
name : str
The name of the `.npz` file.
sess : Session
TensorFlow Session.
### Response:
def save_npz_dict(save_list=None, name='model.npz', sess=None):
"""Input parameters and the file name, save parameters as a dictionary into .npz file.
Use ``tl.files.load_and_assign_npz_dict()`` to restore.
Parameters
----------
save_list : list of parameters
A list of parameters (tensor) to be saved.
name : str
The name of the `.npz` file.
sess : Session
TensorFlow Session.
"""
if sess is None:
raise ValueError("session is None.")
if save_list is None:
save_list = []
save_list_names = [tensor.name for tensor in save_list]
save_list_var = sess.run(save_list)
save_var_dict = {save_list_names[idx]: val for idx, val in enumerate(save_list_var)}
np.savez(name, **save_var_dict)
save_list_var = None
save_var_dict = None
del save_list_var
del save_var_dict
logging.info("[*] Model saved in npz_dict %s" % name) |
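The round trip can be illustrated without a TensorFlow session, since the storage format is just a name-to-array .npz archive; variable names and shapes below are made up.
import numpy as np

weights = {'w1': np.random.rand(3, 4), 'b1': np.zeros(4)}
np.savez('model.npz', **weights)

# Restoring gives back the same name -> array mapping.
restored = np.load('model.npz')
for name in restored.files:
    print(name, restored[name].shape)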
def check_weather(self):
'''
Query the configured/queried station and return the weather data
'''
if self.station_id is None:
# Failed to get the nearest station ID when first launched, so
# retry it.
self.get_station_id()
self.data['update_error'] = ''
try:
query_url = STATION_QUERY_URL % (self.api_key,
'conditions',
self.station_id)
try:
response = self.api_request(query_url)['current_observation']
self.forecast_url = response.pop('ob_url', None)
except KeyError:
self.logger.error('No weather data found for %s', self.station_id)
self.data['update_error'] = self.update_error
return
if self.forecast:
query_url = STATION_QUERY_URL % (self.api_key,
'forecast',
self.station_id)
try:
forecast = self.api_request(query_url)['forecast']
forecast = forecast['simpleforecast']['forecastday'][0]
except (KeyError, IndexError, TypeError):
self.logger.error(
'No forecast data found for %s', self.station_id)
# This is a non-fatal error, so don't return but do set the
# error flag.
self.data['update_error'] = self.update_error
unit = 'celsius' if self.units == 'metric' else 'fahrenheit'
low_temp = forecast.get('low', {}).get(unit, '')
high_temp = forecast.get('high', {}).get(unit, '')
else:
low_temp = high_temp = ''
if self.units == 'metric':
temp_unit = 'c'
speed_unit = 'kph'
distance_unit = 'km'
pressure_unit = 'mb'
else:
temp_unit = 'f'
speed_unit = 'mph'
distance_unit = 'mi'
pressure_unit = 'in'
def _find(key, data=None, default=''):
if data is None:
data = response
return str(data.get(key, default))
try:
observation_epoch = _find('observation_epoch') or _find('local_epoch')
observation_time = datetime.fromtimestamp(int(observation_epoch))
except (TypeError, ValueError):
log.debug(
'Observation time \'%s\' is not a UNIX timestamp',
observation_epoch
)
observation_time = datetime.fromtimestamp(0)
self.data['city'] = _find('city', response['observation_location'])
self.data['condition'] = _find('weather')
self.data['observation_time'] = observation_time
self.data['current_temp'] = _find('temp_' + temp_unit).split('.')[0]
self.data['low_temp'] = low_temp
self.data['high_temp'] = high_temp
self.data['temp_unit'] = '°' + temp_unit.upper()
self.data['feelslike'] = _find('feelslike_' + temp_unit)
self.data['dewpoint'] = _find('dewpoint_' + temp_unit)
self.data['wind_speed'] = _find('wind_' + speed_unit)
self.data['wind_unit'] = speed_unit
self.data['wind_direction'] = _find('wind_dir')
self.data['wind_gust'] = _find('wind_gust_' + speed_unit)
self.data['pressure'] = _find('pressure_' + pressure_unit)
self.data['pressure_unit'] = pressure_unit
self.data['pressure_trend'] = _find('pressure_trend')
self.data['visibility'] = _find('visibility_' + distance_unit)
self.data['visibility_unit'] = distance_unit
self.data['humidity'] = _find('relative_humidity').rstrip('%')
self.data['uv_index'] = _find('UV')
except Exception:
# Don't let an uncaught exception kill the update thread
self.logger.error(
'Uncaught error occurred while checking weather. '
'Exception follows:', exc_info=True
)
            self.data['update_error'] = self.update_error | Query the configured/queried station and return the weather data | Below is the instruction that describes the task:
### Input:
Query the configured/queried station and return the weather data
### Response:
def check_weather(self):
'''
Query the configured/queried station and return the weather data
'''
if self.station_id is None:
# Failed to get the nearest station ID when first launched, so
# retry it.
self.get_station_id()
self.data['update_error'] = ''
try:
query_url = STATION_QUERY_URL % (self.api_key,
'conditions',
self.station_id)
try:
response = self.api_request(query_url)['current_observation']
self.forecast_url = response.pop('ob_url', None)
except KeyError:
self.logger.error('No weather data found for %s', self.station_id)
self.data['update_error'] = self.update_error
return
if self.forecast:
query_url = STATION_QUERY_URL % (self.api_key,
'forecast',
self.station_id)
try:
forecast = self.api_request(query_url)['forecast']
forecast = forecast['simpleforecast']['forecastday'][0]
except (KeyError, IndexError, TypeError):
self.logger.error(
'No forecast data found for %s', self.station_id)
# This is a non-fatal error, so don't return but do set the
# error flag.
self.data['update_error'] = self.update_error
unit = 'celsius' if self.units == 'metric' else 'fahrenheit'
low_temp = forecast.get('low', {}).get(unit, '')
high_temp = forecast.get('high', {}).get(unit, '')
else:
low_temp = high_temp = ''
if self.units == 'metric':
temp_unit = 'c'
speed_unit = 'kph'
distance_unit = 'km'
pressure_unit = 'mb'
else:
temp_unit = 'f'
speed_unit = 'mph'
distance_unit = 'mi'
pressure_unit = 'in'
def _find(key, data=None, default=''):
if data is None:
data = response
return str(data.get(key, default))
try:
observation_epoch = _find('observation_epoch') or _find('local_epoch')
observation_time = datetime.fromtimestamp(int(observation_epoch))
except (TypeError, ValueError):
log.debug(
'Observation time \'%s\' is not a UNIX timestamp',
observation_epoch
)
observation_time = datetime.fromtimestamp(0)
self.data['city'] = _find('city', response['observation_location'])
self.data['condition'] = _find('weather')
self.data['observation_time'] = observation_time
self.data['current_temp'] = _find('temp_' + temp_unit).split('.')[0]
self.data['low_temp'] = low_temp
self.data['high_temp'] = high_temp
self.data['temp_unit'] = '°' + temp_unit.upper()
self.data['feelslike'] = _find('feelslike_' + temp_unit)
self.data['dewpoint'] = _find('dewpoint_' + temp_unit)
self.data['wind_speed'] = _find('wind_' + speed_unit)
self.data['wind_unit'] = speed_unit
self.data['wind_direction'] = _find('wind_dir')
self.data['wind_gust'] = _find('wind_gust_' + speed_unit)
self.data['pressure'] = _find('pressure_' + pressure_unit)
self.data['pressure_unit'] = pressure_unit
self.data['pressure_trend'] = _find('pressure_trend')
self.data['visibility'] = _find('visibility_' + distance_unit)
self.data['visibility_unit'] = distance_unit
self.data['humidity'] = _find('relative_humidity').rstrip('%')
self.data['uv_index'] = _find('UV')
except Exception:
# Don't let an uncaught exception kill the update thread
self.logger.error(
'Uncaught error occurred while checking weather. '
'Exception follows:', exc_info=True
)
self.data['update_error'] = self.update_error |
def check_url_accessibility(url, timeout=10):
'''
Check whether the URL accessible and returns HTTP 200 OK or not
if not raises ValidationError
'''
if(url=='localhost'):
url = 'http://127.0.0.1'
try:
req = urllib2.urlopen(url, timeout=timeout)
if (req.getcode()==200):
return True
except Exception:
pass
fail("URL '%s' is not accessible from this machine" % url) | Check whether the URL accessible and returns HTTP 200 OK or not
    if not raises ValidationError | Below is the instruction that describes the task:
### Input:
Check whether the URL accessible and returns HTTP 200 OK or not
if not raises ValidationError
### Response:
def check_url_accessibility(url, timeout=10):
'''
Check whether the URL accessible and returns HTTP 200 OK or not
if not raises ValidationError
'''
if(url=='localhost'):
url = 'http://127.0.0.1'
try:
req = urllib2.urlopen(url, timeout=timeout)
if (req.getcode()==200):
return True
except Exception:
pass
fail("URL '%s' is not accessible from this machine" % url) |
def get_ssl_context(private_key, certificate):
"""Get ssl context from private key and certificate paths.
The return value is used when calling Flask.
i.e. app.run(ssl_context=get_ssl_context(,,,))
"""
if (
certificate
and os.path.isfile(certificate)
and private_key
and os.path.isfile(private_key)
):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain(certificate, private_key)
return context
return None | Get ssl context from private key and certificate paths.
The return value is used when calling Flask.
    i.e. app.run(ssl_context=get_ssl_context(,,,)) | Below is the instruction that describes the task:
### Input:
Get ssl context from private key and certificate paths.
The return value is used when calling Flask.
i.e. app.run(ssl_context=get_ssl_context(,,,))
### Response:
def get_ssl_context(private_key, certificate):
"""Get ssl context from private key and certificate paths.
The return value is used when calling Flask.
i.e. app.run(ssl_context=get_ssl_context(,,,))
"""
if (
certificate
and os.path.isfile(certificate)
and private_key
and os.path.isfile(private_key)
):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain(certificate, private_key)
return context
return None |
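A usage sketch built around the get_ssl_context helper defined above; it assumes Flask is installed, the key/certificate paths are placeholders, and the argument order is private key first, then certificate.
from flask import Flask

app = Flask(__name__)

@app.route('/')
def index():
    return 'hello over TLS'

if __name__ == '__main__':
    # Placeholder paths; get_ssl_context returns None if either file is
    # missing, in which case Flask serves plain HTTP instead.
    context = get_ssl_context('certs/server.key', 'certs/server.crt')
    app.run(ssl_context=context)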
def mode_to_str(mode):
"""
    Converts a tf.estimator.ModeKeys into a nice readable string.
    :param mode: The mode as a tf.estimator.ModeKeys
:return: A human readable string representing the mode.
"""
if mode == tf.estimator.ModeKeys.TRAIN:
return "train"
if mode == tf.estimator.ModeKeys.EVAL:
return "eval"
if mode == tf.estimator.ModeKeys.PREDICT:
return "predict"
return "unknown" | Converts a tf.estimator.ModeKeys in a nice readable string.
:param mode: The mdoe as a tf.estimator.ModeKeys
    :return: A human readable string representing the mode. | Below is the instruction that describes the task:
### Input:
Converts a tf.estimator.ModeKeys into a nice readable string.
:param mode: The mode as a tf.estimator.ModeKeys
:return: A human readable string representing the mode.
### Response:
def mode_to_str(mode):
"""
    Converts a tf.estimator.ModeKeys into a nice readable string.
    :param mode: The mode as a tf.estimator.ModeKeys
:return: A human readable string representing the mode.
"""
if mode == tf.estimator.ModeKeys.TRAIN:
return "train"
if mode == tf.estimator.ModeKeys.EVAL:
return "eval"
if mode == tf.estimator.ModeKeys.PREDICT:
return "predict"
return "unknown" |
def suspend(self, instance_id):
'''
Suspend a server
'''
nt_ks = self.compute_conn
response = nt_ks.servers.suspend(instance_id)
        return True | Suspend a server | Below is the instruction that describes the task:
### Input:
Suspend a server
### Response:
def suspend(self, instance_id):
'''
Suspend a server
'''
nt_ks = self.compute_conn
response = nt_ks.servers.suspend(instance_id)
return True |
def _process_ssh_rsa(self, data):
"""Parses ssh-rsa public keys."""
current_position, raw_e = self._unpack_by_int(data, 0)
current_position, raw_n = self._unpack_by_int(data, current_position)
unpacked_e = self._parse_long(raw_e)
unpacked_n = self._parse_long(raw_n)
self.rsa = RSAPublicNumbers(unpacked_e, unpacked_n).public_key(default_backend())
self.bits = self.rsa.key_size
if self.strict_mode:
min_length = self.RSA_MIN_LENGTH_STRICT
max_length = self.RSA_MAX_LENGTH_STRICT
else:
min_length = self.RSA_MIN_LENGTH_LOOSE
max_length = self.RSA_MAX_LENGTH_LOOSE
if self.bits < min_length:
raise TooShortKeyError(
"%s key data can not be shorter than %s bits (was %s)" % (self.key_type, min_length, self.bits)
)
if self.bits > max_length:
raise TooLongKeyError(
"%s key data can not be longer than %s bits (was %s)" % (self.key_type, max_length, self.bits)
)
        return current_position | Parses ssh-rsa public keys. | Below is the instruction that describes the task:
### Input:
Parses ssh-rsa public keys.
### Response:
def _process_ssh_rsa(self, data):
"""Parses ssh-rsa public keys."""
current_position, raw_e = self._unpack_by_int(data, 0)
current_position, raw_n = self._unpack_by_int(data, current_position)
unpacked_e = self._parse_long(raw_e)
unpacked_n = self._parse_long(raw_n)
self.rsa = RSAPublicNumbers(unpacked_e, unpacked_n).public_key(default_backend())
self.bits = self.rsa.key_size
if self.strict_mode:
min_length = self.RSA_MIN_LENGTH_STRICT
max_length = self.RSA_MAX_LENGTH_STRICT
else:
min_length = self.RSA_MIN_LENGTH_LOOSE
max_length = self.RSA_MAX_LENGTH_LOOSE
if self.bits < min_length:
raise TooShortKeyError(
"%s key data can not be shorter than %s bits (was %s)" % (self.key_type, min_length, self.bits)
)
if self.bits > max_length:
raise TooLongKeyError(
"%s key data can not be longer than %s bits (was %s)" % (self.key_type, max_length, self.bits)
)
return current_position |
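The parsing above relies on the RFC 4251 length-prefixed string encoding; a self-contained sketch of that unpacking, using synthetic values rather than a real key blob.
import struct

def pack_ssh_string(data: bytes) -> bytes:
    # RFC 4251 "string": 4-byte big-endian length followed by the bytes.
    return struct.pack('>I', len(data)) + data

def unpack_ssh_string(blob: bytes, offset: int):
    (length,) = struct.unpack_from('>I', blob, offset)
    start = offset + 4
    return start + length, blob[start:start + length]

# Build a tiny synthetic ssh-rsa blob: key type, exponent e, modulus n.
e = (65537).to_bytes(3, 'big')
n = (0xC0FFEE).to_bytes(3, 'big')
blob = pack_ssh_string(b'ssh-rsa') + pack_ssh_string(e) + pack_ssh_string(n)

pos, key_type = unpack_ssh_string(blob, 0)
pos, raw_e = unpack_ssh_string(blob, pos)
pos, raw_n = unpack_ssh_string(blob, pos)
print(key_type, int.from_bytes(raw_e, 'big'), int.from_bytes(raw_n, 'big'))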
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
)
        return ids | Converts a sequence of tokens into ids using the vocab. | Below is the instruction that describes the task:
### Input:
Converts a sequence of tokens into ids using the vocab.
### Response:
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
)
return ids |
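In essence this is a per-token vocabulary lookup; a toy illustration with a made-up four-entry vocab (real BERT vocabularies hold roughly 30k entries, so the ids here are illustrative only).
vocab = {'[CLS]': 101, '[SEP]': 102, 'hello': 7592, 'world': 2088}

tokens = ['[CLS]', 'hello', 'world', '[SEP]']
ids = [vocab[token] for token in tokens]
print(ids)  # [101, 7592, 2088, 102]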
def flip_alleles(genotypes):
"""Flip the alleles of an Genotypes instance."""
warnings.warn("deprecated: use 'Genotypes.flip_coded'", DeprecationWarning)
genotypes.reference, genotypes.coded = (genotypes.coded,
genotypes.reference)
genotypes.genotypes = 2 - genotypes.genotypes
    return genotypes | Flip the alleles of a Genotypes instance. | Below is the instruction that describes the task:
### Input:
Flip the alleles of a Genotypes instance.
### Response:
def flip_alleles(genotypes):
"""Flip the alleles of an Genotypes instance."""
warnings.warn("deprecated: use 'Genotypes.flip_coded'", DeprecationWarning)
genotypes.reference, genotypes.coded = (genotypes.coded,
genotypes.reference)
genotypes.genotypes = 2 - genotypes.genotypes
return genotypes |
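The 2 - genotypes trick mirrors additively coded genotypes around one copy while leaving missing values missing; a small NumPy illustration with made-up data.
import numpy as np

# Additively coded genotypes: number of copies of the coded allele (0, 1 or 2).
genotypes = np.array([0, 1, 2, 1, np.nan])

# Flipping the coded/reference alleles mirrors the counts around 1;
# missing values (NaN) stay missing.
flipped = 2 - genotypes
print(flipped)  # [2. 1. 0. 1. nan]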
def _bg(self, coro: coroutine) -> asyncio.Task:
"""Run coro in background, log errors"""
async def runner():
try:
await coro
except:
self._log.exception("async: Coroutine raised exception")
        return asyncio.ensure_future(runner()) | Run coro in background, log errors | Below is the instruction that describes the task:
### Input:
Run coro in background, log errors
### Response:
def _bg(self, coro: coroutine) -> asyncio.Task:
"""Run coro in background, log errors"""
async def runner():
try:
await coro
except:
self._log.exception("async: Coroutine raised exception")
return asyncio.ensure_future(runner()) |
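A standalone sketch of the same fire-and-forget pattern with exception logging, runnable outside the original class; names and the failing coroutine are made up.
import asyncio
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger('bg')

def run_in_background(coro):
    async def runner():
        try:
            await coro
        except Exception:
            log.exception('background coroutine raised')
    return asyncio.ensure_future(runner())

async def boom():
    raise RuntimeError('oops')

async def main():
    task = run_in_background(boom())
    await asyncio.sleep(0)  # give the background task a chance to run
    await task              # runner() swallowed the exception, so this is safe

asyncio.run(main())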
def match_planted(fk_candidate_observations, match_filename, bright_limit=BRIGHT_LIMIT, object_planted=OBJECT_PLANTED,
minimum_bright_detections=MINIMUM_BRIGHT_DETECTIONS, bright_fraction=MINIMUM_BRIGHT_FRACTION):
"""
Using the fk_candidate_observations as input get the Object.planted file from VOSpace and match
planted sources with found sources.
The Object.planted list is pulled from VOSpace based on the standard file-layout and name of the
first exposure as read from the .astrom file.
:param fk_candidate_observations: name of the fk*reals.astrom file to check against Object.planted
:param match_filename: a file that will contain a list of all planted sources and the matched found source
@param minimum_bright_detections: if there are too few bright detections we raise an error.
"""
found_pos = []
detections = fk_candidate_observations.get_sources()
for detection in detections:
reading = detection.get_reading(0)
# create a list of positions, to be used later by match_lists
found_pos.append([reading.x, reading.y])
# Now get the Object.planted file, either from the local FS or from VOSpace.
objects_planted_uri = object_planted
if not os.access(objects_planted_uri, os.F_OK):
objects_planted_uri = fk_candidate_observations.observations[0].get_object_planted_uri()
lines = open(objects_planted_uri).read()
# we are changing the format of the Object.planted header to be compatible with astropy.io.ascii but
# there are some old Object.planted files out there so we do these string/replace calls to reset those.
new_lines = lines.replace("pix rate", "pix_rate")
new_lines = new_lines.replace("""''/h rate""", "sky_rate")
planted_objects_table = ascii.read(new_lines, header_start=-1, data_start=0)
planted_objects_table.meta = None
# The match_list method expects a list that contains a position, not an x and a y vector, so we transpose.
planted_pos = numpy.transpose([planted_objects_table['x'].data, planted_objects_table['y'].data])
    # match_idx is an ordered list. The list is in the order of the first list of positions and each entry
# is the index of the matching position from the second list.
(match_idx, match_fnd) = util.match_lists(numpy.array(planted_pos), numpy.array(found_pos))
assert isinstance(match_idx, numpy.ma.MaskedArray)
assert isinstance(match_fnd, numpy.ma.MaskedArray)
false_positives_table = Table()
# Once we've matched the two lists we'll need some new columns to store the information in.
# these are masked columns so that object.planted entries that have no detected match are left 'blank'.
new_columns = [MaskedColumn(name="measure_x", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_y", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_rate", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_angle", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_mag1", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_merr1", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_mag2", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_merr2", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_mag3", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_merr3", length=len(planted_objects_table), mask=True)]
planted_objects_table.add_columns(new_columns)
tlength = 0
new_columns = [MaskedColumn(name="measure_x", length=tlength, mask=True),
MaskedColumn(name="measure_y", length=tlength, mask=True),
MaskedColumn(name="measure_rate", length=0, mask=True),
MaskedColumn(name="measure_angle", length=0, mask=True),
MaskedColumn(name="measure_mag1", length=0, mask=True),
MaskedColumn(name="measure_merr1", length=0, mask=True),
MaskedColumn(name="measure_mag2", length=0, mask=True),
MaskedColumn(name="measure_merr2", length=0, mask=True),
MaskedColumn(name="measure_mag3", length=tlength, mask=True),
MaskedColumn(name="measure_merr3", length=tlength, mask=True)]
false_positives_table.add_columns(new_columns)
# We do some 'checks' on the Object.planted match to diagnose pipeline issues. Those checks are made using just
# those planted sources we should have detected.
bright = planted_objects_table['mag'] < bright_limit
n_bright_planted = numpy.count_nonzero(planted_objects_table['mag'][bright])
measures = []
idxs = []
for idx in range(len(match_idx)):
# The match_idx value is False if nothing was found.
if not match_idx.mask[idx]:
# Each 'source' has multiple 'readings'
measures.append(detections[match_idx[idx]].get_readings())
idxs.append(idx)
observations = measure_mags(measures)
for oidx in range(len(measures)):
idx = idxs[oidx]
readings = measures[oidx]
start_jd = util.Time(readings[0].obs.header['MJD_OBS_CENTER'], format='mpc', scale='utc').jd
end_jd = util.Time(readings[-1].obs.header['MJD_OBS_CENTER'], format='mpc', scale='utc').jd
rate = math.sqrt((readings[-1].x - readings[0].x) ** 2 + (readings[-1].y - readings[0].y) ** 2) / (
24 * (end_jd - start_jd))
rate = int(rate * 100) / 100.0
angle = math.degrees(math.atan2(readings[-1].y - readings[0].y, readings[-1].x - readings[0].x))
angle = int(angle * 100) / 100.0
planted_objects_table[idx]['measure_rate'] = rate
planted_objects_table[idx]['measure_angle'] = angle
planted_objects_table[idx]['measure_x'] = observations[readings[0].obs]['mags']["XCENTER"][oidx]
planted_objects_table[idx]['measure_y'] = observations[readings[0].obs]['mags']["YCENTER"][oidx]
for ridx in range(len(readings)):
reading = readings[ridx]
mags = observations[reading.obs]['mags']
planted_objects_table[idx]['measure_mag{}'.format(ridx+1)] = mags["MAG"][oidx]
planted_objects_table[idx]['measure_merr{}'.format(ridx+1)] = mags["MERR"][oidx]
# for idx in range(len(match_fnd)):
# if match_fnd.mask[idx]:
# measures = detections[idx].get_readings()
# false_positives_table.add_row()
# false_positives_table[-1] = measure_mags(measures, false_positives_table[-1])
# Count an object as detected if it has a measured magnitude in the first frame of the triplet.
n_bright_found = numpy.count_nonzero(planted_objects_table['measure_mag1'][bright])
# Also compute the offset and standard deviation of the measured magnitude from that planted ones.
offset = numpy.mean(planted_objects_table['mag'][bright] - planted_objects_table['measure_mag1'][bright])
try:
offset = "{:5.2f}".format(offset)
except:
offset = "indef"
std = numpy.std(planted_objects_table['mag'][bright] - planted_objects_table['measure_mag1'][bright])
try:
std = "{:5.2f}".format(std)
except:
std = "indef"
if os.access(match_filename, os.R_OK):
fout = open(match_filename, 'a')
else:
fout = open(match_filename, 'w')
fout.write("#K {:10s} {:10s}\n".format("EXPNUM", "FWHM"))
for measure in detections[0].get_readings():
fout.write('#V {:10s} {:10s}\n'.format(measure.obs.header['EXPNUM'], measure.obs.header['FWHM']))
fout.write("#K ")
for keyword in ["RMIN", "RMAX", "ANGLE", "AWIDTH"]:
fout.write("{:10s} ".format(keyword))
fout.write("\n")
fout.write("#V ")
for keyword in ["RMIN", "RMAX", "ANGLE", "AWIDTH"]:
fout.write("{:10s} ".format(fk_candidate_observations.sys_header[keyword]))
fout.write("\n")
fout.write("#K ")
for keyword in ["NBRIGHT", "NFOUND", "OFFSET", "STDEV"]:
fout.write("{:10s} ".format(keyword))
fout.write("\n")
fout.write("#V {:<10} {:<10} {:<10} {:<10}\n".format(n_bright_planted,
n_bright_found,
offset,
std))
try:
writer = ascii.FixedWidth
# add a hash to the start of line that will have header columns: for JMP
fout.write("#")
fout.flush()
ascii.write(planted_objects_table, output=fout, Writer=writer, delimiter=None)
if len(false_positives_table) > 0:
with open(match_filename+".fp", 'a') as fpout:
fpout.write("#")
ascii.write(false_positives_table, output=fpout, Writer=writer, delimiter=None)
except Exception as e:
logging.error(str(e))
raise e
finally:
fout.close()
# Some simple checks to report a failure how we're doing.
if n_bright_planted < minimum_bright_detections:
raise RuntimeError(1, "Too few bright objects planted.")
if n_bright_found / float(n_bright_planted) < bright_fraction:
raise RuntimeError(2, "Too few bright objects found.")
return "{} {} {} {}".format(n_bright_planted, n_bright_found, offset, std) | Using the fk_candidate_observations as input get the Object.planted file from VOSpace and match
planted sources with found sources.
The Object.planted list is pulled from VOSpace based on the standard file-layout and name of the
first exposure as read from the .astrom file.
:param fk_candidate_observations: name of the fk*reals.astrom file to check against Object.planted
:param match_filename: a file that will contain a list of all planted sources and the matched found source
    @param minimum_bright_detections: if there are too few bright detections we raise an error. | Below is the instruction that describes the task:
### Input:
Using the fk_candidate_observations as input get the Object.planted file from VOSpace and match
planted sources with found sources.
The Object.planted list is pulled from VOSpace based on the standard file-layout and name of the
first exposure as read from the .astrom file.
:param fk_candidate_observations: name of the fk*reals.astrom file to check against Object.planted
:param match_filename: a file that will contain a list of all planted sources and the matched found source
@param minimum_bright_detections: if there are too few bright detections we raise an error.
### Response:
def match_planted(fk_candidate_observations, match_filename, bright_limit=BRIGHT_LIMIT, object_planted=OBJECT_PLANTED,
minimum_bright_detections=MINIMUM_BRIGHT_DETECTIONS, bright_fraction=MINIMUM_BRIGHT_FRACTION):
"""
Using the fk_candidate_observations as input get the Object.planted file from VOSpace and match
planted sources with found sources.
The Object.planted list is pulled from VOSpace based on the standard file-layout and name of the
first exposure as read from the .astrom file.
:param fk_candidate_observations: name of the fk*reals.astrom file to check against Object.planted
:param match_filename: a file that will contain a list of all planted sources and the matched found source
@param minimum_bright_detections: if there are too few bright detections we raise an error.
"""
found_pos = []
detections = fk_candidate_observations.get_sources()
for detection in detections:
reading = detection.get_reading(0)
# create a list of positions, to be used later by match_lists
found_pos.append([reading.x, reading.y])
# Now get the Object.planted file, either from the local FS or from VOSpace.
objects_planted_uri = object_planted
if not os.access(objects_planted_uri, os.F_OK):
objects_planted_uri = fk_candidate_observations.observations[0].get_object_planted_uri()
lines = open(objects_planted_uri).read()
# we are changing the format of the Object.planted header to be compatible with astropy.io.ascii but
# there are some old Object.planted files out there so we do these string/replace calls to reset those.
new_lines = lines.replace("pix rate", "pix_rate")
new_lines = new_lines.replace("""''/h rate""", "sky_rate")
planted_objects_table = ascii.read(new_lines, header_start=-1, data_start=0)
planted_objects_table.meta = None
# The match_list method expects a list that contains a position, not an x and a y vector, so we transpose.
planted_pos = numpy.transpose([planted_objects_table['x'].data, planted_objects_table['y'].data])
    # match_idx is an ordered list. The list is in the order of the first list of positions and each entry
# is the index of the matching position from the second list.
(match_idx, match_fnd) = util.match_lists(numpy.array(planted_pos), numpy.array(found_pos))
assert isinstance(match_idx, numpy.ma.MaskedArray)
assert isinstance(match_fnd, numpy.ma.MaskedArray)
false_positives_table = Table()
# Once we've matched the two lists we'll need some new columns to store the information in.
# these are masked columns so that object.planted entries that have no detected match are left 'blank'.
new_columns = [MaskedColumn(name="measure_x", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_y", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_rate", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_angle", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_mag1", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_merr1", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_mag2", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_merr2", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_mag3", length=len(planted_objects_table), mask=True),
MaskedColumn(name="measure_merr3", length=len(planted_objects_table), mask=True)]
planted_objects_table.add_columns(new_columns)
tlength = 0
new_columns = [MaskedColumn(name="measure_x", length=tlength, mask=True),
MaskedColumn(name="measure_y", length=tlength, mask=True),
MaskedColumn(name="measure_rate", length=0, mask=True),
MaskedColumn(name="measure_angle", length=0, mask=True),
MaskedColumn(name="measure_mag1", length=0, mask=True),
MaskedColumn(name="measure_merr1", length=0, mask=True),
MaskedColumn(name="measure_mag2", length=0, mask=True),
MaskedColumn(name="measure_merr2", length=0, mask=True),
MaskedColumn(name="measure_mag3", length=tlength, mask=True),
MaskedColumn(name="measure_merr3", length=tlength, mask=True)]
false_positives_table.add_columns(new_columns)
# We do some 'checks' on the Object.planted match to diagnose pipeline issues. Those checks are made using just
# those planted sources we should have detected.
bright = planted_objects_table['mag'] < bright_limit
n_bright_planted = numpy.count_nonzero(planted_objects_table['mag'][bright])
measures = []
idxs = []
for idx in range(len(match_idx)):
# The match_idx value is False if nothing was found.
if not match_idx.mask[idx]:
# Each 'source' has multiple 'readings'
measures.append(detections[match_idx[idx]].get_readings())
idxs.append(idx)
observations = measure_mags(measures)
for oidx in range(len(measures)):
idx = idxs[oidx]
readings = measures[oidx]
start_jd = util.Time(readings[0].obs.header['MJD_OBS_CENTER'], format='mpc', scale='utc').jd
end_jd = util.Time(readings[-1].obs.header['MJD_OBS_CENTER'], format='mpc', scale='utc').jd
rate = math.sqrt((readings[-1].x - readings[0].x) ** 2 + (readings[-1].y - readings[0].y) ** 2) / (
24 * (end_jd - start_jd))
rate = int(rate * 100) / 100.0
angle = math.degrees(math.atan2(readings[-1].y - readings[0].y, readings[-1].x - readings[0].x))
angle = int(angle * 100) / 100.0
planted_objects_table[idx]['measure_rate'] = rate
planted_objects_table[idx]['measure_angle'] = angle
planted_objects_table[idx]['measure_x'] = observations[readings[0].obs]['mags']["XCENTER"][oidx]
planted_objects_table[idx]['measure_y'] = observations[readings[0].obs]['mags']["YCENTER"][oidx]
for ridx in range(len(readings)):
reading = readings[ridx]
mags = observations[reading.obs]['mags']
planted_objects_table[idx]['measure_mag{}'.format(ridx+1)] = mags["MAG"][oidx]
planted_objects_table[idx]['measure_merr{}'.format(ridx+1)] = mags["MERR"][oidx]
# for idx in range(len(match_fnd)):
# if match_fnd.mask[idx]:
# measures = detections[idx].get_readings()
# false_positives_table.add_row()
# false_positives_table[-1] = measure_mags(measures, false_positives_table[-1])
# Count an object as detected if it has a measured magnitude in the first frame of the triplet.
n_bright_found = numpy.count_nonzero(planted_objects_table['measure_mag1'][bright])
# Also compute the offset and standard deviation of the measured magnitude from that planted ones.
offset = numpy.mean(planted_objects_table['mag'][bright] - planted_objects_table['measure_mag1'][bright])
try:
offset = "{:5.2f}".format(offset)
except:
offset = "indef"
std = numpy.std(planted_objects_table['mag'][bright] - planted_objects_table['measure_mag1'][bright])
try:
std = "{:5.2f}".format(std)
except:
std = "indef"
if os.access(match_filename, os.R_OK):
fout = open(match_filename, 'a')
else:
fout = open(match_filename, 'w')
fout.write("#K {:10s} {:10s}\n".format("EXPNUM", "FWHM"))
for measure in detections[0].get_readings():
fout.write('#V {:10s} {:10s}\n'.format(measure.obs.header['EXPNUM'], measure.obs.header['FWHM']))
fout.write("#K ")
for keyword in ["RMIN", "RMAX", "ANGLE", "AWIDTH"]:
fout.write("{:10s} ".format(keyword))
fout.write("\n")
fout.write("#V ")
for keyword in ["RMIN", "RMAX", "ANGLE", "AWIDTH"]:
fout.write("{:10s} ".format(fk_candidate_observations.sys_header[keyword]))
fout.write("\n")
fout.write("#K ")
for keyword in ["NBRIGHT", "NFOUND", "OFFSET", "STDEV"]:
fout.write("{:10s} ".format(keyword))
fout.write("\n")
fout.write("#V {:<10} {:<10} {:<10} {:<10}\n".format(n_bright_planted,
n_bright_found,
offset,
std))
try:
writer = ascii.FixedWidth
# add a hash to the start of line that will have header columns: for JMP
fout.write("#")
fout.flush()
ascii.write(planted_objects_table, output=fout, Writer=writer, delimiter=None)
if len(false_positives_table) > 0:
with open(match_filename+".fp", 'a') as fpout:
fpout.write("#")
ascii.write(false_positives_table, output=fpout, Writer=writer, delimiter=None)
except Exception as e:
logging.error(str(e))
raise e
finally:
fout.close()
# Some simple checks to report a failure how we're doing.
if n_bright_planted < minimum_bright_detections:
raise RuntimeError(1, "Too few bright objects planted.")
if n_bright_found / float(n_bright_planted) < bright_fraction:
raise RuntimeError(2, "Too few bright objects found.")
return "{} {} {} {}".format(n_bright_planted, n_bright_found, offset, std) |
def databunch(self, path:PathOrStr=None, bs:int=64, val_bs:int=None, num_workers:int=defaults.cpus,
dl_tfms:Optional[Collection[Callable]]=None, device:torch.device=None, collate_fn:Callable=data_collate,
no_check:bool=False, **kwargs)->'DataBunch':
"Create an `DataBunch` from self, `path` will override `self.path`, `kwargs` are passed to `DataBunch.create`."
path = Path(ifnone(path, self.path))
data = self.x._bunch.create(self.train, self.valid, test_ds=self.test, path=path, bs=bs, val_bs=val_bs,
num_workers=num_workers, device=device, collate_fn=collate_fn, no_check=no_check, **kwargs)
if getattr(self, 'normalize', False):#In case a normalization was serialized
norm = self.normalize
data.normalize((norm['mean'], norm['std']), do_x=norm['do_x'], do_y=norm['do_y'])
data.label_list = self
        return data | Create a `DataBunch` from self, `path` will override `self.path`, `kwargs` are passed to `DataBunch.create`. | Below is the instruction that describes the task:
### Input:
Create a `DataBunch` from self, `path` will override `self.path`, `kwargs` are passed to `DataBunch.create`.
### Response:
def databunch(self, path:PathOrStr=None, bs:int=64, val_bs:int=None, num_workers:int=defaults.cpus,
dl_tfms:Optional[Collection[Callable]]=None, device:torch.device=None, collate_fn:Callable=data_collate,
no_check:bool=False, **kwargs)->'DataBunch':
"Create an `DataBunch` from self, `path` will override `self.path`, `kwargs` are passed to `DataBunch.create`."
path = Path(ifnone(path, self.path))
data = self.x._bunch.create(self.train, self.valid, test_ds=self.test, path=path, bs=bs, val_bs=val_bs,
num_workers=num_workers, device=device, collate_fn=collate_fn, no_check=no_check, **kwargs)
if getattr(self, 'normalize', False):#In case a normalization was serialized
norm = self.normalize
data.normalize((norm['mean'], norm['std']), do_x=norm['do_x'], do_y=norm['do_y'])
data.label_list = self
return data |
def get_api_service(self, name=None):
"""Returns the specific service config definition"""
try:
svc = self.services_by_name.get(name, None)
if svc is None:
raise ValueError(f"Couldn't find the API service configuration")
return svc
except: # NOQA
raise Exception(f"Failed to retrieve the API service configuration") | Returns the specific service config definition | Below is the the instruction that describes the task:
### Input:
Returns the specific service config definition
### Response:
def get_api_service(self, name=None):
"""Returns the specific service config definition"""
try:
svc = self.services_by_name.get(name, None)
if svc is None:
raise ValueError(f"Couldn't find the API service configuration")
return svc
except: # NOQA
raise Exception(f"Failed to retrieve the API service configuration") |
def _make_path(self, items):
'''Returns a relative path for the given dictionary of items.
Uses this url rule's url pattern and replaces instances of <var_name>
with the appropriate value from the items dict.
'''
for key, val in items.items():
if not isinstance(val, basestring):
raise TypeError, ('Value "%s" for key "%s" must be an instance'
' of basestring' % (val, key))
items[key] = quote_plus(val)
try:
path = self._url_format.format(**items)
except AttributeError:
# Old version of python
path = self._url_format
for key, val in items.items():
path = path.replace('{%s}' % key, val)
return path | Returns a relative path for the given dictionary of items.
Uses this url rule's url pattern and replaces instances of <var_name>
    with the appropriate value from the items dict. | Below is the instruction that describes the task:
### Input:
Returns a relative path for the given dictionary of items.
Uses this url rule's url pattern and replaces instances of <var_name>
with the appropriate value from the items dict.
### Response:
def _make_path(self, items):
'''Returns a relative path for the given dictionary of items.
Uses this url rule's url pattern and replaces instances of <var_name>
with the appropriate value from the items dict.
'''
for key, val in items.items():
if not isinstance(val, basestring):
raise TypeError, ('Value "%s" for key "%s" must be an instance'
' of basestring' % (val, key))
items[key] = quote_plus(val)
try:
path = self._url_format.format(**items)
except AttributeError:
# Old version of python
path = self._url_format
for key, val in items.items():
path = path.replace('{%s}' % key, val)
return path |
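A Python 3 sketch of the same format-and-quote idea (the original targets Python 2 and keeps a fallback for very old interpreters without str.format); the URL pattern and values are made up.
from urllib.parse import quote_plus

url_format = '/users/{user}/repos/{repo}'

def make_path(url_format, **items):
    # URL-encode each value before substituting it into the pattern.
    quoted = {key: quote_plus(value) for key, value in items.items()}
    return url_format.format(**quoted)

print(make_path(url_format, user='ada lovelace', repo='my/repo'))
# -> /users/ada+lovelace/repos/my%2Frepo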
def graphql_query(self, query_hash: str, variables: Dict[str, Any],
referer: Optional[str] = None, rhx_gis: Optional[str] = None) -> Dict[str, Any]:
"""
Do a GraphQL Query.
:param query_hash: Query identifying hash.
:param variables: Variables for the Query.
:param referer: HTTP Referer, or None.
:param rhx_gis: 'rhx_gis' variable as somewhere returned by Instagram, needed to 'sign' request
:return: The server's response dictionary.
"""
with copy_session(self._session) as tmpsession:
tmpsession.headers.update(self._default_http_header(empty_session_only=True))
del tmpsession.headers['Connection']
del tmpsession.headers['Content-Length']
tmpsession.headers['authority'] = 'www.instagram.com'
tmpsession.headers['scheme'] = 'https'
tmpsession.headers['accept'] = '*/*'
if referer is not None:
tmpsession.headers['referer'] = urllib.parse.quote(referer)
variables_json = json.dumps(variables, separators=(',', ':'))
if rhx_gis:
#self.log("rhx_gis {} query_hash {}".format(rhx_gis, query_hash))
values = "{}:{}".format(rhx_gis, variables_json)
x_instagram_gis = hashlib.md5(values.encode()).hexdigest()
tmpsession.headers['x-instagram-gis'] = x_instagram_gis
resp_json = self.get_json('graphql/query',
params={'query_hash': query_hash,
'variables': variables_json},
session=tmpsession)
if 'status' not in resp_json:
self.error("GraphQL response did not contain a \"status\" field.")
return resp_json | Do a GraphQL Query.
:param query_hash: Query identifying hash.
:param variables: Variables for the Query.
:param referer: HTTP Referer, or None.
:param rhx_gis: 'rhx_gis' variable as somewhere returned by Instagram, needed to 'sign' request
    :return: The server's response dictionary. | Below is the instruction that describes the task:
### Input:
Do a GraphQL Query.
:param query_hash: Query identifying hash.
:param variables: Variables for the Query.
:param referer: HTTP Referer, or None.
:param rhx_gis: 'rhx_gis' variable as somewhere returned by Instagram, needed to 'sign' request
:return: The server's response dictionary.
### Response:
def graphql_query(self, query_hash: str, variables: Dict[str, Any],
referer: Optional[str] = None, rhx_gis: Optional[str] = None) -> Dict[str, Any]:
"""
Do a GraphQL Query.
:param query_hash: Query identifying hash.
:param variables: Variables for the Query.
:param referer: HTTP Referer, or None.
:param rhx_gis: 'rhx_gis' variable as somewhere returned by Instagram, needed to 'sign' request
:return: The server's response dictionary.
"""
with copy_session(self._session) as tmpsession:
tmpsession.headers.update(self._default_http_header(empty_session_only=True))
del tmpsession.headers['Connection']
del tmpsession.headers['Content-Length']
tmpsession.headers['authority'] = 'www.instagram.com'
tmpsession.headers['scheme'] = 'https'
tmpsession.headers['accept'] = '*/*'
if referer is not None:
tmpsession.headers['referer'] = urllib.parse.quote(referer)
variables_json = json.dumps(variables, separators=(',', ':'))
if rhx_gis:
#self.log("rhx_gis {} query_hash {}".format(rhx_gis, query_hash))
values = "{}:{}".format(rhx_gis, variables_json)
x_instagram_gis = hashlib.md5(values.encode()).hexdigest()
tmpsession.headers['x-instagram-gis'] = x_instagram_gis
resp_json = self.get_json('graphql/query',
params={'query_hash': query_hash,
'variables': variables_json},
session=tmpsession)
if 'status' not in resp_json:
self.error("GraphQL response did not contain a \"status\" field.")
return resp_json |
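The request-signing detail is worth isolating: the x-instagram-gis header is the MD5 of rhx_gis joined with the compact JSON variables, exactly as in the code above. A sketch with made-up values.
import hashlib
import json

# Illustration of the x-instagram-gis signing used above; values are made up.
rhx_gis = 'abcdef0123456789'
variables = {'id': '1234', 'first': 12}

variables_json = json.dumps(variables, separators=(',', ':'))
signature = hashlib.md5('{}:{}'.format(rhx_gis, variables_json).encode()).hexdigest()

headers = {'x-instagram-gis': signature}
print(variables_json, signature)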
def save_current_figure_as(self):
"""Save the currently selected figure."""
if self.current_thumbnail is not None:
self.save_figure_as(self.current_thumbnail.canvas.fig,
                                self.current_thumbnail.canvas.fmt) | Save the currently selected figure. | Below is the instruction that describes the task:
### Input:
Save the currently selected figure.
### Response:
def save_current_figure_as(self):
"""Save the currently selected figure."""
if self.current_thumbnail is not None:
self.save_figure_as(self.current_thumbnail.canvas.fig,
self.current_thumbnail.canvas.fmt) |
def attribute(
self, attr_type, attr_value, displayed=False, source=None, unique=True, formatter=None
):
"""Return instance of Attribute
unique:
* False - Attribute type:value can be duplicated.
* Type - Attribute type has to be unique (e.g., only 1 Description Attribute).
* True - Attribute type:value combo must be unique.
Args:
attr_type (str): The ThreatConnect defined attribute type.
attr_value (str): The value for this attribute.
displayed (bool, default:false): If True the supported attribute will be marked for
display.
source (str, optional): The source value for this attribute.
unique (bool|string, optional): Control attribute creation.
formatter (method, optional): A method that takes a single attribute value and returns a
single formatted value.
Returns:
obj: An instance of Attribute.
"""
attr = Attribute(attr_type, attr_value, displayed, source, formatter)
if unique == 'Type':
for attribute_data in self._attributes:
if attribute_data.type == attr_type:
attr = attribute_data
break
else:
self._attributes.append(attr)
elif unique is True:
for attribute_data in self._attributes:
if attribute_data.type == attr_type and attribute_data.value == attr.value:
attr = attribute_data
break
else:
self._attributes.append(attr)
elif unique is False:
self._attributes.append(attr)
return attr | Return instance of Attribute
unique:
* False - Attribute type:value can be duplicated.
* Type - Attribute type has to be unique (e.g., only 1 Description Attribute).
* True - Attribute type:value combo must be unique.
Args:
attr_type (str): The ThreatConnect defined attribute type.
attr_value (str): The value for this attribute.
displayed (bool, default:false): If True the supported attribute will be marked for
display.
source (str, optional): The source value for this attribute.
unique (bool|string, optional): Control attribute creation.
formatter (method, optional): A method that takes a single attribute value and returns a
single formatted value.
Returns:
        obj: An instance of Attribute. | Below is the instruction that describes the task:
### Input:
Return instance of Attribute
unique:
* False - Attribute type:value can be duplicated.
* Type - Attribute type has to be unique (e.g., only 1 Description Attribute).
* True - Attribute type:value combo must be unique.
Args:
attr_type (str): The ThreatConnect defined attribute type.
attr_value (str): The value for this attribute.
displayed (bool, default:false): If True the supported attribute will be marked for
display.
source (str, optional): The source value for this attribute.
unique (bool|string, optional): Control attribute creation.
formatter (method, optional): A method that takes a single attribute value and returns a
single formatted value.
Returns:
obj: An instance of Attribute.
### Response:
def attribute(
self, attr_type, attr_value, displayed=False, source=None, unique=True, formatter=None
):
"""Return instance of Attribute
unique:
* False - Attribute type:value can be duplicated.
* Type - Attribute type has to be unique (e.g., only 1 Description Attribute).
* True - Attribute type:value combo must be unique.
Args:
attr_type (str): The ThreatConnect defined attribute type.
attr_value (str): The value for this attribute.
displayed (bool, default:false): If True the supported attribute will be marked for
display.
source (str, optional): The source value for this attribute.
unique (bool|string, optional): Control attribute creation.
formatter (method, optional): A method that takes a single attribute value and returns a
single formatted value.
Returns:
obj: An instance of Attribute.
"""
attr = Attribute(attr_type, attr_value, displayed, source, formatter)
if unique == 'Type':
for attribute_data in self._attributes:
if attribute_data.type == attr_type:
attr = attribute_data
break
else:
self._attributes.append(attr)
elif unique is True:
for attribute_data in self._attributes:
if attribute_data.type == attr_type and attribute_data.value == attr.value:
attr = attribute_data
break
else:
self._attributes.append(attr)
elif unique is False:
self._attributes.append(attr)
return attr |
def add(self, name, value, bitmask=DEFMASK):
"""Add an enum member
Args:
name: Name of the member
value: value of the member
bitmask: bitmask. Only use if enum is a bitfield.
"""
_add_enum_member(self._eid, name, value, bitmask) | Add an enum member
Args:
name: Name of the member
value: value of the member
            bitmask: bitmask. Only use if enum is a bitfield. | Below is the instruction that describes the task:
### Input:
Add an enum member
Args:
name: Name of the member
value: value of the member
bitmask: bitmask. Only use if enum is a bitfield.
### Response:
def add(self, name, value, bitmask=DEFMASK):
"""Add an enum member
Args:
name: Name of the member
value: value of the member
bitmask: bitmask. Only use if enum is a bitfield.
"""
_add_enum_member(self._eid, name, value, bitmask) |
def layers_intersect(layer_a, layer_b):
"""Check if extents of two layers intersect.
:param layer_a: One of the two layers to test overlapping
:type layer_a: QgsMapLayer
:param layer_b: The second of the two layers to test overlapping
:type layer_b: QgsMapLayer
:returns: true if the layers intersect, false if they are disjoint
:rtype: boolean
"""
extent_a = layer_a.extent()
extent_b = layer_b.extent()
if layer_a.crs() != layer_b.crs():
coord_transform = QgsCoordinateTransform(
layer_a.crs(), layer_b.crs(), QgsProject.instance())
extent_b = (coord_transform.transform(
extent_b, QgsCoordinateTransform.ReverseTransform))
return extent_a.intersects(extent_b) | Check if extents of two layers intersect.
:param layer_a: One of the two layers to test overlapping
:type layer_a: QgsMapLayer
:param layer_b: The second of the two layers to test overlapping
:type layer_b: QgsMapLayer
:returns: true if the layers intersect, false if they are disjoint
    :rtype: boolean | Below is the instruction that describes the task:
### Input:
Check if extents of two layers intersect.
:param layer_a: One of the two layers to test overlapping
:type layer_a: QgsMapLayer
:param layer_b: The second of the two layers to test overlapping
:type layer_b: QgsMapLayer
:returns: true if the layers intersect, false if they are disjoint
:rtype: boolean
### Response:
def layers_intersect(layer_a, layer_b):
"""Check if extents of two layers intersect.
:param layer_a: One of the two layers to test overlapping
:type layer_a: QgsMapLayer
:param layer_b: The second of the two layers to test overlapping
:type layer_b: QgsMapLayer
:returns: true if the layers intersect, false if they are disjoint
:rtype: boolean
"""
extent_a = layer_a.extent()
extent_b = layer_b.extent()
if layer_a.crs() != layer_b.crs():
coord_transform = QgsCoordinateTransform(
layer_a.crs(), layer_b.crs(), QgsProject.instance())
extent_b = (coord_transform.transform(
extent_b, QgsCoordinateTransform.ReverseTransform))
return extent_a.intersects(extent_b) |
def service_registry(self, sr):
"""
Sets service registry object in context, doesn't check it
Args:
sr: EFServiceRegistry object
"""
if type(sr) is not EFServiceRegistry:
raise TypeError("sr value must be type 'EFServiceRegistry'")
self._service_registry = sr | Sets service registry object in context, doesn't check it
Args:
      sr: EFServiceRegistry object | Below is the instruction that describes the task:
### Input:
Sets service registry object in context, doesn't check it
Args:
sr: EFServiceRegistry object
### Response:
def service_registry(self, sr):
"""
Sets service registry object in context, doesn't check it
Args:
sr: EFServiceRegistry object
"""
if type(sr) is not EFServiceRegistry:
raise TypeError("sr value must be type 'EFServiceRegistry'")
self._service_registry = sr |
def butter_lowpass_filter(data, sample_rate, cutoff=10, order=4, plot=False):
"""
`Low-pass filter <http://stackoverflow.com/questions/25191620/
creating-lowpass-filter-in-scipy-understanding-methods-and-units>`_ data by the [order]th order zero lag Butterworth filter
whose cut frequency is set to [cutoff] Hz.
:param data: time-series data,
:type data: numpy array of floats
:param: sample_rate: data sample rate
:type sample_rate: integer
:param cutoff: filter cutoff
:type cutoff: float
:param order: order
:type order: integer
:return y: low-pass-filtered data
:rtype y: numpy array of floats
:Examples:
>>> from mhealthx.signals import butter_lowpass_filter
>>> data = np.random.random(100)
>>> sample_rate = 10
>>> cutoff = 5
>>> order = 4
>>> y = butter_lowpass_filter(data, sample_rate, cutoff, order)
"""
nyquist = 0.5 * sample_rate
normal_cutoff = cutoff / nyquist
b, a = butter(order, normal_cutoff, btype='low', analog=False)
if plot:
w, h = freqz(b, a, worN=8000)
plt.subplot(2, 1, 1)
plt.plot(0.5*sample_rate*w/np.pi, np.abs(h), 'b')
plt.plot(cutoff, 0.5*np.sqrt(2), 'ko')
plt.axvline(cutoff, color='k')
plt.xlim(0, 0.5*sample_rate)
plt.title("Lowpass Filter Frequency Response")
plt.xlabel('Frequency [Hz]')
plt.grid()
plt.show()
y = lfilter(b, a, data)
return y | `Low-pass filter <http://stackoverflow.com/questions/25191620/
creating-lowpass-filter-in-scipy-understanding-methods-and-units>`_ data by the [order]th order zero lag Butterworth filter
whose cut frequency is set to [cutoff] Hz.
:param data: time-series data,
:type data: numpy array of floats
:param: sample_rate: data sample rate
:type sample_rate: integer
:param cutoff: filter cutoff
:type cutoff: float
:param order: order
:type order: integer
:return y: low-pass-filtered data
:rtype y: numpy array of floats
:Examples:
>>> from mhealthx.signals import butter_lowpass_filter
>>> data = np.random.random(100)
>>> sample_rate = 10
>>> cutoff = 5
>>> order = 4
    >>> y = butter_lowpass_filter(data, sample_rate, cutoff, order) | Below is the instruction that describes the task:
### Input:
`Low-pass filter <http://stackoverflow.com/questions/25191620/
creating-lowpass-filter-in-scipy-understanding-methods-and-units>`_ data by the [order]th order zero lag Butterworth filter
whose cut frequency is set to [cutoff] Hz.
:param data: time-series data,
:type data: numpy array of floats
:param: sample_rate: data sample rate
:type sample_rate: integer
:param cutoff: filter cutoff
:type cutoff: float
:param order: order
:type order: integer
:return y: low-pass-filtered data
:rtype y: numpy array of floats
:Examples:
>>> from mhealthx.signals import butter_lowpass_filter
>>> data = np.random.random(100)
>>> sample_rate = 10
>>> cutoff = 5
>>> order = 4
>>> y = butter_lowpass_filter(data, sample_rate, cutoff, order)
### Response:
def butter_lowpass_filter(data, sample_rate, cutoff=10, order=4, plot=False):
"""
`Low-pass filter <http://stackoverflow.com/questions/25191620/
creating-lowpass-filter-in-scipy-understanding-methods-and-units>`_ data by the [order]th order zero lag Butterworth filter
whose cut frequency is set to [cutoff] Hz.
:param data: time-series data,
:type data: numpy array of floats
:param: sample_rate: data sample rate
:type sample_rate: integer
:param cutoff: filter cutoff
:type cutoff: float
:param order: order
:type order: integer
:return y: low-pass-filtered data
:rtype y: numpy array of floats
:Examples:
>>> from mhealthx.signals import butter_lowpass_filter
>>> data = np.random.random(100)
>>> sample_rate = 10
>>> cutoff = 5
>>> order = 4
>>> y = butter_lowpass_filter(data, sample_rate, cutoff, order)
"""
nyquist = 0.5 * sample_rate
normal_cutoff = cutoff / nyquist
b, a = butter(order, normal_cutoff, btype='low', analog=False)
if plot:
w, h = freqz(b, a, worN=8000)
plt.subplot(2, 1, 1)
plt.plot(0.5*sample_rate*w/np.pi, np.abs(h), 'b')
plt.plot(cutoff, 0.5*np.sqrt(2), 'ko')
plt.axvline(cutoff, color='k')
plt.xlim(0, 0.5*sample_rate)
plt.title("Lowpass Filter Frequency Response")
plt.xlabel('Frequency [Hz]')
plt.grid()
plt.show()
y = lfilter(b, a, data)
return y |
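A self-contained usage sketch of the same filtering idea (assumes numpy and scipy are installed); note the digital cutoff must stay below the Nyquist frequency, sample_rate / 2, for scipy.signal.butter to accept it.
import numpy as np
from scipy.signal import butter, lfilter

def lowpass(data, sample_rate, cutoff=10, order=4):
    # Same core as butter_lowpass_filter above, without the plotting branch.
    nyquist = 0.5 * sample_rate
    b, a = butter(order, cutoff / nyquist, btype='low', analog=False)
    return lfilter(b, a, data)

rng = np.random.default_rng(0)
noisy = np.sin(np.linspace(0, 2 * np.pi, 200)) + 0.3 * rng.standard_normal(200)
smoothed = lowpass(noisy, sample_rate=100, cutoff=5)
print(smoothed.shape)  # (200,)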
def fetch(TableName,M,I,numin,numax,ParameterGroups=[],Parameters=[]):
"""
INPUT PARAMETERS:
TableName: local table name to fetch in (required)
M: HITRAN molecule number (required)
I: HITRAN isotopologue number (required)
numin: lower wavenumber bound (required)
numax: upper wavenumber bound (required)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Download line-by-line data from HITRANonline server
and save it to local table. The input parameters M and I
are the HITRAN molecule and isotopologue numbers.
    This function results in a table containing a single
    isotopologue species. To have multiple species in a
single table use fetch_by_ids instead.
---
EXAMPLE OF USAGE:
fetch('HOH',1,1,4000,4100)
---
"""
queryHITRAN(TableName,[ISO[(M,I)][ISO_INDEX['id']]],numin,numax,
pargroups=ParameterGroups,params=Parameters)
iso_name = ISO[(M,I)][ISO_INDEX['iso_name']]
Comment = 'Contains lines for '+iso_name
Comment += ('\n in %.3f-%.3f wavenumber range' % (numin,numax))
comment(TableName,Comment) | INPUT PARAMETERS:
TableName: local table name to fetch in (required)
M: HITRAN molecule number (required)
I: HITRAN isotopologue number (required)
numin: lower wavenumber bound (required)
numax: upper wavenumber bound (required)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Download line-by-line data from HITRANonline server
and save it to local table. The input parameters M and I
are the HITRAN molecule and isotopologue numbers.
    This function results in a table containing a single
    isotopologue species. To have multiple species in a
single table use fetch_by_ids instead.
---
EXAMPLE OF USAGE:
fetch('HOH',1,1,4000,4100)
    --- | Below is the instruction that describes the task:
### Input:
INPUT PARAMETERS:
TableName: local table name to fetch in (required)
M: HITRAN molecule number (required)
I: HITRAN isotopologue number (required)
numin: lower wavenumber bound (required)
numax: upper wavenumber bound (required)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Download line-by-line data from HITRANonline server
and save it to local table. The input parameters M and I
are the HITRAN molecule and isotopologue numbers.
    This function results in a table containing a single
    isotopologue species. To have multiple species in a
single table use fetch_by_ids instead.
---
EXAMPLE OF USAGE:
fetch('HOH',1,1,4000,4100)
---
### Response:
def fetch(TableName,M,I,numin,numax,ParameterGroups=[],Parameters=[]):
"""
INPUT PARAMETERS:
TableName: local table name to fetch in (required)
M: HITRAN molecule number (required)
I: HITRAN isotopologue number (required)
numin: lower wavenumber bound (required)
numax: upper wavenumber bound (required)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Download line-by-line data from HITRANonline server
and save it to local table. The input parameters M and I
are the HITRAN molecule and isotopologue numbers.
    This function results in a table containing a single
    isotopologue species. To have multiple species in a
single table use fetch_by_ids instead.
---
EXAMPLE OF USAGE:
fetch('HOH',1,1,4000,4100)
---
"""
queryHITRAN(TableName,[ISO[(M,I)][ISO_INDEX['id']]],numin,numax,
pargroups=ParameterGroups,params=Parameters)
iso_name = ISO[(M,I)][ISO_INDEX['iso_name']]
Comment = 'Contains lines for '+iso_name
Comment += ('\n in %.3f-%.3f wavenumber range' % (numin,numax))
comment(TableName,Comment) |
def _create_context_jar(self, compile_context):
"""Jar up the compile_context to its output jar location.
TODO(stuhood): In the medium term, we hope to add compiler support for this step, which would
allow the jars to be used as compile _inputs_ as well. Currently using jar'd compile outputs as
compile inputs would make the compiler's analysis useless.
see https://github.com/twitter-forks/sbt/tree/stuhood/output-jars
"""
root = compile_context.classes_dir.path
with compile_context.open_jar(mode='w') as jar:
for abs_sub_dir, dirnames, filenames in safe_walk(root):
for name in dirnames + filenames:
abs_filename = os.path.join(abs_sub_dir, name)
arcname = fast_relpath(abs_filename, root)
jar.write(abs_filename, arcname) | Jar up the compile_context to its output jar location.
TODO(stuhood): In the medium term, we hope to add compiler support for this step, which would
allow the jars to be used as compile _inputs_ as well. Currently using jar'd compile outputs as
compile inputs would make the compiler's analysis useless.
    see https://github.com/twitter-forks/sbt/tree/stuhood/output-jars | Below is the instruction that describes the task:
### Input:
Jar up the compile_context to its output jar location.
TODO(stuhood): In the medium term, we hope to add compiler support for this step, which would
allow the jars to be used as compile _inputs_ as well. Currently using jar'd compile outputs as
compile inputs would make the compiler's analysis useless.
see https://github.com/twitter-forks/sbt/tree/stuhood/output-jars
### Response:
def _create_context_jar(self, compile_context):
"""Jar up the compile_context to its output jar location.
TODO(stuhood): In the medium term, we hope to add compiler support for this step, which would
allow the jars to be used as compile _inputs_ as well. Currently using jar'd compile outputs as
compile inputs would make the compiler's analysis useless.
see https://github.com/twitter-forks/sbt/tree/stuhood/output-jars
"""
root = compile_context.classes_dir.path
with compile_context.open_jar(mode='w') as jar:
for abs_sub_dir, dirnames, filenames in safe_walk(root):
for name in dirnames + filenames:
abs_filename = os.path.join(abs_sub_dir, name)
arcname = fast_relpath(abs_filename, root)
jar.write(abs_filename, arcname) |
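A rough standalone equivalent using only the standard library (zipfile in place of the pants-internal open_jar/classes_dir helpers); the paths in the commented call are hypothetical.
import os
import zipfile

def jar_up(classes_dir, jar_path):
    with zipfile.ZipFile(jar_path, 'w', zipfile.ZIP_DEFLATED) as jar:
        for abs_sub_dir, dirnames, filenames in os.walk(classes_dir):
            # Directories are written too, so empty package dirs are preserved.
            for name in dirnames + filenames:
                abs_filename = os.path.join(abs_sub_dir, name)
                arcname = os.path.relpath(abs_filename, classes_dir)
                jar.write(abs_filename, arcname)

# jar_up('build/classes', 'build/context.jar')  # hypothetical paths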
def _validate_color(self, color):
"""Validates color, raising error if invalid."""
three_digit_pattern = compile("^[a-f0-9]{3}$")
six_digit_pattern = compile("^[a-f0-9]{6}$")
if not match(three_digit_pattern, color)\
and not match(six_digit_pattern, color):
raise InvalidColorError(
"{} is not a valid color".format(color)
)
        return color | Validates color, raising error if invalid. | Below is the instruction that describes the task:
### Input:
Validates color, raising error if invalid.
### Response:
def _validate_color(self, color):
"""Validates color, raising error if invalid."""
three_digit_pattern = compile("^[a-f0-9]{3}$")
six_digit_pattern = compile("^[a-f0-9]{6}$")
if not match(three_digit_pattern, color)\
and not match(six_digit_pattern, color):
raise InvalidColorError(
"{} is not a valid color".format(color)
)
return color |
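A standalone sketch of the same check with one combined pattern; InvalidColorError is the library's own exception, so plain ValueError stands in for it here.
import re

_HEX_COLOR = re.compile(r'^(?:[a-f0-9]{3}|[a-f0-9]{6})$')

def validate_color(color):
    # Accepts 3- or 6-digit lowercase hex without a leading '#'.
    if not _HEX_COLOR.match(color):
        raise ValueError('{} is not a valid color'.format(color))
    return color

print(validate_color('1a2b3c'))  # 1a2b3c
print(validate_color('fff'))     # fff
# validate_color('#ffffff')      # raises ValueError: the leading '#' is not allowed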
def count_lines(path, extensions=None, excluded_dirnames=None):
"""Return number of source code lines for all filenames in subdirectories
of *path* with names ending with *extensions*
Directory names *excluded_dirnames* will be ignored"""
if extensions is None:
extensions = ['.py', '.pyw', '.ipy', '.enaml', '.c', '.h', '.cpp',
'.hpp', '.inc', '.', '.hh', '.hxx', '.cc', '.cxx',
'.cl', '.f', '.for', '.f77', '.f90', '.f95', '.f2k',
'.f03', '.f08']
if excluded_dirnames is None:
excluded_dirnames = ['build', 'dist', '.hg', '.svn']
def get_filelines(path):
dfiles, dlines = 0, 0
if osp.splitext(path)[1] in extensions:
dfiles = 1
with open(path, 'rb') as textfile:
dlines = len(textfile.read().strip().splitlines())
return dfiles, dlines
lines = 0
files = 0
if osp.isdir(path):
for dirpath, dirnames, filenames in os.walk(path):
for d in dirnames[:]:
if d in excluded_dirnames:
dirnames.remove(d)
if excluded_dirnames is None or \
osp.dirname(dirpath) not in excluded_dirnames:
for fname in filenames:
dfiles, dlines = get_filelines(osp.join(dirpath, fname))
files += dfiles
lines += dlines
else:
dfiles, dlines = get_filelines(path)
files += dfiles
lines += dlines
return files, lines | Return number of source code lines for all filenames in subdirectories
of *path* with names ending with *extensions*
    Directory names *excluded_dirnames* will be ignored | Below is the instruction that describes the task:
### Input:
Return number of source code lines for all filenames in subdirectories
of *path* with names ending with *extensions*
Directory names *excluded_dirnames* will be ignored
### Response:
def count_lines(path, extensions=None, excluded_dirnames=None):
"""Return number of source code lines for all filenames in subdirectories
of *path* with names ending with *extensions*
Directory names *excluded_dirnames* will be ignored"""
if extensions is None:
extensions = ['.py', '.pyw', '.ipy', '.enaml', '.c', '.h', '.cpp',
'.hpp', '.inc', '.', '.hh', '.hxx', '.cc', '.cxx',
'.cl', '.f', '.for', '.f77', '.f90', '.f95', '.f2k',
'.f03', '.f08']
if excluded_dirnames is None:
excluded_dirnames = ['build', 'dist', '.hg', '.svn']
def get_filelines(path):
dfiles, dlines = 0, 0
if osp.splitext(path)[1] in extensions:
dfiles = 1
with open(path, 'rb') as textfile:
dlines = len(textfile.read().strip().splitlines())
return dfiles, dlines
lines = 0
files = 0
if osp.isdir(path):
for dirpath, dirnames, filenames in os.walk(path):
for d in dirnames[:]:
if d in excluded_dirnames:
dirnames.remove(d)
if excluded_dirnames is None or \
osp.dirname(dirpath) not in excluded_dirnames:
for fname in filenames:
dfiles, dlines = get_filelines(osp.join(dirpath, fname))
files += dfiles
lines += dlines
else:
dfiles, dlines = get_filelines(path)
files += dfiles
lines += dlines
return files, lines |
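A hypothetical invocation of the function above, counting only Python sources under the current directory (the default build/dist/VCS directories are skipped):
files, lines = count_lines('.', extensions=['.py'])
print('{} files, {} lines'.format(files, lines))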
def longest_increasing_subsequence(sequence):
"""
Dynamic Programming Algorithm for
counting the length of longest increasing subsequence
type sequence: List[int]
"""
length = len(sequence)
counts = [1 for _ in range(length)]
for i in range(1, length):
for j in range(0, i):
if sequence[i] > sequence[j]:
counts[i] = max(counts[i], counts[j] + 1)
print(counts)
return max(counts) | Dynamic Programming Algorithm for
counting the length of longest increasing subsequence
    type sequence: List[int] | Below is the instruction that describes the task:
### Input:
Dynamic Programming Algorithm for
counting the length of longest increasing subsequence
type sequence: List[int]
### Response:
def longest_increasing_subsequence(sequence):
"""
Dynamic Programming Algorithm for
counting the length of longest increasing subsequence
type sequence: List[int]
"""
length = len(sequence)
counts = [1 for _ in range(length)]
for i in range(1, length):
for j in range(0, i):
if sequence[i] > sequence[j]:
counts[i] = max(counts[i], counts[j] + 1)
print(counts)
return max(counts) |
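Usage sketch for the function above: the classic test sequence has a longest increasing subsequence of length 4 (for example 2, 3, 7, 101); the per-index counts are printed as a side effect before the maximum is returned.
result = longest_increasing_subsequence([10, 9, 2, 5, 3, 7, 101, 18])
# prints: [1, 1, 1, 2, 2, 3, 4, 4]
print(result)  # 4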
def naverage(wave, indep_min=None, indep_max=None):
r"""
Return the numerical average of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.naverage
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
.. [[[end]]]
"""
ret = copy.copy(wave)
_bound_waveform(ret, indep_min, indep_max)
delta_x = ret._indep_vector[-1] - ret._indep_vector[0]
return np.trapz(ret._dep_vector, x=ret._indep_vector) / delta_x | r"""
Return the numerical average of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.naverage
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
    .. [[[end]]] | Below is the instruction that describes the task:
### Input:
r"""
Return the numerical average of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.naverage
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
.. [[[end]]]
### Response:
def naverage(wave, indep_min=None, indep_max=None):
r"""
Return the numerical average of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.naverage
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
.. [[[end]]]
"""
ret = copy.copy(wave)
_bound_waveform(ret, indep_min, indep_max)
delta_x = ret._indep_vector[-1] - ret._indep_vector[0]
return np.trapz(ret._dep_vector, x=ret._indep_vector) / delta_x |
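The core computation sketched without the peng Waveform wrapper, on plain numpy arrays: the average is the trapezoidal integral divided by the span of the independent vector (np.trapz is spelled np.trapezoid from NumPy 2.0 on).
import numpy as np

x = np.linspace(0.0, 2.0 * np.pi, 1000)
y = np.sin(x) + 1.0
average = np.trapz(y, x=x) / (x[-1] - x[0])
print(round(average, 6))  # ~1.0, since sin averages to zero over a full period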
def _identify_eds_ing(first, second):
"""Find nodes connecting adjacent edges.
Args:
first(Edge): Edge object representing the first edge.
second(Edge): Edge object representing the second edge.
Returns:
tuple[int, int, set[int]]: The first two values represent left and right node
        indices of the new edge. The third value is the new dependence set.
"""
A = set([first.L, first.R])
A.update(first.D)
B = set([second.L, second.R])
B.update(second.D)
depend_set = A & B
left, right = sorted(list(A ^ B))
return left, right, depend_set | Find nodes connecting adjacent edges.
Args:
first(Edge): Edge object representing the first edge.
second(Edge): Edge object representing the second edge.
Returns:
tuple[int, int, set[int]]: The first two values represent left and right node
        indices of the new edge. The third value is the new dependence set. | Below is the instruction that describes the task:
### Input:
Find nodes connecting adjacent edges.
Args:
first(Edge): Edge object representing the first edge.
second(Edge): Edge object representing the second edge.
Returns:
tuple[int, int, set[int]]: The first two values represent left and right node
        indices of the new edge. The third value is the new dependence set.
### Response:
def _identify_eds_ing(first, second):
"""Find nodes connecting adjacent edges.
Args:
first(Edge): Edge object representing the first edge.
second(Edge): Edge object representing the second edge.
Returns:
tuple[int, int, set[int]]: The first two values represent left and right node
        indices of the new edge. The third value is the new dependence set.
"""
A = set([first.L, first.R])
A.update(first.D)
B = set([second.L, second.R])
B.update(second.D)
depend_set = A & B
left, right = sorted(list(A ^ B))
return left, right, depend_set |
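A worked example with a minimal namedtuple stand-in for Edge (the function only touches the L, R and D attributes): two edges sharing node 1 combine into an edge between nodes 0 and 2 that depends on node 1.
from collections import namedtuple

Edge = namedtuple('Edge', ['L', 'R', 'D'])  # hypothetical stand-in for the real Edge class

first = Edge(L=0, R=1, D=set())
second = Edge(L=1, R=2, D=set())
print(_identify_eds_ing(first, second))  # (0, 2, {1})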
def logging_auditlog_clss_clss(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logging = ET.SubElement(config, "logging", xmlns="urn:brocade.com:mgmt:brocade-ras")
auditlog = ET.SubElement(logging, "auditlog")
clss = ET.SubElement(auditlog, "class")
clss = ET.SubElement(clss, "class")
clss.text = kwargs.pop('clss')
callback = kwargs.pop('callback', self._callback)
        return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def logging_auditlog_clss_clss(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logging = ET.SubElement(config, "logging", xmlns="urn:brocade.com:mgmt:brocade-ras")
auditlog = ET.SubElement(logging, "auditlog")
clss = ET.SubElement(auditlog, "class")
clss = ET.SubElement(clss, "class")
clss.text = kwargs.pop('clss')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
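The XML payload this builds, sketched with plain ElementTree and a hypothetical class value; the real method hands the element tree to the NETCONF callback instead of printing it.
import xml.etree.ElementTree as ET

config = ET.Element("config")
logging_el = ET.SubElement(config, "logging", xmlns="urn:brocade.com:mgmt:brocade-ras")
auditlog = ET.SubElement(logging_el, "auditlog")
outer = ET.SubElement(auditlog, "class")
ET.SubElement(outer, "class").text = "SECURITY"  # hypothetical audit-log class name
print(ET.tostring(config).decode())
# <config><logging xmlns="urn:brocade.com:mgmt:brocade-ras"><auditlog><class><class>SECURITY</class></class></auditlog></logging></config>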
def save(self, refreshing=None, next_action=None, json_last_refresh=None, data_blob=None):
"""
save or update the component on the Ariane server cache
:param refreshing: the new refreshing value - default None and ignored
:param next_action: the new next action - default None and ignored
:param json_last_refresh: the new json last refresh - default the date of this call
:param data_blob: the new data blob of this component - default None and ignored
:return:
"""
LOGGER.debug("InjectorCachedComponent.save")
ret = True
if refreshing is not None:
self.refreshing = refreshing
if next_action is not None:
self.next_action = next_action
if json_last_refresh is not None:
try:
self.json_last_refresh = json_last_refresh.strftime("%Y-%m-%d %H:%M:%S.%f")
except AttributeError:
self.json_last_refresh = json_last_refresh
if data_blob is not None:
self.blob = data_blob
if self.service is None:
self.service = InjectorCachedComponentService.make_refresh_on_demand_service(self)
if self.service is not None and not self.service.is_started:
self.service.start()
args = {'properties': {'OPERATION': 'PUSH_COMPONENT_IN_CACHE',
'REMOTE_COMPONENT':
str(self.injector_component_2_json(properties_only=True)).replace("'", '"'),
'CACHE_ID': InjectorCachedComponentService.cache_id},
'body': self.blob}
result = InjectorCachedComponentService.requester.call(args).get()
if result.rc != 0:
err_msg = 'InjectorCachedComponent.save - Problem while saving component ( id : ' + self.id + \
'Reason: ' + str(result.response_content) + '-' + str(result.error_message) + \
" (" + str(result.rc) + ")"
LOGGER.warning(err_msg)
ret = False
return ret | save or update the component on the Ariane server cache
:param refreshing: the new refreshing value - default None and ignored
:param next_action: the new next action - default None and ignored
:param json_last_refresh: the new json last refresh - default the date of this call
:param data_blob: the new data blob of this component - default None and ignored
        :return: | Below is the instruction that describes the task:
### Input:
save or update the component on the Ariane server cache
:param refreshing: the new refreshing value - default None and ignored
:param next_action: the new next action - default None and ignored
:param json_last_refresh: the new json last refresh - default the date of this call
:param data_blob: the new data blob of this component - default None and ignored
:return:
### Response:
def save(self, refreshing=None, next_action=None, json_last_refresh=None, data_blob=None):
"""
save or update the component on the Ariane server cache
:param refreshing: the new refreshing value - default None and ignored
:param next_action: the new next action - default None and ignored
:param json_last_refresh: the new json last refresh - default the date of this call
:param data_blob: the new data blob of this component - default None and ignored
:return:
"""
LOGGER.debug("InjectorCachedComponent.save")
ret = True
if refreshing is not None:
self.refreshing = refreshing
if next_action is not None:
self.next_action = next_action
if json_last_refresh is not None:
try:
self.json_last_refresh = json_last_refresh.strftime("%Y-%m-%d %H:%M:%S.%f")
except AttributeError:
self.json_last_refresh = json_last_refresh
if data_blob is not None:
self.blob = data_blob
if self.service is None:
self.service = InjectorCachedComponentService.make_refresh_on_demand_service(self)
if self.service is not None and not self.service.is_started:
self.service.start()
args = {'properties': {'OPERATION': 'PUSH_COMPONENT_IN_CACHE',
'REMOTE_COMPONENT':
str(self.injector_component_2_json(properties_only=True)).replace("'", '"'),
'CACHE_ID': InjectorCachedComponentService.cache_id},
'body': self.blob}
result = InjectorCachedComponentService.requester.call(args).get()
if result.rc != 0:
err_msg = 'InjectorCachedComponent.save - Problem while saving component ( id : ' + self.id + \
'Reason: ' + str(result.response_content) + '-' + str(result.error_message) + \
" (" + str(result.rc) + ")"
LOGGER.warning(err_msg)
ret = False
return ret |
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
res_upload_handler=None, size=None):
"""
Store an object in GS using the name of the Key object as the
key in GS and the contents of the file pointed to by 'fp' as the
contents.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type md5: A tuple containing the hexdigest version of the MD5 checksum
of the file as the first element and the Base64-encoded version of
the plain checksum as the second element. This is the same format
returned by the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior to
upload, it's silly to have to do it twice so this param, if present,
will be used as the MD5 values of the file. Otherwise, the checksum
will be computed.
:type res_upload_handler: ResumableUploadHandler
:param res_upload_handler: If provided, this handler will perform the
upload.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the
file up into different ranges to be uploaded. If not
specified, the default behaviour is to read all bytes
from the file pointer. Less bytes may be available.
Notes:
1. The "size" parameter currently cannot be used when
a resumable upload handler is given but is still
useful for uploading part of a file as implemented
by the parent class.
2. At present Google Cloud Storage does not support
multipart uploads.
TODO: At some point we should refactor the Bucket and Key classes,
to move functionality common to all providers into a parent class,
and provider-specific functionality into subclasses (rather than
just overriding/sharing code the way it currently works).
"""
provider = self.bucket.connection.provider
if res_upload_handler and size:
# could use size instead of file_length if provided but...
raise BotoClientError('"size" param not supported for resumable uploads.')
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket != None:
if not md5:
# compute_md5() and also set self.size to actual
# size of the bytes read computing the md5.
md5 = self.compute_md5(fp, size)
# adjust size if required
size = self.size
elif size:
self.size = size
else:
# If md5 is provided, still need to size so
# calculate based on bytes to end of content
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name == None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
if res_upload_handler:
res_upload_handler.send_file(self, fp, headers, cb, num_cb)
else:
# Not a resumable transfer so use basic send_file mechanism.
self.send_file(fp, headers, cb, num_cb, size=size) | Store an object in GS using the name of the Key object as the
key in GS and the contents of the file pointed to by 'fp' as the
contents.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type md5: A tuple containing the hexdigest version of the MD5 checksum
of the file as the first element and the Base64-encoded version of
the plain checksum as the second element. This is the same format
returned by the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior to
upload, it's silly to have to do it twice so this param, if present,
will be used as the MD5 values of the file. Otherwise, the checksum
will be computed.
:type res_upload_handler: ResumableUploadHandler
:param res_upload_handler: If provided, this handler will perform the
upload.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the
file up into different ranges to be uploaded. If not
specified, the default behaviour is to read all bytes
from the file pointer. Less bytes may be available.
Notes:
1. The "size" parameter currently cannot be used when
a resumable upload handler is given but is still
useful for uploading part of a file as implemented
by the parent class.
2. At present Google Cloud Storage does not support
multipart uploads.
TODO: At some point we should refactor the Bucket and Key classes,
to move functionality common to all providers into a parent class,
and provider-specific functionality into subclasses (rather than
        just overriding/sharing code the way it currently works). | Below is the instruction that describes the task:
### Input:
Store an object in GS using the name of the Key object as the
key in GS and the contents of the file pointed to by 'fp' as the
contents.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type md5: A tuple containing the hexdigest version of the MD5 checksum
of the file as the first element and the Base64-encoded version of
the plain checksum as the second element. This is the same format
returned by the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior to
upload, it's silly to have to do it twice so this param, if present,
will be used as the MD5 values of the file. Otherwise, the checksum
will be computed.
:type res_upload_handler: ResumableUploadHandler
:param res_upload_handler: If provided, this handler will perform the
upload.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the
file up into different ranges to be uploaded. If not
specified, the default behaviour is to read all bytes
from the file pointer. Less bytes may be available.
Notes:
1. The "size" parameter currently cannot be used when
a resumable upload handler is given but is still
useful for uploading part of a file as implemented
by the parent class.
2. At present Google Cloud Storage does not support
multipart uploads.
TODO: At some point we should refactor the Bucket and Key classes,
to move functionality common to all providers into a parent class,
and provider-specific functionality into subclasses (rather than
just overriding/sharing code the way it currently works).
### Response:
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
res_upload_handler=None, size=None):
"""
Store an object in GS using the name of the Key object as the
key in GS and the contents of the file pointed to by 'fp' as the
contents.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type md5: A tuple containing the hexdigest version of the MD5 checksum
of the file as the first element and the Base64-encoded version of
the plain checksum as the second element. This is the same format
returned by the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior to
upload, it's silly to have to do it twice so this param, if present,
will be used as the MD5 values of the file. Otherwise, the checksum
will be computed.
:type res_upload_handler: ResumableUploadHandler
:param res_upload_handler: If provided, this handler will perform the
upload.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the
file up into different ranges to be uploaded. If not
specified, the default behaviour is to read all bytes
from the file pointer. Less bytes may be available.
Notes:
1. The "size" parameter currently cannot be used when
a resumable upload handler is given but is still
useful for uploading part of a file as implemented
by the parent class.
2. At present Google Cloud Storage does not support
multipart uploads.
TODO: At some point we should refactor the Bucket and Key classes,
to move functionality common to all providers into a parent class,
and provider-specific functionality into subclasses (rather than
just overriding/sharing code the way it currently works).
"""
provider = self.bucket.connection.provider
if res_upload_handler and size:
# could use size instead of file_length if provided but...
raise BotoClientError('"size" param not supported for resumable uploads.')
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket != None:
if not md5:
# compute_md5() and also set self.size to actual
# size of the bytes read computing the md5.
md5 = self.compute_md5(fp, size)
# adjust size if required
size = self.size
elif size:
self.size = size
else:
# If md5 is provided, still need to size so
# calculate based on bytes to end of content
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name == None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
if res_upload_handler:
res_upload_handler.send_file(self, fp, headers, cb, num_cb)
else:
# Not a resumable transfer so use basic send_file mechanism.
self.send_file(fp, headers, cb, num_cb, size=size) |
def single(a, b, distance_function):
"""
Given two collections ``a`` and ``b``, this will return the distance of the
points which are closest together. ``distance_function`` is used to
determine the distance between two elements.
Example::
>>> single([1, 2], [3, 4], lambda x, y: abs(x-y))
1 # (distance between 2 and 3)
"""
left_a, right_a = min(a), max(a)
left_b, right_b = min(b), max(b)
result = min(distance_function(left_a, right_b),
distance_function(left_b, right_a))
return result | Given two collections ``a`` and ``b``, this will return the distance of the
points which are closest together. ``distance_function`` is used to
determine the distance between two elements.
Example::
>>> single([1, 2], [3, 4], lambda x, y: abs(x-y))
        1 # (distance between 2 and 3) | Below is the instruction that describes the task:
### Input:
Given two collections ``a`` and ``b``, this will return the distance of the
points which are closest together. ``distance_function`` is used to
determine the distance between two elements.
Example::
>>> single([1, 2], [3, 4], lambda x, y: abs(x-y))
1 # (distance between 2 and 3)
### Response:
def single(a, b, distance_function):
"""
Given two collections ``a`` and ``b``, this will return the distance of the
points which are closest together. ``distance_function`` is used to
determine the distance between two elements.
Example::
>>> single([1, 2], [3, 4], lambda x, y: abs(x-y))
1 # (distance between 2 and 3)
"""
left_a, right_a = min(a), max(a)
left_b, right_b = min(b), max(b)
result = min(distance_function(left_a, right_b),
distance_function(left_b, right_a))
return result |
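Usage sketch; note the implementation only compares the extreme elements of the two collections, which matches the true closest cross-pair whenever the collections occupy disjoint numeric ranges.
print(single([1, 2], [3, 4], lambda x, y: abs(x - y)))     # 1 (between 2 and 3)
print(single([1, 2, 3], [7, 9], lambda x, y: abs(x - y)))  # 4 (between 3 and 7)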
def put_file(self, key, file, ttl_secs=None):
"""Like :meth:`~simplekv.KeyValueStore.put_file`, but with an
additional parameter:
:param ttl_secs: Number of seconds until the key expires. See above
for valid values.
:raises exceptions.ValueError: If ``ttl_secs`` is invalid.
"""
if ttl_secs is None:
ttl_secs = self.default_ttl_secs
self._check_valid_key(key)
if isinstance(file, str):
return self._put_filename(key, file, self._valid_ttl(ttl_secs))
else:
return self._put_file(key, file, self._valid_ttl(ttl_secs)) | Like :meth:`~simplekv.KeyValueStore.put_file`, but with an
additional parameter:
:param ttl_secs: Number of seconds until the key expires. See above
for valid values.
    :raises exceptions.ValueError: If ``ttl_secs`` is invalid. | Below is the instruction that describes the task:
### Input:
Like :meth:`~simplekv.KeyValueStore.put_file`, but with an
additional parameter:
:param ttl_secs: Number of seconds until the key expires. See above
for valid values.
:raises exceptions.ValueError: If ``ttl_secs`` is invalid.
### Response:
def put_file(self, key, file, ttl_secs=None):
"""Like :meth:`~simplekv.KeyValueStore.put_file`, but with an
additional parameter:
:param ttl_secs: Number of seconds until the key expires. See above
for valid values.
:raises exceptions.ValueError: If ``ttl_secs`` is invalid.
"""
if ttl_secs is None:
ttl_secs = self.default_ttl_secs
self._check_valid_key(key)
if isinstance(file, str):
return self._put_filename(key, file, self._valid_ttl(ttl_secs))
else:
return self._put_file(key, file, self._valid_ttl(ttl_secs)) |
def loads(self, param):
'''
Checks the return parameters generating new proxy instances to
avoid query concurrences from shared proxies and creating
proxies for actors from another host.
'''
if isinstance(param, ProxyRef):
try:
return self.lookup_url(param.url, param.klass, param.module)
except HostError:
print "Can't lookup for the actor received with the call. \
It does not exist or the url is unreachable.", param
raise HostError(param)
elif isinstance(param, list):
return [self.loads(elem) for elem in param]
elif isinstance(param, tuple):
return tuple([self.loads(elem) for elem in param])
elif isinstance(param, dict):
new_dict = param
for key in new_dict.keys():
new_dict[key] = self.loads(new_dict[key])
return new_dict
else:
return param | Checks the return parameters generating new proxy instances to
avoid query concurrences from shared proxies and creating
        proxies for actors from another host. | Below is the instruction that describes the task:
### Input:
Checks the return parameters generating new proxy instances to
avoid query concurrences from shared proxies and creating
proxies for actors from another host.
### Response:
def loads(self, param):
'''
Checks the return parameters generating new proxy instances to
avoid query concurrences from shared proxies and creating
proxies for actors from another host.
'''
if isinstance(param, ProxyRef):
try:
return self.lookup_url(param.url, param.klass, param.module)
except HostError:
print "Can't lookup for the actor received with the call. \
It does not exist or the url is unreachable.", param
raise HostError(param)
elif isinstance(param, list):
return [self.loads(elem) for elem in param]
elif isinstance(param, tuple):
return tuple([self.loads(elem) for elem in param])
elif isinstance(param, dict):
new_dict = param
for key in new_dict.keys():
new_dict[key] = self.loads(new_dict[key])
return new_dict
else:
return param |
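The recursion pattern in isolation, as a standalone sketch: walk nested lists, tuples and dicts and transform the leaves. The method above replaces ProxyRef leaves with freshly looked-up proxies and mutates dicts in place; this sketch rebuilds the containers and upper-cases strings purely for illustration.
def walk(value, transform):
    if isinstance(value, list):
        return [walk(item, transform) for item in value]
    if isinstance(value, tuple):
        return tuple(walk(item, transform) for item in value)
    if isinstance(value, dict):
        return {key: walk(item, transform) for key, item in value.items()}
    return transform(value)  # leaf: apply the transformation

data = {'a': [1, ('x', 'y')], 'b': 'z'}
print(walk(data, lambda v: v.upper() if isinstance(v, str) else v))
# {'a': [1, ('X', 'Y')], 'b': 'Z'}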
def encode_data(self):
"""Encode the data back into a dict."""
output_data = {}
output_data["groupTypeList"] = encode_array(self.group_type_list, 4, 0)
output_data["xCoordList"] = encode_array(self.x_coord_list, 10, 1000)
output_data["yCoordList"] = encode_array(self.y_coord_list, 10, 1000)
output_data["zCoordList"] = encode_array(self.z_coord_list, 10, 1000)
output_data["bFactorList"] = encode_array(self.b_factor_list, 10, 100)
output_data["occupancyList"] = encode_array(self.occupancy_list, 9, 100)
output_data["atomIdList"] = encode_array(self.atom_id_list, 8, 0)
output_data["altLocList"] = encode_array(self.alt_loc_list, 6, 0)
output_data["insCodeList"] = encode_array(self.ins_code_list, 6, 0)
output_data["groupIdList"] = encode_array(self.group_id_list, 8, 0)
output_data["groupList"] = self.group_list
output_data["sequenceIndexList"] = encode_array(self.sequence_index_list, 8, 0)
output_data["chainNameList"] = encode_array(self.chain_name_list, 5, 4)
output_data["chainIdList"] = encode_array(self.chain_id_list, 5, 4)
output_data["bondAtomList"] = encode_array(self.bond_atom_list, 4, 0)
output_data["bondOrderList"] = encode_array(self.bond_order_list, 2, 0)
output_data["secStructList"] = encode_array(self.sec_struct_list, 2, 0)
output_data["chainsPerModel"] = self.chains_per_model
output_data["groupsPerChain"] = self.groups_per_chain
output_data["spaceGroup"] = self.space_group
output_data["mmtfVersion"] = self.mmtf_version
output_data["mmtfProducer"] = self.mmtf_producer
output_data["structureId"] = self.structure_id
output_data["entityList"] = self.entity_list
output_data["bioAssemblyList"] = self.bio_assembly
output_data["rFree"] = self.r_free
output_data["rWork"] = self.r_work
output_data["resolution"] = self.resolution
output_data["title"] = self.title
output_data["experimentalMethods"] = self.experimental_methods
output_data["depositionDate"] = self.deposition_date
output_data["releaseDate"] = self.release_date
output_data["unitCell"] = self.unit_cell
output_data["numBonds"] = self.num_bonds
output_data["numChains"] = self.num_chains
output_data["numModels"] = self.num_models
output_data["numAtoms"] = self.num_atoms
output_data["numGroups"] = self.num_groups
        return output_data | Encode the data back into a dict. | Below is the instruction that describes the task:
### Input:
Encode the data back into a dict.
### Response:
def encode_data(self):
"""Encode the data back into a dict."""
output_data = {}
output_data["groupTypeList"] = encode_array(self.group_type_list, 4, 0)
output_data["xCoordList"] = encode_array(self.x_coord_list, 10, 1000)
output_data["yCoordList"] = encode_array(self.y_coord_list, 10, 1000)
output_data["zCoordList"] = encode_array(self.z_coord_list, 10, 1000)
output_data["bFactorList"] = encode_array(self.b_factor_list, 10, 100)
output_data["occupancyList"] = encode_array(self.occupancy_list, 9, 100)
output_data["atomIdList"] = encode_array(self.atom_id_list, 8, 0)
output_data["altLocList"] = encode_array(self.alt_loc_list, 6, 0)
output_data["insCodeList"] = encode_array(self.ins_code_list, 6, 0)
output_data["groupIdList"] = encode_array(self.group_id_list, 8, 0)
output_data["groupList"] = self.group_list
output_data["sequenceIndexList"] = encode_array(self.sequence_index_list, 8, 0)
output_data["chainNameList"] = encode_array(self.chain_name_list, 5, 4)
output_data["chainIdList"] = encode_array(self.chain_id_list, 5, 4)
output_data["bondAtomList"] = encode_array(self.bond_atom_list, 4, 0)
output_data["bondOrderList"] = encode_array(self.bond_order_list, 2, 0)
output_data["secStructList"] = encode_array(self.sec_struct_list, 2, 0)
output_data["chainsPerModel"] = self.chains_per_model
output_data["groupsPerChain"] = self.groups_per_chain
output_data["spaceGroup"] = self.space_group
output_data["mmtfVersion"] = self.mmtf_version
output_data["mmtfProducer"] = self.mmtf_producer
output_data["structureId"] = self.structure_id
output_data["entityList"] = self.entity_list
output_data["bioAssemblyList"] = self.bio_assembly
output_data["rFree"] = self.r_free
output_data["rWork"] = self.r_work
output_data["resolution"] = self.resolution
output_data["title"] = self.title
output_data["experimentalMethods"] = self.experimental_methods
output_data["depositionDate"] = self.deposition_date
output_data["releaseDate"] = self.release_date
output_data["unitCell"] = self.unit_cell
output_data["numBonds"] = self.num_bonds
output_data["numChains"] = self.num_chains
output_data["numModels"] = self.num_models
output_data["numAtoms"] = self.num_atoms
output_data["numGroups"] = self.num_groups
return output_data |
def json_doc_to_xml(json_obj, lang='en', custom_namespace=None):
"""Converts a Open511 JSON document to XML.
lang: the appropriate language code
Takes a dict deserialized from JSON, returns an lxml Element.
Accepts only the full root-level JSON object from an Open511 response."""
if 'meta' not in json_obj:
raise Exception("This function requires a conforming Open511 JSON document with a 'meta' section.")
json_obj = dict(json_obj)
meta = json_obj.pop('meta')
elem = get_base_open511_element(lang=lang, version=meta.pop('version'))
pagination = json_obj.pop('pagination', None)
json_struct_to_xml(json_obj, elem, custom_namespace=custom_namespace)
if pagination:
elem.append(json_struct_to_xml(pagination, 'pagination', custom_namespace=custom_namespace))
json_struct_to_xml(meta, elem)
    return elem | Converts an Open511 JSON document to XML.
lang: the appropriate language code
Takes a dict deserialized from JSON, returns an lxml Element.
    Accepts only the full root-level JSON object from an Open511 response. | Below is the instruction that describes the task:
### Input:
    Converts an Open511 JSON document to XML.
lang: the appropriate language code
Takes a dict deserialized from JSON, returns an lxml Element.
Accepts only the full root-level JSON object from an Open511 response.
### Response:
def json_doc_to_xml(json_obj, lang='en', custom_namespace=None):
"""Converts a Open511 JSON document to XML.
lang: the appropriate language code
Takes a dict deserialized from JSON, returns an lxml Element.
Accepts only the full root-level JSON object from an Open511 response."""
if 'meta' not in json_obj:
raise Exception("This function requires a conforming Open511 JSON document with a 'meta' section.")
json_obj = dict(json_obj)
meta = json_obj.pop('meta')
elem = get_base_open511_element(lang=lang, version=meta.pop('version'))
pagination = json_obj.pop('pagination', None)
json_struct_to_xml(json_obj, elem, custom_namespace=custom_namespace)
if pagination:
elem.append(json_struct_to_xml(pagination, 'pagination', custom_namespace=custom_namespace))
json_struct_to_xml(meta, elem)
return elem |
def get_ccle_mutations(gene_list, cell_lines, mutation_type=None):
"""Return a dict of mutations in given genes and cell lines from CCLE.
This is a specialized call to get_mutations tailored to CCLE cell lines.
Parameters
----------
gene_list : list[str]
A list of HGNC gene symbols to get mutations in
cell_lines : list[str]
A list of CCLE cell line names to get mutations for.
mutation_type : Optional[str]
The type of mutation to filter to.
mutation_type can be one of: missense, nonsense, frame_shift_ins,
frame_shift_del, splice_site
Returns
-------
mutations : dict
The result from cBioPortal as a dict in the format
{cell_line : {gene : [mutation1, mutation2, ...] }}
Example:
{'LOXIMVI_SKIN': {'BRAF': ['V600E', 'I208V']},
'SKMEL30_SKIN': {'BRAF': ['D287H', 'E275K']}}
"""
mutations = {cl: {g: [] for g in gene_list} for cl in cell_lines}
for cell_line in cell_lines:
mutations_cl = get_mutations(ccle_study, gene_list,
mutation_type=mutation_type,
case_id=cell_line)
for gene, aa_change in zip(mutations_cl['gene_symbol'],
mutations_cl['amino_acid_change']):
aa_change = str(aa_change)
mutations[cell_line][gene].append(aa_change)
return mutations | Return a dict of mutations in given genes and cell lines from CCLE.
This is a specialized call to get_mutations tailored to CCLE cell lines.
Parameters
----------
gene_list : list[str]
A list of HGNC gene symbols to get mutations in
cell_lines : list[str]
A list of CCLE cell line names to get mutations for.
mutation_type : Optional[str]
The type of mutation to filter to.
mutation_type can be one of: missense, nonsense, frame_shift_ins,
frame_shift_del, splice_site
Returns
-------
mutations : dict
The result from cBioPortal as a dict in the format
{cell_line : {gene : [mutation1, mutation2, ...] }}
Example:
{'LOXIMVI_SKIN': {'BRAF': ['V600E', 'I208V']},
         'SKMEL30_SKIN': {'BRAF': ['D287H', 'E275K']}} | Below is the instruction that describes the task:
### Input:
Return a dict of mutations in given genes and cell lines from CCLE.
This is a specialized call to get_mutations tailored to CCLE cell lines.
Parameters
----------
gene_list : list[str]
A list of HGNC gene symbols to get mutations in
cell_lines : list[str]
A list of CCLE cell line names to get mutations for.
mutation_type : Optional[str]
The type of mutation to filter to.
mutation_type can be one of: missense, nonsense, frame_shift_ins,
frame_shift_del, splice_site
Returns
-------
mutations : dict
The result from cBioPortal as a dict in the format
{cell_line : {gene : [mutation1, mutation2, ...] }}
Example:
{'LOXIMVI_SKIN': {'BRAF': ['V600E', 'I208V']},
'SKMEL30_SKIN': {'BRAF': ['D287H', 'E275K']}}
### Response:
def get_ccle_mutations(gene_list, cell_lines, mutation_type=None):
"""Return a dict of mutations in given genes and cell lines from CCLE.
This is a specialized call to get_mutations tailored to CCLE cell lines.
Parameters
----------
gene_list : list[str]
A list of HGNC gene symbols to get mutations in
cell_lines : list[str]
A list of CCLE cell line names to get mutations for.
mutation_type : Optional[str]
The type of mutation to filter to.
mutation_type can be one of: missense, nonsense, frame_shift_ins,
frame_shift_del, splice_site
Returns
-------
mutations : dict
The result from cBioPortal as a dict in the format
{cell_line : {gene : [mutation1, mutation2, ...] }}
Example:
{'LOXIMVI_SKIN': {'BRAF': ['V600E', 'I208V']},
'SKMEL30_SKIN': {'BRAF': ['D287H', 'E275K']}}
"""
mutations = {cl: {g: [] for g in gene_list} for cl in cell_lines}
for cell_line in cell_lines:
mutations_cl = get_mutations(ccle_study, gene_list,
mutation_type=mutation_type,
case_id=cell_line)
for gene, aa_change in zip(mutations_cl['gene_symbol'],
mutations_cl['amino_acid_change']):
aa_change = str(aa_change)
mutations[cell_line][gene].append(aa_change)
return mutations |
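A minimal call sketch mirroring the docstring example, assuming the module-level get_mutations and ccle_study used above are importable and cBioPortal is reachable over the network:
muts = get_ccle_mutations(['BRAF', 'NRAS'],
                          ['LOXIMVI_SKIN', 'SKMEL30_SKIN'],
                          mutation_type='missense')
# e.g. ['V600E', 'I208V'] for BRAF in LOXIMVI_SKIN, as in the docstring example
print(muts['LOXIMVI_SKIN']['BRAF'])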
def create_attribute_model(self, initial_value=None):
# type: (Any) -> AttributeModel
"""Make an AttributeModel instance of the correct type for this Meta
Args:
initial_value: The initial value the Attribute should take
Returns:
AttributeModel: The created attribute model instance
"""
attr = self.attribute_class(meta=self, value=initial_value)
return attr | Make an AttributeModel instance of the correct type for this Meta
Args:
initial_value: The initial value the Attribute should take
Returns:
AttributeModel: The created attribute model instance | Below is the the instruction that describes the task:
### Input:
Make an AttributeModel instance of the correct type for this Meta
Args:
initial_value: The initial value the Attribute should take
Returns:
AttributeModel: The created attribute model instance
### Response:
def create_attribute_model(self, initial_value=None):
# type: (Any) -> AttributeModel
"""Make an AttributeModel instance of the correct type for this Meta
Args:
initial_value: The initial value the Attribute should take
Returns:
AttributeModel: The created attribute model instance
"""
attr = self.attribute_class(meta=self, value=initial_value)
return attr |
def hsl_to_rgb(h, s, l):
"""Convert a color in h, s, l to a color in r, g, b"""
h /= 360
s /= 100
l /= 100
m2 = l * (s + 1) if l <= .5 else l + s - l * s
m1 = 2 * l - m2
def h_to_rgb(h):
h = h % 1
if 6 * h < 1:
return m1 + 6 * h * (m2 - m1)
if 2 * h < 1:
return m2
if 3 * h < 2:
return m1 + 6 * (2 / 3 - h) * (m2 - m1)
return m1
r, g, b = map(
lambda x: round(x * 255), map(h_to_rgb, (h + 1 / 3, h, h - 1 / 3))
)
return r, g, b | Convert a color in h, s, l to a color in r, g, b | Below is the the instruction that describes the task:
### Input:
Convert a color in h, s, l to a color in r, g, b
### Response:
def hsl_to_rgb(h, s, l):
"""Convert a color in h, s, l to a color in r, g, b"""
h /= 360
s /= 100
l /= 100
m2 = l * (s + 1) if l <= .5 else l + s - l * s
m1 = 2 * l - m2
def h_to_rgb(h):
h = h % 1
if 6 * h < 1:
return m1 + 6 * h * (m2 - m1)
if 2 * h < 1:
return m2
if 3 * h < 2:
return m1 + 6 * (2 / 3 - h) * (m2 - m1)
return m1
r, g, b = map(
lambda x: round(x * 255), map(h_to_rgb, (h + 1 / 3, h, h - 1 / 3))
)
return r, g, b |
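A quick sanity check of the conversion above; plain Python, no extra imports needed:
# Pure red: hue 0, full saturation, 50% lightness.
print(hsl_to_rgb(0, 100, 50))    # (255, 0, 0)
# Saturation 0 collapses to a grey determined by lightness alone.
print(hsl_to_rgb(120, 0, 50))    # (128, 128, 128)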
def _createReservoir(self, linkResult, replaceParamFile):
"""
Create GSSHAPY Reservoir Objects Method
"""
# Extract header variables from link result object
header = linkResult['header']
# Cases
if linkResult['type'] == 'LAKE':
# Lake handler
initWSE = vrp(header['initwse'], replaceParamFile)
minWSE = vrp(header['minwse'], replaceParamFile)
maxWSE = vrp(header['maxwse'], replaceParamFile)
numPts = header['numpts']
elif linkResult['type'] == 'RESERVOIR':
# Reservoir handler
initWSE = vrp(header['res_initwse'], replaceParamFile)
minWSE = vrp(header['res_minwse'], replaceParamFile)
maxWSE = vrp(header['res_maxwse'], replaceParamFile)
numPts = header['res_numpts']
# Initialize GSSHAPY Reservoir object
reservoir = Reservoir(initWSE=initWSE,
minWSE=minWSE,
maxWSE=maxWSE)
# Initialize GSSHAPY StreamLink object
link = StreamLink(linkNumber=int(header['link']),
type=linkResult['type'],
numElements=numPts)
# Associate StreamLink with ChannelInputFile
link.channelInputFile = self
# Associate Reservoir with StreamLink
reservoir.streamLink = link
# Create ReservoirPoint objects
for p in linkResult['points']:
# Initialize GSSHAPY ReservoirPoint object
resPoint = ReservoirPoint(i=p['i'],
j=p['j'])
# Associate ReservoirPoint with Reservoir
resPoint.reservoir = reservoir
return link | Create GSSHAPY Reservoir Objects Method | Below is the the instruction that describes the task:
### Input:
Create GSSHAPY Reservoir Objects Method
### Response:
def _createReservoir(self, linkResult, replaceParamFile):
"""
Create GSSHAPY Reservoir Objects Method
"""
# Extract header variables from link result object
header = linkResult['header']
# Cases
if linkResult['type'] == 'LAKE':
# Lake handler
initWSE = vrp(header['initwse'], replaceParamFile)
minWSE = vrp(header['minwse'], replaceParamFile)
maxWSE = vrp(header['maxwse'], replaceParamFile)
numPts = header['numpts']
elif linkResult['type'] == 'RESERVOIR':
# Reservoir handler
initWSE = vrp(header['res_initwse'], replaceParamFile)
minWSE = vrp(header['res_minwse'], replaceParamFile)
maxWSE = vrp(header['res_maxwse'], replaceParamFile)
numPts = header['res_numpts']
# Initialize GSSHAPY Reservoir object
reservoir = Reservoir(initWSE=initWSE,
minWSE=minWSE,
maxWSE=maxWSE)
# Initialize GSSHAPY StreamLink object
link = StreamLink(linkNumber=int(header['link']),
type=linkResult['type'],
numElements=numPts)
# Associate StreamLink with ChannelInputFile
link.channelInputFile = self
# Associate Reservoir with StreamLink
reservoir.streamLink = link
# Create ReservoirPoint objects
for p in linkResult['points']:
# Initialize GSSHAPY ReservoirPoint object
resPoint = ReservoirPoint(i=p['i'],
j=p['j'])
# Associate ReservoirPoint with Reservoir
resPoint.reservoir = reservoir
return link |
def plot_shape(gdf, fc='#cbe0f0', ec='#999999', linewidth=1, alpha=1,
figsize=(6,6), margin=0.02, axis_off=True):
"""
Plot a GeoDataFrame of place boundary geometries.
Parameters
----------
gdf : GeoDataFrame
the gdf containing the geometries to plot
fc : string or list
the facecolor (or list of facecolors) for the polygons
ec : string or list
the edgecolor (or list of edgecolors) for the polygons
linewidth : numeric
the width of the polygon edge lines
alpha : numeric
the opacity
figsize : tuple
the size of the plotting figure
margin : numeric
the size of the figure margins
axis_off : bool
if True, disable the matplotlib axes display
Returns
-------
fig, ax : tuple
"""
# if facecolor or edgecolor is a string instead of a list, make sure we have
# as many colors as gdf elements
if isinstance(fc, str):
fc = [fc] * len(gdf)
if isinstance(ec, str):
ec = [ec] * len(gdf)
# plot the geometries one at a time
fig, ax = plt.subplots(figsize=figsize)
for geometry, facecolor, edgecolor in zip(gdf['geometry'], fc, ec):
if isinstance(geometry, (Polygon, MultiPolygon)):
if isinstance(geometry, Polygon):
geometry = MultiPolygon([geometry])
for polygon in geometry:
patch = PolygonPatch(polygon, fc=facecolor, ec=edgecolor, linewidth=linewidth, alpha=alpha)
ax.add_patch(patch)
else:
raise ValueError('All geometries in GeoDataFrame must be shapely Polygons or MultiPolygons')
# adjust the axis margins and limits around the image and make axes
# equal-aspect
west, south, east, north = gdf.unary_union.bounds
margin_ns = (north - south) * margin
margin_ew = (east - west) * margin
ax.set_ylim((south - margin_ns, north + margin_ns))
ax.set_xlim((west - margin_ew, east + margin_ew))
ax.set_aspect(aspect='equal', adjustable='box')
if axis_off:
ax.axis('off')
plt.show()
return fig, ax | Plot a GeoDataFrame of place boundary geometries.
Parameters
----------
gdf : GeoDataFrame
the gdf containing the geometries to plot
fc : string or list
the facecolor (or list of facecolors) for the polygons
ec : string or list
the edgecolor (or list of edgecolors) for the polygons
linewidth : numeric
the width of the polygon edge lines
alpha : numeric
the opacity
figsize : tuple
the size of the plotting figure
margin : numeric
the size of the figure margins
axis_off : bool
if True, disable the matplotlib axes display
Returns
-------
fig, ax : tuple | Below is the the instruction that describes the task:
### Input:
Plot a GeoDataFrame of place boundary geometries.
Parameters
----------
gdf : GeoDataFrame
the gdf containing the geometries to plot
fc : string or list
the facecolor (or list of facecolors) for the polygons
ec : string or list
the edgecolor (or list of edgecolors) for the polygons
linewidth : numeric
the width of the polygon edge lines
alpha : numeric
the opacity
figsize : tuple
the size of the plotting figure
margin : numeric
the size of the figure margins
axis_off : bool
if True, disable the matplotlib axes display
Returns
-------
fig, ax : tuple
### Response:
def plot_shape(gdf, fc='#cbe0f0', ec='#999999', linewidth=1, alpha=1,
figsize=(6,6), margin=0.02, axis_off=True):
"""
Plot a GeoDataFrame of place boundary geometries.
Parameters
----------
gdf : GeoDataFrame
the gdf containing the geometries to plot
fc : string or list
the facecolor (or list of facecolors) for the polygons
ec : string or list
the edgecolor (or list of edgecolors) for the polygons
linewidth : numeric
the width of the polygon edge lines
alpha : numeric
the opacity
figsize : tuple
the size of the plotting figure
margin : numeric
the size of the figure margins
axis_off : bool
if True, disable the matplotlib axes display
Returns
-------
fig, ax : tuple
"""
# if facecolor or edgecolor is a string instead of a list, make sure we have
# as many colors as gdf elements
if isinstance(fc, str):
fc = [fc] * len(gdf)
if isinstance(ec, str):
ec = [ec] * len(gdf)
# plot the geometries one at a time
fig, ax = plt.subplots(figsize=figsize)
for geometry, facecolor, edgecolor in zip(gdf['geometry'], fc, ec):
if isinstance(geometry, (Polygon, MultiPolygon)):
if isinstance(geometry, Polygon):
geometry = MultiPolygon([geometry])
for polygon in geometry:
patch = PolygonPatch(polygon, fc=facecolor, ec=edgecolor, linewidth=linewidth, alpha=alpha)
ax.add_patch(patch)
else:
raise ValueError('All geometries in GeoDataFrame must be shapely Polygons or MultiPolygons')
# adjust the axis margins and limits around the image and make axes
# equal-aspect
west, south, east, north = gdf.unary_union.bounds
margin_ns = (north - south) * margin
margin_ew = (east - west) * margin
ax.set_ylim((south - margin_ns, north + margin_ns))
ax.set_xlim((west - margin_ew, east + margin_ew))
ax.set_aspect(aspect='equal', adjustable='box')
if axis_off:
ax.axis('off')
plt.show()
return fig, ax |
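A hedged usage sketch; the shapefile path is hypothetical, and geopandas plus the module-level plotting imports the function relies on (matplotlib, shapely, PolygonPatch) are assumed to be installed:
import geopandas as gpd
boundary = gpd.read_file('city_boundary.shp')   # any polygon/multipolygon layer
fig, ax = plot_shape(boundary, fc='#cbe0f0', ec='#444444', margin=0.05)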
def create_v4_signature(self, request_params):
'''
Create URI and signature headers based on AWS V4 signing process.
Refer to https://docs.aws.amazon.com/AlexaWebInfoService/latest/ApiReferenceArticle.html for request params.
:param request_params: dictionary of request parameters
:return: URL and header to be passed to requests.get
'''
method = 'GET'
service = 'awis'
host = 'awis.us-west-1.amazonaws.com'
region = 'us-west-1'
endpoint = 'https://awis.amazonaws.com/api'
request_parameters = urlencode([(key, request_params[key]) for key in sorted(request_params.keys())])
# Key derivation functions. See:
# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
def sign(key, msg):
return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()
def getSignatureKey(key, dateStamp, regionName, serviceName):
kDate = sign(('AWS4' + key).encode('utf-8'), dateStamp)
kRegion = sign(kDate, regionName)
kService = sign(kRegion, serviceName)
kSigning = sign(kService, 'aws4_request')
return kSigning
# Create a date for headers and the credential string
t = datetime.datetime.utcnow()
amzdate = t.strftime('%Y%m%dT%H%M%SZ')
datestamp = t.strftime('%Y%m%d') # Date w/o time, used in credential scope
# Create canonical request
canonical_uri = '/api'
canonical_querystring = request_parameters
canonical_headers = 'host:' + host + '\n' + 'x-amz-date:' + amzdate + '\n'
signed_headers = 'host;x-amz-date'
payload_hash = hashlib.sha256(''.encode('utf8')).hexdigest()
canonical_request = method + '\n' + canonical_uri + '\n' + canonical_querystring + '\n' + canonical_headers + '\n' + signed_headers + '\n' + payload_hash
# Create string to sign
algorithm = 'AWS4-HMAC-SHA256'
credential_scope = datestamp + '/' + region + '/' + service + '/' + 'aws4_request'
string_to_sign = algorithm + '\n' + amzdate + '\n' + credential_scope + '\n' + hashlib.sha256(canonical_request.encode('utf8')).hexdigest()
# Calculate signature
signing_key = getSignatureKey(self.secret_access_key, datestamp, region, service)
# Sign the string_to_sign using the signing_key
signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'), hashlib.sha256).hexdigest()
# Add signing information to the request
authorization_header = algorithm + ' ' + 'Credential=' + self.access_id + '/' + credential_scope + ', ' + 'SignedHeaders=' + signed_headers + ', ' + 'Signature=' + signature
headers = {'X-Amz-Date':amzdate, 'Authorization':authorization_header, 'Content-Type': 'application/xml', 'Accept': 'application/xml'}
# Create request url
request_url = endpoint + '?' + canonical_querystring
return request_url, headers | Create URI and signature headers based on AWS V4 signing process.
Refer to https://docs.aws.amazon.com/AlexaWebInfoService/latest/ApiReferenceArticle.html for request params.
:param request_params: dictionary of request parameters
:return: URL and header to be passed to requests.get | Below is the the instruction that describes the task:
### Input:
Create URI and signature headers based on AWS V4 signing process.
Refer to https://docs.aws.amazon.com/AlexaWebInfoService/latest/ApiReferenceArticle.html for request params.
:param request_params: dictionary of request parameters
:return: URL and header to be passed to requests.get
### Response:
def create_v4_signature(self, request_params):
'''
Create URI and signature headers based on AWS V4 signing process.
Refer to https://docs.aws.amazon.com/AlexaWebInfoService/latest/ApiReferenceArticle.html for request params.
:param request_params: dictionary of request parameters
:return: URL and header to be passed to requests.get
'''
method = 'GET'
service = 'awis'
host = 'awis.us-west-1.amazonaws.com'
region = 'us-west-1'
endpoint = 'https://awis.amazonaws.com/api'
request_parameters = urlencode([(key, request_params[key]) for key in sorted(request_params.keys())])
# Key derivation functions. See:
# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
def sign(key, msg):
return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()
def getSignatureKey(key, dateStamp, regionName, serviceName):
kDate = sign(('AWS4' + key).encode('utf-8'), dateStamp)
kRegion = sign(kDate, regionName)
kService = sign(kRegion, serviceName)
kSigning = sign(kService, 'aws4_request')
return kSigning
# Create a date for headers and the credential string
t = datetime.datetime.utcnow()
amzdate = t.strftime('%Y%m%dT%H%M%SZ')
datestamp = t.strftime('%Y%m%d') # Date w/o time, used in credential scope
# Create canonical request
canonical_uri = '/api'
canonical_querystring = request_parameters
canonical_headers = 'host:' + host + '\n' + 'x-amz-date:' + amzdate + '\n'
signed_headers = 'host;x-amz-date'
payload_hash = hashlib.sha256(''.encode('utf8')).hexdigest()
canonical_request = method + '\n' + canonical_uri + '\n' + canonical_querystring + '\n' + canonical_headers + '\n' + signed_headers + '\n' + payload_hash
# Create string to sign
algorithm = 'AWS4-HMAC-SHA256'
credential_scope = datestamp + '/' + region + '/' + service + '/' + 'aws4_request'
string_to_sign = algorithm + '\n' + amzdate + '\n' + credential_scope + '\n' + hashlib.sha256(canonical_request.encode('utf8')).hexdigest()
# Calculate signature
signing_key = getSignatureKey(self.secret_access_key, datestamp, region, service)
# Sign the string_to_sign using the signing_key
signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'), hashlib.sha256).hexdigest()
# Add signing information to the request
authorization_header = algorithm + ' ' + 'Credential=' + self.access_id + '/' + credential_scope + ', ' + 'SignedHeaders=' + signed_headers + ', ' + 'Signature=' + signature
headers = {'X-Amz-Date':amzdate, 'Authorization':authorization_header, 'Content-Type': 'application/xml', 'Accept': 'application/xml'}
# Create request url
request_url = endpoint + '?' + canonical_querystring
return request_url, headers |
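An illustrative call sketch, assuming awis_client is an instance of the wrapper class carrying access_id and secret_access_key; the parameters follow the AWIS UrlInfo action:
import requests
params = {'Action': 'UrlInfo', 'Url': 'example.com', 'ResponseGroup': 'Rank'}
url, headers = awis_client.create_v4_signature(params)
response = requests.get(url, headers=headers)   # XML body, per the Accept header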
def _setupCache(self):
"""
Setup the cache based on the provided values for localCacheDir.
"""
# we first check whether the cache directory exists. If it doesn't, create it.
if not os.path.exists(self.localCacheDir):
# Create a temporary directory as this worker's private cache. If all goes well, it
# will be renamed into the cache for this node.
personalCacheDir = ''.join([os.path.dirname(self.localCacheDir), '/.ctmp-',
str(uuid.uuid4())])
os.mkdir(personalCacheDir, 0o755)
self._createCacheLockFile(personalCacheDir)
try:
os.rename(personalCacheDir, self.localCacheDir)
except OSError as err:
# The only acceptable FAIL case is that the destination is a non-empty
# directory. Assuming (it's ambiguous) atomic renaming of directories, if the
# dst is non-empty, it only means that another worker has beaten this one to the
# rename.
if err.errno == errno.ENOTEMPTY:
# Cleanup your own mess. It's only polite.
shutil.rmtree(personalCacheDir)
else:
raise
# You can't reach here unless a local cache directory has been created successfully
with self._CacheState.open(self) as cacheInfo:
# Ensure this cache is from the correct attempt at the workflow! If it isn't, we
# need to reset the cache lock file
if cacheInfo.attemptNumber != self.workflowAttemptNumber:
if cacheInfo.nlink == 2:
cacheInfo.cached = 0 # cached file sizes are accounted for by job store
else:
allCachedFiles = [os.path.join(self.localCacheDir, x)
for x in os.listdir(self.localCacheDir)
if not self._isHidden(x)]
cacheInfo.cached = sum([os.stat(cachedFile).st_size
for cachedFile in allCachedFiles])
# TODO: Delete the working directories
cacheInfo.sigmaJob = 0
cacheInfo.attemptNumber = self.workflowAttemptNumber
self.nlinkThreshold = cacheInfo.nlink | Setup the cache based on the provided values for localCacheDir. | Below is the the instruction that describes the task:
### Input:
Setup the cache based on the provided values for localCacheDir.
### Response:
def _setupCache(self):
"""
Setup the cache based on the provided values for localCacheDir.
"""
# we first check whether the cache directory exists. If it doesn't, create it.
if not os.path.exists(self.localCacheDir):
# Create a temporary directory as this worker's private cache. If all goes well, it
# will be renamed into the cache for this node.
personalCacheDir = ''.join([os.path.dirname(self.localCacheDir), '/.ctmp-',
str(uuid.uuid4())])
os.mkdir(personalCacheDir, 0o755)
self._createCacheLockFile(personalCacheDir)
try:
os.rename(personalCacheDir, self.localCacheDir)
except OSError as err:
# The only acceptable FAIL case is that the destination is a non-empty
# directory. Assuming (it's ambiguous) atomic renaming of directories, if the
# dst is non-empty, it only means that another worker has beaten this one to the
# rename.
if err.errno == errno.ENOTEMPTY:
# Cleanup your own mess. It's only polite.
shutil.rmtree(personalCacheDir)
else:
raise
# You can't reach here unless a local cache directory has been created successfully
with self._CacheState.open(self) as cacheInfo:
# Ensure this cache is from the correct attempt at the workflow! If it isn't, we
# need to reset the cache lock file
if cacheInfo.attemptNumber != self.workflowAttemptNumber:
if cacheInfo.nlink == 2:
cacheInfo.cached = 0 # cached file sizes are accounted for by job store
else:
allCachedFiles = [os.path.join(self.localCacheDir, x)
for x in os.listdir(self.localCacheDir)
if not self._isHidden(x)]
cacheInfo.cached = sum([os.stat(cachedFile).st_size
for cachedFile in allCachedFiles])
# TODO: Delete the working directories
cacheInfo.sigmaJob = 0
cacheInfo.attemptNumber = self.workflowAttemptNumber
self.nlinkThreshold = cacheInfo.nlink |
def strip_rst(docs):
'''
Strip/replace reStructuredText directives in docstrings
'''
for func, docstring in six.iteritems(docs):
log.debug('Stripping docstring for %s', func)
if not docstring:
continue
docstring_new = docstring if six.PY3 else salt.utils.data.encode(docstring)
for regex, repl in (
(r' *.. code-block:: \S+\n{1,2}', ''),
('.. note::', 'Note:'),
('.. warning::', 'Warning:'),
('.. versionadded::', 'New in version'),
('.. versionchanged::', 'Changed in version')):
if six.PY2:
regex = salt.utils.data.encode(regex)
repl = salt.utils.data.encode(repl)
try:
docstring_new = re.sub(regex, repl, docstring_new)
except Exception:
log.debug(
'Exception encountered while matching regex %r to '
'docstring for function %s', regex, func,
exc_info=True
)
if six.PY2:
docstring_new = salt.utils.data.decode(docstring_new)
if docstring != docstring_new:
docs[func] = docstring_new
return docs | Strip/replace reStructuredText directives in docstrings | Below is the the instruction that describes the task:
### Input:
Strip/replace reStructuredText directives in docstrings
### Response:
def strip_rst(docs):
'''
Strip/replace reStructuredText directives in docstrings
'''
for func, docstring in six.iteritems(docs):
log.debug('Stripping docstring for %s', func)
if not docstring:
continue
docstring_new = docstring if six.PY3 else salt.utils.data.encode(docstring)
for regex, repl in (
(r' *.. code-block:: \S+\n{1,2}', ''),
('.. note::', 'Note:'),
('.. warning::', 'Warning:'),
('.. versionadded::', 'New in version'),
('.. versionchanged::', 'Changed in version')):
if six.PY2:
regex = salt.utils.data.encode(regex)
repl = salt.utils.data.encode(repl)
try:
docstring_new = re.sub(regex, repl, docstring_new)
except Exception:
log.debug(
'Exception encountered while matching regex %r to '
'docstring for function %s', regex, func,
exc_info=True
)
if six.PY2:
docstring_new = salt.utils.data.decode(docstring_new)
if docstring != docstring_new:
docs[func] = docstring_new
return docs |
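A small sketch of the transformation, assuming the salt/six imports used by the function are available:
docs = {'pkg.install': 'Install a package.\n\n.. note::\n    Requires elevated rights.'}
cleaned = strip_rst(docs)
# The '.. note::' directive is rewritten to a plain 'Note:' prefix.
print(cleaned['pkg.install'])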
def render_tag(self, tag_func):
"""
Creates a tag using the decorated func as the render function
for the template tag node. The render function takes two
arguments - the template context and the tag token.
"""
@wraps(tag_func)
def tag_wrapper(parser, token):
class RenderTagNode(template.Node):
def render(self, context):
return tag_func(context, token)
return RenderTagNode()
return self.tag(tag_wrapper) | Creates a tag using the decorated func as the render function
for the template tag node. The render function takes two
arguments - the template context and the tag token. | Below is the the instruction that describes the task:
### Input:
Creates a tag using the decorated func as the render function
for the template tag node. The render function takes two
arguments - the template context and the tag token.
### Response:
def render_tag(self, tag_func):
"""
Creates a tag using the decorated func as the render function
for the template tag node. The render function takes two
arguments - the template context and the tag token.
"""
@wraps(tag_func)
def tag_wrapper(parser, token):
class RenderTagNode(template.Node):
def render(self, context):
return tag_func(context, token)
return RenderTagNode()
return self.tag(tag_wrapper) |
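An illustrative registration, assuming register is an instance of the template-library class that defines render_tag; the tag name below is hypothetical:
@register.render_tag
def greet(context, token):
    # {% greet "world" %} -> token.split_contents() == ['greet', '"world"']
    bits = token.split_contents()
    return 'Hello %s' % bits[1].strip('"')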
def PublicIPs(self):
"""Returns PublicIPs object associated with the server.
"""
if not self.public_ips: self.public_ips = clc.v2.PublicIPs(server=self,public_ips_lst=self.ip_addresses,session=self.session)
return(self.public_ips) | Returns PublicIPs object associated with the server. | Below is the the instruction that describes the task:
### Input:
Returns PublicIPs object associated with the server.
### Response:
def PublicIPs(self):
"""Returns PublicIPs object associated with the server.
"""
if not self.public_ips: self.public_ips = clc.v2.PublicIPs(server=self,public_ips_lst=self.ip_addresses,session=self.session)
return(self.public_ips) |
def _symbol_trades(self, symbols):
'''
Query last_trade in parallel for multiple symbols and
return in dict.
symbols: list[str]
return: dict[str -> polygon.Trade]
'''
@skip_http_error((404, 504))
def fetch(symbol):
return self._api.polygon.last_trade(symbol)
return parallelize(fetch)(symbols) | Query last_trade in parallel for multiple symbols and
return in dict.
symbols: list[str]
return: dict[str -> polygon.Trade] | Below is the the instruction that describes the task:
### Input:
Query last_trade in parallel for multiple symbols and
return in dict.
symbols: list[str]
return: dict[str -> polygon.Trade]
### Response:
def _symbol_trades(self, symbols):
'''
Query last_trade in parallel for multiple symbols and
return in dict.
symbols: list[str]
return: dict[str -> polygon.Trade]
'''
@skip_http_error((404, 504))
def fetch(symbol):
return self._api.polygon.last_trade(symbol)
return parallelize(fetch)(symbols) |
def calibrate(self, data, dsid):
"""Calibrate the data."""
tic = datetime.now()
data15hdr = self.header['15_DATA_HEADER']
calibration = dsid.calibration
channel = dsid.name
# even though all the channels may not be present in the file,
# the header does have calibration coefficients for all the channels
# hence, this channel index needs to refer to full channel list
i = list(CHANNEL_NAMES.values()).index(channel)
if calibration == 'counts':
return data
if calibration in ['radiance', 'reflectance', 'brightness_temperature']:
# determine the required calibration coefficients to use
# for the Level 1.5 Header
if (self.calib_mode.upper() != 'GSICS' and self.calib_mode.upper() != 'NOMINAL'):
raise NotImplementedError(
'Unknown Calibration mode : Please check')
# NB GSICS doesn't have calibration coeffs for VIS channels
if (self.calib_mode.upper() != 'GSICS' or channel in VIS_CHANNELS):
coeffs = data15hdr[
'RadiometricProcessing']['Level15ImageCalibration']
gain = coeffs['CalSlope'][i]
offset = coeffs['CalOffset'][i]
else:
coeffs = data15hdr[
'RadiometricProcessing']['MPEFCalFeedback']
gain = coeffs['GSICSCalCoeff'][i]
offset = coeffs['GSICSOffsetCount'][i]
offset = offset * gain
res = self._convert_to_radiance(data, gain, offset)
if calibration == 'reflectance':
solar_irradiance = CALIB[self.platform_id][channel]["F"]
res = self._vis_calibrate(res, solar_irradiance)
elif calibration == 'brightness_temperature':
cal_type = data15hdr['ImageDescription'][
'Level15ImageProduction']['PlannedChanProcessing'][i]
res = self._ir_calibrate(res, channel, cal_type)
logger.debug("Calibration time " + str(datetime.now() - tic))
return res | Calibrate the data. | Below is the the instruction that describes the task:
### Input:
Calibrate the data.
### Response:
def calibrate(self, data, dsid):
"""Calibrate the data."""
tic = datetime.now()
data15hdr = self.header['15_DATA_HEADER']
calibration = dsid.calibration
channel = dsid.name
# even though all the channels may not be present in the file,
# the header does have calibration coefficients for all the channels
# hence, this channel index needs to refer to full channel list
i = list(CHANNEL_NAMES.values()).index(channel)
if calibration == 'counts':
return data
if calibration in ['radiance', 'reflectance', 'brightness_temperature']:
# determine the required calibration coefficients to use
# for the Level 1.5 Header
if (self.calib_mode.upper() != 'GSICS' and self.calib_mode.upper() != 'NOMINAL'):
raise NotImplementedError(
'Unknown Calibration mode : Please check')
# NB GSICS doesn't have calibration coeffs for VIS channels
if (self.calib_mode.upper() != 'GSICS' or channel in VIS_CHANNELS):
coeffs = data15hdr[
'RadiometricProcessing']['Level15ImageCalibration']
gain = coeffs['CalSlope'][i]
offset = coeffs['CalOffset'][i]
else:
coeffs = data15hdr[
'RadiometricProcessing']['MPEFCalFeedback']
gain = coeffs['GSICSCalCoeff'][i]
offset = coeffs['GSICSOffsetCount'][i]
offset = offset * gain
res = self._convert_to_radiance(data, gain, offset)
if calibration == 'reflectance':
solar_irradiance = CALIB[self.platform_id][channel]["F"]
res = self._vis_calibrate(res, solar_irradiance)
elif calibration == 'brightness_temperature':
cal_type = data15hdr['ImageDescription'][
'Level15ImageProduction']['PlannedChanProcessing'][i]
res = self._ir_calibrate(res, channel, cal_type)
logger.debug("Calibration time " + str(datetime.now() - tic))
return res |
def show_vcs_output_vcs_nodes_vcs_node_info_node_hw_sync_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_vcs = ET.Element("show_vcs")
config = show_vcs
output = ET.SubElement(show_vcs, "output")
vcs_nodes = ET.SubElement(output, "vcs-nodes")
vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
node_hw_sync_state = ET.SubElement(vcs_node_info, "node-hw-sync-state")
node_hw_sync_state.text = kwargs.pop('node_hw_sync_state')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def show_vcs_output_vcs_nodes_vcs_node_info_node_hw_sync_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_vcs = ET.Element("show_vcs")
config = show_vcs
output = ET.SubElement(show_vcs, "output")
vcs_nodes = ET.SubElement(output, "vcs-nodes")
vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
node_hw_sync_state = ET.SubElement(vcs_node_info, "node-hw-sync-state")
node_hw_sync_state.text = kwargs.pop('node_hw_sync_state')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def getSNPSetsList() :
"""Return the names of all imported snp sets"""
import rabaDB.filters as rfilt
f = rfilt.RabaQuery(SNPMaster)
names = []
for g in f.iterRun() :
names.append(g.setName)
return names | Return the names of all imported snp sets | Below is the the instruction that describes the task:
### Input:
Return the names of all imported snp sets
### Response:
def getSNPSetsList() :
"""Return the names of all imported snp sets"""
import rabaDB.filters as rfilt
f = rfilt.RabaQuery(SNPMaster)
names = []
for g in f.iterRun() :
names.append(g.setName)
return names |
def send_post(self, mri, method_name, **params):
"""Abstract method to dispatch a Post to the server
Args:
mri (str): The mri of the Block
method_name (str): The name of the Method within the Block
params: The parameters to send
Returns:
The return results from the server
"""
typ, parameters = convert_to_type_tuple_value(serialize_object(params))
uri = NTURI(typ[2])
uri = uri.wrap(
path="%s.%s" % (mri, method_name),
kws=parameters,
scheme="pva"
)
value = self._ctxt.rpc(mri, uri, timeout=None)
return convert_value_to_dict(value) | Abstract method to dispatch a Post to the server
Args:
mri (str): The mri of the Block
method_name (str): The name of the Method within the Block
params: The parameters to send
Returns:
The return results from the server | Below is the the instruction that describes the task:
### Input:
Abstract method to dispatch a Post to the server
Args:
mri (str): The mri of the Block
method_name (str): The name of the Method within the Block
params: The parameters to send
Returns:
The return results from the server
### Response:
def send_post(self, mri, method_name, **params):
"""Abstract method to dispatch a Post to the server
Args:
mri (str): The mri of the Block
method_name (str): The name of the Method within the Block
params: The parameters to send
Returns:
The return results from the server
"""
typ, parameters = convert_to_type_tuple_value(serialize_object(params))
uri = NTURI(typ[2])
uri = uri.wrap(
path="%s.%s" % (mri, method_name),
kws=parameters,
scheme="pva"
)
value = self._ctxt.rpc(mri, uri, timeout=None)
return convert_value_to_dict(value) |
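A hedged call sketch; client_comms, the mri, the method name and the parameters below are placeholders:
result = client_comms.send_post('TEST:COUNTER', 'configure', exposure=0.1, repeats=5)
print(result)   # dict converted from the returned PVA structure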
def _get_struct_glowfilter(self):
"""Get the values for the GLOWFILTER record."""
obj = _make_object("GlowFilter")
obj.GlowColor = self._get_struct_rgba()
obj.BlurX = unpack_fixed16(self._src)
obj.BlurY = unpack_fixed16(self._src)
obj.Strength = unpack_fixed8(self._src)
bc = BitConsumer(self._src)
obj.InnerGlow = bc.u_get(1)
obj.Knockout = bc.u_get(1)
obj.CompositeSource = bc.u_get(1)
obj.Passes = bc.u_get(5)
return obj | Get the values for the GLOWFILTER record. | Below is the the instruction that describes the task:
### Input:
Get the values for the GLOWFILTER record.
### Response:
def _get_struct_glowfilter(self):
"""Get the values for the GLOWFILTER record."""
obj = _make_object("GlowFilter")
obj.GlowColor = self._get_struct_rgba()
obj.BlurX = unpack_fixed16(self._src)
obj.BlurY = unpack_fixed16(self._src)
obj.Strength = unpack_fixed8(self._src)
bc = BitConsumer(self._src)
obj.InnerGlow = bc.u_get(1)
obj.Knockout = bc.u_get(1)
obj.CompositeSource = bc.u_get(1)
obj.Passes = bc.u_get(5)
return obj |
def load(file,encoding=None):
"""load(file,encoding=None) -> object
This function reads a tnetstring from a file and parses it into a
python object. The file must support the read() method, and this
function promises not to read more data than necessary.
"""
# Read the length prefix one char at a time.
# Note that the netstring spec explicitly forbids padding zeros.
c = file.read(1)
if not c.isdigit():
raise ValueError("not a tnetstring: missing or invalid length prefix")
datalen = ord(c) - ord("0")
c = file.read(1)
if datalen != 0:
while c.isdigit():
datalen = (10 * datalen) + (ord(c) - ord("0"))
if datalen > 999999999:
errmsg = "not a tnetstring: absurdly large length prefix"
raise ValueError(errmsg)
c = file.read(1)
if c != ":":
raise ValueError("not a tnetstring: missing or invalid length prefix")
# Now we can read and parse the payload.
# This repeats the dispatch logic of pop() so we can avoid
# re-constructing the outermost tnetstring.
data = file.read(datalen)
if len(data) != datalen:
raise ValueError("not a tnetstring: length prefix too big")
type = file.read(1)
if type == ",":
if encoding is not None:
return data.decode(encoding)
return data
if type == "#":
try:
return int(data)
except ValueError:
raise ValueError("not a tnetstring: invalid integer literal")
if type == "^":
try:
return float(data)
except ValueError:
raise ValueError("not a tnetstring: invalid float literal")
if type == "!":
if data == "true":
return True
elif data == "false":
return False
else:
raise ValueError("not a tnetstring: invalid boolean literal")
if type == "~":
if data:
raise ValueError("not a tnetstring: invalid null literal")
return None
if type == "]":
l = []
while data:
(item,data) = pop(data,encoding)
l.append(item)
return l
if type == "}":
d = {}
while data:
(key,data) = pop(data,encoding)
(val,data) = pop(data,encoding)
d[key] = val
return d
raise ValueError("unknown type tag") | load(file,encoding=None) -> object
This function reads a tnetstring from a file and parses it into a
python object. The file must support the read() method, and this
function promises not to read more data than necessary. | Below is the the instruction that describes the task:
### Input:
load(file,encoding=None) -> object
This function reads a tnetstring from a file and parses it into a
python object. The file must support the read() method, and this
function promises not to read more data than necessary.
### Response:
def load(file,encoding=None):
"""load(file,encoding=None) -> object
This function reads a tnetstring from a file and parses it into a
python object. The file must support the read() method, and this
function promises not to read more data than necessary.
"""
# Read the length prefix one char at a time.
# Note that the netstring spec explicitly forbids padding zeros.
c = file.read(1)
if not c.isdigit():
raise ValueError("not a tnetstring: missing or invalid length prefix")
datalen = ord(c) - ord("0")
c = file.read(1)
if datalen != 0:
while c.isdigit():
datalen = (10 * datalen) + (ord(c) - ord("0"))
if datalen > 999999999:
errmsg = "not a tnetstring: absurdly large length prefix"
raise ValueError(errmsg)
c = file.read(1)
if c != ":":
raise ValueError("not a tnetstring: missing or invalid length prefix")
# Now we can read and parse the payload.
# This repeats the dispatch logic of pop() so we can avoid
# re-constructing the outermost tnetstring.
data = file.read(datalen)
if len(data) != datalen:
raise ValueError("not a tnetstring: length prefix too big")
type = file.read(1)
if type == ",":
if encoding is not None:
return data.decode(encoding)
return data
if type == "#":
try:
return int(data)
except ValueError:
raise ValueError("not a tnetstring: invalid integer literal")
if type == "^":
try:
return float(data)
except ValueError:
raise ValueError("not a tnetstring: invalid float literal")
if type == "!":
if data == "true":
return True
elif data == "false":
return False
else:
raise ValueError("not a tnetstring: invalid boolean literal")
if type == "~":
if data:
raise ValueError("not a tnetstring: invalid null literal")
return None
if type == "]":
l = []
while data:
(item,data) = pop(data,encoding)
l.append(item)
return l
if type == "}":
d = {}
while data:
(key,data) = pop(data,encoding)
(val,data) = pop(data,encoding)
d[key] = val
return d
raise ValueError("unknown type tag") |
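Two minimal decodes using in-memory file objects; the payloads follow the tnetstring framing the parser expects:
import io
print(load(io.StringIO('5:hello,')))   # 'hello'  (string payload)
print(load(io.StringIO('3:400#')))     # 400      (integer payload)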
def parse_radl(data):
"""
Parse a RADL document in JSON.
Args.:
- data(str or list): document to parse.
Return(RADL): RADL object.
"""
if not isinstance(data, list):
if os.path.isfile(data):
f = open(data)
data = "".join(f.readlines())
f.close()
data = json.loads(data)
data = encode_simple(data)
res = RADL()
for aspect in [p_aspect(a) for a in data]:
res.add(aspect)
return res | Parse a RADL document in JSON.
Args.:
- data(str or list): document to parse.
Return(RADL): RADL object. | Below is the the instruction that describes the task:
### Input:
Parse a RADL document in JSON.
Args.:
- data(str or list): document to parse.
Return(RADL): RADL object.
### Response:
def parse_radl(data):
"""
Parse a RADL document in JSON.
Args.:
- data(str or list): document to parse.
Return(RADL): RADL object.
"""
if not isinstance(data, list):
if os.path.isfile(data):
f = open(data)
data = "".join(f.readlines())
f.close()
data = json.loads(data)
data = encode_simple(data)
res = RADL()
for aspect in [p_aspect(a) for a in data]:
res.add(aspect)
return res |
def make_tree(data, rng_state, leaf_size=30, angular=False):
"""Construct a random projection tree based on ``data`` with leaves
of size at most ``leaf_size``.
Parameters
----------
data: array of shape (n_samples, n_features)
The original data to be split
rng_state: array of int64, shape (3,)
The internal state of the rng
leaf_size: int (optional, default 30)
The maximum size of any leaf node in the tree. Any node in the tree
with more than ``leaf_size`` will be split further to create child
nodes.
angular: bool (optional, default False)
Whether to use cosine/angular distance to create splits in the tree,
or euclidean distance.
Returns
-------
node: RandomProjectionTreeNode
A random projection tree node which links to its child nodes. This
provides the full tree below the returned node.
"""
is_sparse = scipy.sparse.isspmatrix_csr(data)
indices = np.arange(data.shape[0])
# Make a tree recursively until we get below the leaf size
if is_sparse:
inds = data.indices
indptr = data.indptr
spdata = data.data
if angular:
return make_sparse_angular_tree(
inds, indptr, spdata, indices, rng_state, leaf_size
)
else:
return make_sparse_euclidean_tree(
inds, indptr, spdata, indices, rng_state, leaf_size
)
else:
if angular:
return make_angular_tree(data, indices, rng_state, leaf_size)
else:
return make_euclidean_tree(data, indices, rng_state, leaf_size) | Construct a random projection tree based on ``data`` with leaves
of size at most ``leaf_size``.
Parameters
----------
data: array of shape (n_samples, n_features)
The original data to be split
rng_state: array of int64, shape (3,)
The internal state of the rng
leaf_size: int (optional, default 30)
The maximum size of any leaf node in the tree. Any node in the tree
with more than ``leaf_size`` will be split further to create child
nodes.
angular: bool (optional, default False)
Whether to use cosine/angular distance to create splits in the tree,
or euclidean distance.
Returns
-------
node: RandomProjectionTreeNode
A random projection tree node which links to its child nodes. This
provides the full tree below the returned node. | Below is the the instruction that describes the task:
### Input:
Construct a random projection tree based on ``data`` with leaves
of size at most ``leaf_size``.
Parameters
----------
data: array of shape (n_samples, n_features)
The original data to be split
rng_state: array of int64, shape (3,)
The internal state of the rng
leaf_size: int (optional, default 30)
The maximum size of any leaf node in the tree. Any node in the tree
with more than ``leaf_size`` will be split further to create child
nodes.
angular: bool (optional, default False)
Whether to use cosine/angular distance to create splits in the tree,
or euclidean distance.
Returns
-------
node: RandomProjectionTreeNode
A random projection tree node which links to its child nodes. This
provides the full tree below the returned node.
### Response:
def make_tree(data, rng_state, leaf_size=30, angular=False):
"""Construct a random projection tree based on ``data`` with leaves
of size at most ``leaf_size``.
Parameters
----------
data: array of shape (n_samples, n_features)
The original data to be split
rng_state: array of int64, shape (3,)
The internal state of the rng
leaf_size: int (optional, default 30)
The maximum size of any leaf node in the tree. Any node in the tree
with more than ``leaf_size`` will be split further to create child
nodes.
angular: bool (optional, default False)
Whether to use cosine/angular distance to create splits in the tree,
or euclidean distance.
Returns
-------
node: RandomProjectionTreeNode
A random projection tree node which links to its child nodes. This
provides the full tree below the returned node.
"""
is_sparse = scipy.sparse.isspmatrix_csr(data)
indices = np.arange(data.shape[0])
# Make a tree recursively until we get below the leaf size
if is_sparse:
inds = data.indices
indptr = data.indptr
spdata = data.data
if angular:
return make_sparse_angular_tree(
inds, indptr, spdata, indices, rng_state, leaf_size
)
else:
return make_sparse_euclidean_tree(
inds, indptr, spdata, indices, rng_state, leaf_size
)
else:
if angular:
return make_angular_tree(data, indices, rng_state, leaf_size)
else:
return make_euclidean_tree(data, indices, rng_state, leaf_size) |
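A dense-input sketch, assuming numpy and the sibling make_*_tree helpers are importable alongside the function:
import numpy as np
data = np.random.random((1000, 16)).astype(np.float32)
rng_state = np.random.randint(np.iinfo(np.int32).min + 1,
                              np.iinfo(np.int32).max, 3).astype(np.int64)
root = make_tree(data, rng_state, leaf_size=30, angular=False)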
def start(self):
"""Start a thread to handle Vera blocked polling."""
self._poll_thread = threading.Thread(target=self._run_poll_server,
name='Vera Poll Thread')
self._poll_thread.daemon = True
self._poll_thread.start() | Start a thread to handle Vera blocked polling. | Below is the the instruction that describes the task:
### Input:
Start a thread to handle Vera blocked polling.
### Response:
def start(self):
"""Start a thread to handle Vera blocked polling."""
self._poll_thread = threading.Thread(target=self._run_poll_server,
name='Vera Poll Thread')
self._poll_thread.daemon = True
self._poll_thread.start() |
def http_post(url, data=None, opt=opt_default):
"""
Shortcut for urlopen (POST) + read. We'll probably want to add a
nice timeout here later too.
"""
return _http_request(url, method='POST', data=_marshalled(data), opt=opt) | Shortcut for urlopen (POST) + read. We'll probably want to add a
nice timeout here later too. | Below is the the instruction that describes the task:
### Input:
Shortcut for urlopen (POST) + read. We'll probably want to add a
nice timeout here later too.
### Response:
def http_post(url, data=None, opt=opt_default):
"""
Shortcut for urlopen (POST) + read. We'll probably want to add a
nice timeout here later too.
"""
return _http_request(url, method='POST', data=_marshalled(data), opt=opt) |
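A hedged call sketch (the endpoint is hypothetical); _marshalled and opt_default come from the same module, so only url and data are supplied:
body = http_post('http://localhost:8000/api/echo', data={'ping': 1})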
def unbind(self, ticket, device_id, user_id):
"""
Unbind a device.
For details, see
https://iot.weixin.qq.com/wiki/new/index.html?page=3-4-7
:param ticket: credential proving the binding operation is legitimate (generated by the WeChat backend and obtained by the third-party H5 page via the client jsapi)
:param device_id: device id
:param user_id: the openid of the user
:return: the returned JSON data packet
"""
return self._post(
'unbind',
data={
'ticket': ticket,
'device_id': device_id,
'openid': user_id
}
) | Unbind a device.
For details, see
https://iot.weixin.qq.com/wiki/new/index.html?page=3-4-7
:param ticket: credential proving the binding operation is legitimate (generated by the WeChat backend and obtained by the third-party H5 page via the client jsapi)
:param device_id: device id
:param user_id: the openid of the user
:return: the returned JSON data packet | Below is the the instruction that describes the task:
### Input:
Unbind a device.
For details, see
https://iot.weixin.qq.com/wiki/new/index.html?page=3-4-7
:param ticket: credential proving the binding operation is legitimate (generated by the WeChat backend and obtained by the third-party H5 page via the client jsapi)
:param device_id: device id
:param user_id: the openid of the user
:return: the returned JSON data packet
### Response:
def unbind(self, ticket, device_id, user_id):
"""
Unbind a device.
For details, see
https://iot.weixin.qq.com/wiki/new/index.html?page=3-4-7
:param ticket: credential proving the binding operation is legitimate (generated by the WeChat backend and obtained by the third-party H5 page via the client jsapi)
:param device_id: device id
:param user_id: the openid of the user
:return: the returned JSON data packet
"""
return self._post(
'unbind',
data={
'ticket': ticket,
'device_id': device_id,
'openid': user_id
}
) |
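A hedged call sketch with placeholder identifiers, assuming wechat_client is the API wrapper instance that defines the method:
result = wechat_client.unbind(ticket='TICKET_FROM_JSAPI',
                              device_id='gh_device_001',
                              user_id='USER_OPENID')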
def replace(self, key, value):
"""
Replaces the entry for a key only if it is currently mapped to some value.
This is equivalent to:
>>> if map.contains_key(key):
>>> return map.put(key,value)
>>> else:
>>> return None
except that the action is performed atomically.
**Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
**Warning 2:
This method returns a clone of the previous value, not the original (identically equal) value previously put
into the map.**
:param key: (object), the specified key.
:param value: (object), the value to replace the previous value.
:return: (object), previous value associated with key, or ``None`` if there was no mapping for key.
"""
check_not_none(key, "key can't be None")
check_not_none(value, "value can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._replace_internal(key_data, value_data) | Replaces the entry for a key only if it is currently mapped to some value.
This is equivalent to:
>>> if map.contains_key(key):
>>> return map.put(key,value)
>>> else:
>>> return None
except that the action is performed atomically.
**Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
**Warning 2:
This method returns a clone of the previous value, not the original (identically equal) value previously put
into the map.**
:param key: (object), the specified key.
:param value: (object), the value to replace the previous value.
:return: (object), previous value associated with key, or ``None`` if there was no mapping for key. | Below is the the instruction that describes the task:
### Input:
Replaces the entry for a key only if it is currently mapped to some value.
This is equivalent to:
>>> if map.contains_key(key):
>>> return map.put(key,value)
>>> else:
>>> return None
except that the action is performed atomically.
**Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
**Warning 2:
This method returns a clone of the previous value, not the original (identically equal) value previously put
into the map.**
:param key: (object), the specified key.
:param value: (object), the value to replace the previous value.
:return: (object), previous value associated with key, or ``None`` if there was no mapping for key.
### Response:
def replace(self, key, value):
"""
Replaces the entry for a key only if it is currently mapped to some value.
This is equivalent to:
>>> if map.contains_key(key):
>>> return map.put(key,value)
>>> else:
>>> return None
except that the action is performed atomically.
**Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
**Warning 2:
This method returns a clone of the previous value, not the original (identically equal) value previously put
into the map.**
:param key: (object), the specified key.
:param value: (object), the value to replace the previous value.
:return: (object), previous value associated with key, or ``None`` if there was no mapping for key.
"""
check_not_none(key, "key can't be None")
check_not_none(value, "value can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._replace_internal(key_data, value_data) |
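An illustrative flow, assuming my_map is the blocking map proxy exposing the method above:
my_map.put('answer', 41)
previous = my_map.replace('answer', 42)     # returns a clone of 41
untouched = my_map.replace('missing', 1)    # returns None; no entry is created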
def read_py_url(url, errors='replace', skip_encoding_cookie=True):
"""Read a Python file from a URL, using the encoding declared inside the file.
Parameters
----------
url : str
The URL from which to fetch the file.
errors : str
How to handle decoding errors in the file. Options are the same as for
bytes.decode(), but here 'replace' is the default.
skip_encoding_cookie : bool
If True (the default), and the encoding declaration is found in the first
two lines, that line will be excluded from the output - compiling a
unicode string with an encoding declaration is a SyntaxError in Python 2.
Returns
-------
A unicode string containing the contents of the file.
"""
response = urllib.urlopen(url)
buffer = io.BytesIO(response.read())
encoding, lines = detect_encoding(buffer.readline)
buffer.seek(0)
text = TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True)
text.mode = 'r'
if skip_encoding_cookie:
return "".join(strip_encoding_cookie(text))
else:
return text.read() | Read a Python file from a URL, using the encoding declared inside the file.
Parameters
----------
url : str
The URL from which to fetch the file.
errors : str
How to handle decoding errors in the file. Options are the same as for
bytes.decode(), but here 'replace' is the default.
skip_encoding_cookie : bool
If True (the default), and the encoding declaration is found in the first
two lines, that line will be excluded from the output - compiling a
unicode string with an encoding declaration is a SyntaxError in Python 2.
Returns
-------
A unicode string containing the contents of the file. | Below is the the instruction that describes the task:
### Input:
Read a Python file from a URL, using the encoding declared inside the file.
Parameters
----------
url : str
The URL from which to fetch the file.
errors : str
How to handle decoding errors in the file. Options are the same as for
bytes.decode(), but here 'replace' is the default.
skip_encoding_cookie : bool
If True (the default), and the encoding declaration is found in the first
two lines, that line will be excluded from the output - compiling a
unicode string with an encoding declaration is a SyntaxError in Python 2.
Returns
-------
A unicode string containing the contents of the file.
### Response:
def read_py_url(url, errors='replace', skip_encoding_cookie=True):
"""Read a Python file from a URL, using the encoding declared inside the file.
Parameters
----------
url : str
The URL from which to fetch the file.
errors : str
How to handle decoding errors in the file. Options are the same as for
bytes.decode(), but here 'replace' is the default.
skip_encoding_cookie : bool
If True (the default), and the encoding declaration is found in the first
two lines, that line will be excluded from the output - compiling a
unicode string with an encoding declaration is a SyntaxError in Python 2.
Returns
-------
A unicode string containing the contents of the file.
"""
response = urllib.urlopen(url)
buffer = io.BytesIO(response.read())
encoding, lines = detect_encoding(buffer.readline)
buffer.seek(0)
text = TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True)
text.mode = 'r'
if skip_encoding_cookie:
return "".join(strip_encoding_cookie(text))
else:
return text.read() |
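A usage sketch; the URL is hypothetical, and the function relies on the Python 2-era urllib API shown above:
source = read_py_url('http://example.com/snippets/demo.py', skip_encoding_cookie=True)
print(source.splitlines()[0])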
def _ctypes_ex_assign(executable):
"""Return a list of code lines to allocate and assign the local parameter definitions
that match those in the signature of the wrapped executable.
"""
result = []
for p in executable.ordered_parameters:
_ctypes_code_parameter(result, p, "assign")
if type(executable).__name__ == "Function":
_ctypes_code_parameter(result, executable, "assign")
return result | Return a list of code lines to allocate and assign the local parameter definitions
that match those in the signature of the wrapped executable. | Below is the the instruction that describes the task:
### Input:
Return a list of code lines to allocate and assign the local parameter definitions
that match those in the signature of the wrapped executable.
### Response:
def _ctypes_ex_assign(executable):
"""Return a list of code lines to allocate and assign the local parameter definitions
that match those in the signature of the wrapped executable.
"""
result = []
for p in executable.ordered_parameters:
_ctypes_code_parameter(result, p, "assign")
if type(executable).__name__ == "Function":
_ctypes_code_parameter(result, executable, "assign")
return result |
def open(self):
"""Open the connection with the device."""
try:
self.device.open()
except ConnectTimeoutError as cte:
raise ConnectionException(cte.msg)
self.device.timeout = self.timeout
self.device._conn._session.transport.set_keepalive(self.keepalive)
if hasattr(self.device, "cu"):
# make sure to remove the cu attr from previous session
# ValueError: requested attribute name cu already exists
del self.device.cu
self.device.bind(cu=Config)
if not self.lock_disable and self.session_config_lock:
self._lock() | Open the connection with the device. | Below is the the instruction that describes the task:
### Input:
Open the connection with the device.
### Response:
def open(self):
"""Open the connection with the device."""
try:
self.device.open()
except ConnectTimeoutError as cte:
raise ConnectionException(cte.msg)
self.device.timeout = self.timeout
self.device._conn._session.transport.set_keepalive(self.keepalive)
if hasattr(self.device, "cu"):
# make sure to remove the cu attr from previous session
# ValueError: requested attribute name cu already exists
del self.device.cu
self.device.bind(cu=Config)
if not self.lock_disable and self.session_config_lock:
self._lock() |
def _bnot32(ins):
""" Negates top (Bitwise NOT) of the stack (32 bits in DEHL)
"""
output = _32bit_oper(ins.quad[2])
output.append('call __BNOT32')
output.append('push de')
output.append('push hl')
REQUIRES.add('bnot32.asm')
return output | Negates top (Bitwise NOT) of the stack (32 bits in DEHL) | Below is the the instruction that describes the task:
### Input:
Negates top (Bitwise NOT) of the stack (32 bits in DEHL)
### Response:
def _bnot32(ins):
""" Negates top (Bitwise NOT) of the stack (32 bits in DEHL)
"""
output = _32bit_oper(ins.quad[2])
output.append('call __BNOT32')
output.append('push de')
output.append('push hl')
REQUIRES.add('bnot32.asm')
return output |
def _zscore(a):
""" Calculating z-score of data on the first axis.
If the numbers in any column are all equal, scipy.stats.zscore
will return NaN for this column. We shall correct them all to
be zeros.
Parameters
----------
a: numpy array
Returns
-------
zscore: numpy array
The z-scores of input "a", with any columns including non-finite
numbers replaced by all zeros.
"""
assert a.ndim > 1, 'a must have more than one dimensions'
zscore = scipy.stats.zscore(a, axis=0)
zscore[:, np.logical_not(np.all(np.isfinite(zscore), axis=0))] = 0
return zscore | Calculating z-score of data on the first axis.
If the numbers in any column are all equal, scipy.stats.zscore
will return NaN for this column. We shall correct them all to
be zeros.
Parameters
----------
a: numpy array
Returns
-------
zscore: numpy array
The z-scores of input "a", with any columns including non-finite
numbers replaced by all zeros. | Below is the the instruction that describes the task:
### Input:
Calculating z-score of data on the first axis.
If the numbers in any column are all equal, scipy.stats.zscore
will return NaN for this column. We shall correct them all to
be zeros.
Parameters
----------
a: numpy array
Returns
-------
zscore: numpy array
The z-scores of input "a", with any columns including non-finite
numbers replaced by all zeros.
### Response:
def _zscore(a):
""" Calculating z-score of data on the first axis.
If the numbers in any column are all equal, scipy.stats.zscore
will return NaN for this column. We shall correct them all to
be zeros.
Parameters
----------
a: numpy array
Returns
-------
zscore: numpy array
The z-scores of input "a", with any columns including non-finite
numbers replaced by all zeros.
"""
assert a.ndim > 1, 'a must have more than one dimensions'
zscore = scipy.stats.zscore(a, axis=0)
zscore[:, np.logical_not(np.all(np.isfinite(zscore), axis=0))] = 0
return zscore |
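A quick check of the constant-column handling, assuming the numpy/scipy imports used above:
import numpy as np
a = np.array([[1.0, 2.0],
              [1.0, 4.0],
              [1.0, 6.0]])
z = _zscore(a)
print(z[:, 0])   # [0. 0. 0.] -- the constant column is zeroed instead of NaN
print(z[:, 1])   # symmetric z-scores around 0 for the varying column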
def to(self, to_emails, global_substitutions=None, is_multiple=False, p=0):
"""Adds To objects to the Personalization object
:param to_emails: A To or list of To objects
:type to_emails: To, list(To), str, tuple
:param global_substitutions: A dict of substitutions for all recipients
:type global_substitutions: dict
:param is_multiple: Create a new personalization for each recipient
:type is_multiple: bool
:param p: p is the Personalization object or Personalization object
index
:type p: Personalization, integer, optional
"""
if isinstance(to_emails, list):
for email in to_emails:
if isinstance(email, str):
email = To(email, None)
if isinstance(email, tuple):
email = To(email[0], email[1])
self.add_to(email, global_substitutions, is_multiple, p)
else:
if isinstance(to_emails, str):
to_emails = To(to_emails, None)
if isinstance(to_emails, tuple):
to_emails = To(to_emails[0], to_emails[1])
self.add_to(to_emails, global_substitutions, is_multiple, p) | Adds To objects to the Personalization object
:param to_emails: A To or list of To objects
:type to_emails: To, list(To), str, tuple
:param global_substitutions: A dict of substitutions for all recipients
:type global_substitutions: dict
:param is_multiple: Create a new personilization for each recipient
:type is_multiple: bool
:param p: p is the Personalization object or Personalization object
index
:type p: Personalization, integer, optional | Below is the instruction that describes the task:
### Input:
Adds To objects to the Personalization object
:param to_emails: A To or list of To objects
:type to_emails: To, list(To), str, tuple
:param global_substitutions: A dict of substitutions for all recipients
:type global_substitutions: dict
:param is_multiple: Create a new personalization for each recipient
:type is_multiple: bool
:param p: p is the Personalization object or Personalization object
index
:type p: Personalization, integer, optional
### Response:
def to(self, to_emails, global_substitutions=None, is_multiple=False, p=0):
"""Adds To objects to the Personalization object
:param to_emails: A To or list of To objects
:type to_emails: To, list(To), str, tuple
:param global_substitutions: A dict of substitutions for all recipients
:type global_substitutions: dict
:param is_multiple: Create a new personalization for each recipient
:type is_multiple: bool
:param p: p is the Personalization object or Personalization object
index
:type p: Personalization, integer, optional
"""
if isinstance(to_emails, list):
for email in to_emails:
if isinstance(email, str):
email = To(email, None)
if isinstance(email, tuple):
email = To(email[0], email[1])
self.add_to(email, global_substitutions, is_multiple, p)
else:
if isinstance(to_emails, str):
to_emails = To(to_emails, None)
if isinstance(to_emails, tuple):
to_emails = To(to_emails[0], to_emails[1])
self.add_to(to_emails, global_substitutions, is_multiple, p) |
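For illustration only, here is a stand-alone sketch of the normalization the method performs on its `to_emails` argument. The `To` class below is a trivial placeholder, not the real helper type, and nothing here touches the actual mail-building code:
```python
class To:                                  # placeholder, not the real class
    def __init__(self, email, name=None):
        self.email, self.name = email, name

def normalize(to_emails):
    """Mirror the str/tuple/To handling shown above."""
    if isinstance(to_emails, str):
        return [To(to_emails, None)]
    if isinstance(to_emails, tuple):
        return [To(to_emails[0], to_emails[1])]
    out = []
    for email in to_emails:               # assume a list
        if isinstance(email, str):
            email = To(email, None)
        elif isinstance(email, tuple):
            email = To(email[0], email[1])
        out.append(email)
    return out

for arg in ("a@example.com", ("b@example.com", "B"),
            ["c@example.com", ("d@example.com", "D")]):
    print([(t.email, t.name) for t in normalize(arg)])
```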
def login(self, account=None, app_account=None, flush=True):
"""
Log into the connected host using the best method available.
If an account is not given, default to the account that was
used during the last call to login(). If a previous call was not
made, use the account that was passed to the constructor. If that
also fails, raise a TypeError.
The app_account is passed to :class:`app_authenticate()` and
:class:`app_authorize()`.
If app_account is not given, default to the value of the account
argument.
:type account: Account
:param account: The account for protocol level authentication.
:type app_account: Account
:param app_account: The account for app level authentication.
:type flush: bool
:param flush: Whether to flush the last prompt from the buffer.
"""
with self._get_account(account) as account:
if app_account is None:
app_account = account
self.authenticate(account, flush=False)
if self.get_driver().supports_auto_authorize():
self.expect_prompt()
self.auto_app_authorize(app_account, flush=flush) | Log into the connected host using the best method available.
If an account is not given, default to the account that was
used during the last call to login(). If a previous call was not
made, use the account that was passed to the constructor. If that
also fails, raise a TypeError.
The app_account is passed to :class:`app_authenticate()` and
:class:`app_authorize()`.
If app_account is not given, default to the value of the account
argument.
:type account: Account
:param account: The account for protocol level authentication.
:type app_account: Account
:param app_account: The account for app level authentication.
:type flush: bool
:param flush: Whether to flush the last prompt from the buffer. | Below is the instruction that describes the task:
### Input:
Log into the connected host using the best method available.
If an account is not given, default to the account that was
used during the last call to login(). If a previous call was not
made, use the account that was passed to the constructor. If that
also fails, raise a TypeError.
The app_account is passed to :class:`app_authenticate()` and
:class:`app_authorize()`.
If app_account is not given, default to the value of the account
argument.
:type account: Account
:param account: The account for protocol level authentication.
:type app_account: Account
:param app_account: The account for app level authentication.
:type flush: bool
:param flush: Whether to flush the last prompt from the buffer.
### Response:
def login(self, account=None, app_account=None, flush=True):
"""
Log into the connected host using the best method available.
If an account is not given, default to the account that was
used during the last call to login(). If a previous call was not
made, use the account that was passed to the constructor. If that
also fails, raise a TypeError.
The app_account is passed to :class:`app_authenticate()` and
:class:`app_authorize()`.
If app_account is not given, default to the value of the account
argument.
:type account: Account
:param account: The account for protocol level authentication.
:type app_account: Account
:param app_account: The account for app level authentication.
:type flush: bool
:param flush: Whether to flush the last prompt from the buffer.
"""
with self._get_account(account) as account:
if app_account is None:
app_account = account
self.authenticate(account, flush=False)
if self.get_driver().supports_auto_authorize():
self.expect_prompt()
self.auto_app_authorize(app_account, flush=flush) |
def send(self, timeout=None):
"""Returns an event or None if no events occur before timeout."""
if self.sigint_event and is_main_thread():
with ReplacedSigIntHandler(self.sigint_handler):
return self._send(timeout)
else:
return self._send(timeout) | Returns an event or None if no events occur before timeout. | Below is the instruction that describes the task:
### Input:
Returns an event or None if no events occur before timeout.
### Response:
def send(self, timeout=None):
"""Returns an event or None if no events occur before timeout."""
if self.sigint_event and is_main_thread():
with ReplacedSigIntHandler(self.sigint_handler):
return self._send(timeout)
else:
return self._send(timeout) |
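The interesting part is the temporary SIGINT swap. A minimal sketch of that pattern using only the standard library (the context-manager and handler names are illustrative, not the project's own):
```python
import signal
from contextlib import contextmanager

@contextmanager
def replaced_sigint_handler(handler):
    previous = signal.signal(signal.SIGINT, handler)   # install temporary handler
    try:
        yield
    finally:
        signal.signal(signal.SIGINT, previous)         # always restore the old one

def on_sigint(signum, frame):
    print("SIGINT caught; would be queued as an event")

with replaced_sigint_handler(on_sigint):
    pass   # a blocking wait on the event source would sit here
```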
def tmpdir(prefix='npythy_tempdir_', delete=True):
'''
tmpdir() creates a temporary directory and yields its path. At python exit, the directory and
all of its contents are recursively deleted (so long as the normal python exit process is
allowed to call the atexit handlers).
tmpdir(prefix) uses the given prefix in the tempfile.mkdtemp() call.
The option delete may be set to False to specify that the tempdir should not be deleted on exit.
'''
path = tempfile.mkdtemp(prefix=prefix)
if not os.path.isdir(path): raise ValueError('Could not find or create temp directory')
if delete: atexit.register(shutil.rmtree, path)
return path | tmpdir() creates a temporary directory and yields its path. At python exit, the directory and
all of its contents are recursively deleted (so long as the normal python exit process is
allowed to call the atexit handlers).
tmpdir(prefix) uses the given prefix in the tempfile.mkdtemp() call.
The option delete may be set to False to specify that the tempdir should not be deleted on exit. | Below is the instruction that describes the task:
### Input:
tmpdir() creates a temporary directory and yields its path. At python exit, the directory and
all of its contents are recursively deleted (so long as the normal python exit process is
allowed to call the atexit handlers).
tmpdir(prefix) uses the given prefix in the tempfile.mkdtemp() call.
The option delete may be set to False to specify that the tempdir should not be deleted on exit.
### Response:
def tmpdir(prefix='npythy_tempdir_', delete=True):
'''
tmpdir() creates a temporary directory and yields its path. At python exit, the directory and
all of its contents are recursively deleted (so long as the the normal python exit process is
allowed to call the atexit handlers).
tmpdir(prefix) uses the given prefix in the tempfile.mkdtemp() call.
The option delete may be set to False to specify that the tempdir should not be deleted on exit.
'''
path = tempfile.mkdtemp(prefix=prefix)
if not os.path.isdir(path): raise ValueError('Could not find or create temp directory')
if delete: atexit.register(shutil.rmtree, path)
return path |
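The same pattern with nothing but the standard library, shown here as an independent sketch rather than a call into the helper above (the prefix and file name are arbitrary):
```python
import atexit
import os
import shutil
import tempfile

path = tempfile.mkdtemp(prefix='npythy_tempdir_')
atexit.register(shutil.rmtree, path)      # removed on normal interpreter exit

with open(os.path.join(path, 'scratch.txt'), 'w') as fh:
    fh.write('temporary data')
print(os.listdir(path))                   # ['scratch.txt']
```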
def pinch(self, direction='in', percent=0.6, duration=2.0, dead_zone=0.1):
"""
Squeezing or expanding 2 fingers on this UI with given motion range and duration.
Args:
direction (:py:obj:`str`): pinching direction, only "in" or "out". "in" for squeezing, "out" for expanding
percent (:py:obj:`float`): squeezing range from or expanding range to of the bounds of the UI
duration (:py:obj:`float`): time interval in which the action is performed
dead_zone (:py:obj:`float`): pinching inner circle radius. should not be greater than ``percent``
Raises:
PocoNoSuchNodeException: raised when the UI element does not exist
"""
if direction not in ('in', 'out'):
raise ValueError('Argument `direction` should be one of "in" or "out". Got {}'.format(repr(direction)))
if dead_zone >= percent:
raise ValueError('Argument `dead_zone` should not be greater than `percent`. dead_zone={}, percent={}'
.format(repr(dead_zone), repr(percent)))
w, h = self.get_size()
x, y = self.get_position()
# focus = self._focus or [0.5, 0.5]
tracks = make_pinching(direction, [x, y], [w, h], percent, dead_zone, duration)
speed = math.sqrt(w * h) * (percent - dead_zone) / 2 / duration
# When the motion is slow, raise the accuracy accordingly; this helps keep the gesture precise
ret = self.poco.apply_motion_tracks(tracks, accuracy=speed * 0.03)
return ret | Squeezing or expanding 2 fingers on this UI with given motion range and duration.
Args:
direction (:py:obj:`str`): pinching direction, only "in" or "out". "in" for squeezing, "out" for expanding
percent (:py:obj:`float`): squeezing range from or expanding range to of the bounds of the UI
duration (:py:obj:`float`): time interval in which the action is performed
dead_zone (:py:obj:`float`): pinching inner circle radius. should not be greater than ``percent``
Raises:
PocoNoSuchNodeException: raised when the UI element does not exist | Below is the instruction that describes the task:
### Input:
Squeezing or expanding 2 fingers on this UI with given motion range and duration.
Args:
direction (:py:obj:`str`): pinching direction, only "in" or "out". "in" for squeezing, "out" for expanding
percent (:py:obj:`float`): squeezing range from or expanding range to of the bounds of the UI
duration (:py:obj:`float`): time interval in which the action is performed
dead_zone (:py:obj:`float`): pinching inner circle radius. should not be greater than ``percent``
Raises:
PocoNoSuchNodeException: raised when the UI element does not exist
### Response:
def pinch(self, direction='in', percent=0.6, duration=2.0, dead_zone=0.1):
"""
Squeezing or expanding 2 fingers on this UI with given motion range and duration.
Args:
direction (:py:obj:`str`): pinching direction, only "in" or "out". "in" for squeezing, "out" for expanding
percent (:py:obj:`float`): squeezing range from or expanding range to of the bounds of the UI
duration (:py:obj:`float`): time interval in which the action is performed
dead_zone (:py:obj:`float`): pinching inner circle radius. should not be greater than ``percent``
Raises:
PocoNoSuchNodeException: raised when the UI element does not exist
"""
if direction not in ('in', 'out'):
raise ValueError('Argument `direction` should be one of "in" or "out". Got {}'.format(repr(direction)))
if dead_zone >= percent:
raise ValueError('Argument `dead_zone` should not be greater than `percent`. dead_zone={}, percent={}'
.format(repr(dead_zone), repr(percent)))
w, h = self.get_size()
x, y = self.get_position()
# focus = self._focus or [0.5, 0.5]
tracks = make_pinching(direction, [x, y], [w, h], percent, dead_zone, duration)
speed = math.sqrt(w * h) * (percent - dead_zone) / 2 / duration
# When the motion is slow, raise the accuracy accordingly; this helps keep the gesture precise
ret = self.poco.apply_motion_tracks(tracks, accuracy=speed * 0.03)
return ret |
def pack_lob_data(remaining_size, payload, row_header_start_pos, row_lobs):
"""
After parameter row has been written, append the lobs and update the corresponding lob headers
with lob position and lob size:
:param payload: payload object (io.BytesIO instance)
:param row_header_start_pos: absolute position of start position of row within payload
:param row_lobs: list of row buffer instance (containing binary encoded lob data, header position and DataType)
"""
unwritten_lobs = collections.deque()
for lob_buffer in row_lobs:
# Calculate relative position of lob within the binary packed parameter row.
# Add +1, Hana counts from 1, not 0!
rel_lob_pos = payload.tell() - row_header_start_pos + 1
# Calculate how much space is left in message for lob data:
max_data_to_write = min(lob_buffer.encoded_lob_size, remaining_size - payload.tell())
payload.write(lob_buffer.encoded_data.read(max_data_to_write))
is_last_data = max_data_to_write == lob_buffer.encoded_lob_size
if not is_last_data:
# lob has not been written (partially or entirely) into message -> register for further write requests
unwritten_lobs.append(lob_buffer)
# Write position and size of lob data into lob header block:
payload.seek(lob_buffer.lob_header_pos)
payload.write(lob_buffer.DataType.prepare(None, length=max_data_to_write,
position=rel_lob_pos, is_last_data=is_last_data))
# Set pointer back to end for further writing
payload.seek(0, io.SEEK_END)
return unwritten_lobs | After parameter row has been written, append the lobs and update the corresponding lob headers
with lob position and lob size:
:param payload: payload object (io.BytesIO instance)
:param row_header_start_pos: absolute position of start position of row within payload
:param row_lobs: list of row buffer instance (containing binary encoded lob data, header position and DataType) | Below is the instruction that describes the task:
### Input:
After parameter row has been written, append the lobs and update the corresponding lob headers
with lob position and lob size:
:param payload: payload object (io.BytesIO instance)
:param row_header_start_pos: absolute position of start position of row within payload
:param row_lobs: list of row buffer instance (containing binary encoded lob data, header position and DataType)
### Response:
def pack_lob_data(remaining_size, payload, row_header_start_pos, row_lobs):
"""
After parameter row has been written, append the lobs and update the corresponding lob headers
with lob position and lob size:
:param payload: payload object (io.BytesIO instance)
:param row_header_start_pos: absolute position of start position of row within payload
:param row_lobs: list of row buffer instance (containing binary encoded lob data, header position and DataType)
"""
unwritten_lobs = collections.deque()
for lob_buffer in row_lobs:
# Calculate relative position of lob within the binary packed parameter row.
# Add +1, Hana counts from 1, not 0!
rel_lob_pos = payload.tell() - row_header_start_pos + 1
# Calculate how much space is left in message for lob data:
max_data_to_write = min(lob_buffer.encoded_lob_size, remaining_size - payload.tell())
payload.write(lob_buffer.encoded_data.read(max_data_to_write))
is_last_data = max_data_to_write == lob_buffer.encoded_lob_size
if not is_last_data:
# lob has not been written (partially or entirely) into message -> register for further write requests
unwritten_lobs.append(lob_buffer)
# Write position and size of lob data into lob header block:
payload.seek(lob_buffer.lob_header_pos)
payload.write(lob_buffer.DataType.prepare(None, length=max_data_to_write,
position=rel_lob_pos, is_last_data=is_last_data))
# Set pointer back to end for further writing
payload.seek(0, io.SEEK_END)
return unwritten_lobs |
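The core trick above is reserving header bytes, writing the body, then seeking back to patch the header in place. A stripped-down illustration with a made-up 4-byte length header (no HANA wire format implied):
```python
import io
import struct

payload = io.BytesIO()
header_pos = payload.tell()
payload.write(b'\x00' * 4)                    # reserve space for a length header
body = b'some lob bytes'
payload.write(body)

payload.seek(header_pos)                      # jump back ...
payload.write(struct.pack('<I', len(body)))   # ... and patch the header in place
payload.seek(0, io.SEEK_END)                  # restore position for further writes
print(payload.getvalue())
```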
def map(self, f_list: List[Callable[[np.ndarray], int]], axis: int = 0, chunksize: int = 1000, selection: np.ndarray = None) -> List[np.ndarray]:
"""
Apply a function along an axis without loading the entire dataset in memory.
Args:
f_list (list of func): Function(s) that takes a numpy ndarray as argument
axis (int): Axis along which to apply the function (0 = rows, 1 = columns)
chunksize (int): Number of rows (columns) to load per chunk
selection (array of bool): Columns (rows) to include
Returns:
numpy.ndarray result of function application
If you supply a list of functions, the result will be a list of numpy arrays. This is more
efficient than repeatedly calling map() one function at a time.
"""
if hasattr(f_list, '__call__'):
raise ValueError("f_list must be a list of functions, not a function itself")
result = []
if axis == 0:
rows_per_chunk = chunksize
for i in range(len(f_list)):
result.append(np.zeros(self.shape[0]))
ix = 0
while ix < self.shape[0]:
rows_per_chunk = min(self.shape[0] - ix, rows_per_chunk)
if selection is not None:
chunk = self[ix:ix + rows_per_chunk, :][:, selection]
else:
chunk = self[ix:ix + rows_per_chunk, :]
for i in range(len(f_list)):
result[i][ix:ix + rows_per_chunk] = np.apply_along_axis(f_list[i], 1, chunk)
ix = ix + rows_per_chunk
elif axis == 1:
cols_per_chunk = chunksize
for i in range(len(f_list)):
result.append(np.zeros(self.shape[1]))
ix = 0
while ix < self.shape[1]:
cols_per_chunk = min(self.shape[1] - ix, cols_per_chunk)
if selection is not None:
chunk = self[:, ix:ix + cols_per_chunk][selection, :]
else:
chunk = self[:, ix:ix + cols_per_chunk]
for i in range(len(f_list)):
result[i][ix:ix + cols_per_chunk] = np.apply_along_axis(f_list[i], 0, chunk)
ix = ix + cols_per_chunk
return result | Apply a function along an axis without loading the entire dataset in memory.
Args:
f_list (list of func): Function(s) that takes a numpy ndarray as argument
axis (int): Axis along which to apply the function (0 = rows, 1 = columns)
chunksize (int): Number of rows (columns) to load per chunk
selection (array of bool): Columns (rows) to include
Returns:
numpy.ndarray result of function application
If you supply a list of functions, the result will be a list of numpy arrays. This is more
efficient than repeatedly calling map() one function at a time. | Below is the instruction that describes the task:
### Input:
Apply a function along an axis without loading the entire dataset in memory.
Args:
f_list (list of func): Function(s) that takes a numpy ndarray as argument
axis (int): Axis along which to apply the function (0 = rows, 1 = columns)
chunksize (int): Number of rows (columns) to load per chunk
selection (array of bool): Columns (rows) to include
Returns:
numpy.ndarray result of function application
If you supply a list of functions, the result will be a list of numpy arrays. This is more
efficient than repeatedly calling map() one function at a time.
### Response:
def map(self, f_list: List[Callable[[np.ndarray], int]], axis: int = 0, chunksize: int = 1000, selection: np.ndarray = None) -> List[np.ndarray]:
"""
Apply a function along an axis without loading the entire dataset in memory.
Args:
f_list (list of func): Function(s) that takes a numpy ndarray as argument
axis (int): Axis along which to apply the function (0 = rows, 1 = columns)
chunksize (int): Number of rows (columns) to load per chunk
selection (array of bool): Columns (rows) to include
Returns:
numpy.ndarray result of function application
If you supply a list of functions, the result will be a list of numpy arrays. This is more
efficient than repeatedly calling map() one function at a time.
"""
if hasattr(f_list, '__call__'):
raise ValueError("f_list must be a list of functions, not a function itself")
result = []
if axis == 0:
rows_per_chunk = chunksize
for i in range(len(f_list)):
result.append(np.zeros(self.shape[0]))
ix = 0
while ix < self.shape[0]:
rows_per_chunk = min(self.shape[0] - ix, rows_per_chunk)
if selection is not None:
chunk = self[ix:ix + rows_per_chunk, :][:, selection]
else:
chunk = self[ix:ix + rows_per_chunk, :]
for i in range(len(f_list)):
result[i][ix:ix + rows_per_chunk] = np.apply_along_axis(f_list[i], 1, chunk)
ix = ix + rows_per_chunk
elif axis == 1:
cols_per_chunk = chunksize
for i in range(len(f_list)):
result.append(np.zeros(self.shape[1]))
ix = 0
while ix < self.shape[1]:
cols_per_chunk = min(self.shape[1] - ix, cols_per_chunk)
if selection is not None:
chunk = self[:, ix:ix + cols_per_chunk][selection, :]
else:
chunk = self[:, ix:ix + cols_per_chunk]
for i in range(len(f_list)):
result[i][ix:ix + cols_per_chunk] = np.apply_along_axis(f_list[i], 0, chunk)
ix = ix + cols_per_chunk
return result |
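The chunking idea can be exercised on a plain NumPy array without any Loom file; the chunk size and the reducing function below are arbitrary choices for the sketch:
```python
import numpy as np

data = np.random.rand(10, 6)
chunksize = 4
row_means = np.zeros(data.shape[0])

ix = 0
while ix < data.shape[0]:
    step = min(chunksize, data.shape[0] - ix)
    chunk = data[ix:ix + step, :]
    row_means[ix:ix + step] = np.apply_along_axis(np.mean, 1, chunk)
    ix += step

assert np.allclose(row_means, data.mean(axis=1))
```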
def get_app_region_products(self, app_uri):
"""获得指定应用所在区域的产品信息
Args:
- app_uri: 应用的完整标识
Returns:
返回产品信息列表,若失败则返回None
"""
apps, retInfo = self.list_apps()
if apps is None:
return None
for app in apps:
if (app.get('uri') == app_uri):
return self.get_region_products(app.get('region'))
return | Get the product information for the region where the specified application is located
Args:
- app_uri: the full identifier of the application
Returns:
A list of product information, or None on failure | Below is the instruction that describes the task:
### Input:
Get the product information for the region where the specified application is located
Args:
- app_uri: the full identifier of the application
Returns:
A list of product information, or None on failure
### Response:
def get_app_region_products(self, app_uri):
"""获得指定应用所在区域的产品信息
Args:
- app_uri: 应用的完整标识
Returns:
返回产品信息列表,若失败则返回None
"""
apps, retInfo = self.list_apps()
if apps is None:
return None
for app in apps:
if (app.get('uri') == app_uri):
return self.get_region_products(app.get('region'))
return |
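The lookup itself is a plain scan over the app list; sketched below with made-up data standing in for `list_apps()` and `get_region_products()`:
```python
apps = [{'uri': 'user.app-a', 'region': 'nq'},
        {'uri': 'user.app-b', 'region': 'z0'}]

def region_for(app_uri):
    for app in apps:
        if app.get('uri') == app_uri:
            return app.get('region')
    return None

print(region_for('user.app-b'))   # 'z0'
print(region_for('missing'))      # None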
def _get_label_uuid(xapi, rectype, label):
'''
Internal, returns label's uuid
'''
try:
return getattr(xapi, rectype).get_by_name_label(label)[0]
except Exception:
return False | Internal, returns label's uuid | Below is the instruction that describes the task:
### Input:
Internal, returns label's uuid
### Response:
def _get_label_uuid(xapi, rectype, label):
'''
Internal, returns label's uuid
'''
try:
return getattr(xapi, rectype).get_by_name_label(label)[0]
except Exception:
return False |
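A tiny stand-in shows the getattr-based dispatch; the real `xapi` object comes from the Xen API session and is not modelled here, so both classes below are hypothetical:
```python
class FakeRecords:
    def get_by_name_label(self, label):
        return ['uuid-123'] if label == 'web01' else []

class FakeXapi:
    VM = FakeRecords()

def _get_label_uuid(xapi, rectype, label):
    try:
        return getattr(xapi, rectype).get_by_name_label(label)[0]
    except Exception:
        return False

print(_get_label_uuid(FakeXapi(), 'VM', 'web01'))    # 'uuid-123'
print(_get_label_uuid(FakeXapi(), 'VM', 'missing'))  # False
```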
def dict_str(dict_, **dictkw):
r"""
Makes a pretty printable / human-readable string representation of a
dictionary. In most cases this string could be evaled.
Args:
dict_ (dict_): a dictionary
Args:
dict_ (dict_): a dictionary
**dictkw: stritems, strkeys, strvals, nl, newlines, truncate, nobr,
nobraces, align, trailing_sep, explicit, itemsep,
truncatekw, sorted_, indent_, key_order, precision,
with_comma, key_order_metric, maxlen, recursive, use_numpy,
with_dtype, force_dtype, packed
Kwargs:
sorted_ (None): returns str sorted by a metric (default = None)
nl (int): preferred alias for newline. can be a countdown variable
(default = None)
key_order (None): overrides default ordering (default = None)
key_order_metric (str): special sorting of items. Accepted values:
None, 'strlen', 'val'
precision (int): (default = 8)
explicit (int): can be a countdown variable. if True, uses
dict(a=b) syntax instead of {'a': b}
nobr (bool): removes outer braces (default = False)
Ignore:
python -m utool.util_inspect recursive_parse_kwargs:2 --mod utool.util_str --func dict_str --verbinspect
CommandLine:
python -m utool.util_str --test-dict_str:1
python -m utool.util_str --test-dict_str --truncate=False --no-checkwant
python -m utool.util_str --test-dict_str --truncate=1 --no-checkwant
python -m utool.util_str --test-dict_str --truncate=2 --no-checkwant
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_str import dict_str, dict_itemstr_list
>>> import utool as ut
>>> dict_ = {'foo': {'spam': 'barbarbarbarbar' * 3, 'eggs': 'jam'},
>>> 'baz': 'barbarbarbarbar' * 3}
>>> truncate = ut.get_argval('--truncate', type_=None, default=1)
>>> result = dict_str(dict_, strvals=True, truncate=truncate,
>>> truncatekw={'maxlen': 20})
>>> print(result)
{
'baz': barbarbarbarbarbarbarbarbarbarbarbarbarbarbar,
'foo': {
'eggs': jam,
's ~~~TRUNCATED~~~ r,
},
}
Example:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> import numpy as np
>>> a, b, c = 'a', 'b', 'c'
>>> dict_ = {
>>> 'float': 2.3333,
>>> 'slice': slice(1, 2, None),
>>> 'arr': np.eye(3),
>>> 'list1': [1, 2, 3, 2.3333, a, b, c],
>>> 'dict1': {2.3333: 2.3333, a: b, c: [a, b]},
>>> 't': {c: {c: {c: {c : c}}}},
>>> 'set1': {c, a, b},
>>> 'set2': ut.oset([c, a, b]),
>>> 'list2': [
>>> {a: {c, a, b}, 1: slice(1, 2, 3)},
>>> [1, 2, {c, a, 2.333}, {a: [b], b: {c}, c: 2.333}]
>>> ],
>>> }
>>> dictkw = dict(stritems=True, itemsep='', precision=2, nl=1,
>>> nobr=True, explicit=True)
>>> result = ut.dict_str(dict_, **dictkw)
>>> print(result)
>>> dictkw = dict(stritems=0, precision=2, nl=True, nobr=False,
>>> explicit=0)
>>> result = ut.dict_str(dict_, **dictkw)
>>> print(result)
"""
import utool as ut
stritems = dictkw.pop('si', dictkw.pop('stritems', False))
if stritems:
dictkw['strkeys'] = True
dictkw['strvals'] = True
dictkw['strkeys'] = dictkw.pop('sk', dictkw.pop('strkeys', False))
dictkw['strvals'] = dictkw.pop('sv', dictkw.pop('strvals', False))
newlines = dictkw.pop('nl', dictkw.pop('newlines', True))
truncate = dictkw.pop('truncate', False)
dictkw['nl'] = _rectify_countdown_or_bool(newlines)
dictkw['truncate'] = _rectify_countdown_or_bool(truncate)
nobraces = dictkw.pop('nobr', dictkw.pop('nobraces', False))
align = dictkw.pop('align', False)
# Doesn't actually put in trailing comma if on same line
trailing_sep = dictkw.get('trailing_sep', True)
explicit = dictkw.get('explicit', False)
with_comma = True
itemsep = dictkw.get('itemsep', ' ')
if len(dict_) == 0:
return 'dict()' if explicit else '{}'
itemstr_list = dict_itemstr_list(dict_, **dictkw)
do_truncate = truncate is not False and (truncate is True or truncate == 0)
if do_truncate:
truncatekw = dictkw.get('truncatekw', {})
itemstr_list = [truncate_str(item, **truncatekw) for item in itemstr_list]
if nobraces:
lbr, rbr = '', ''
elif explicit:
lbr, rbr = 'dict(', ')'
else:
lbr, rbr = '{', '}'
if newlines:
sep = ',\n' if with_comma else '\n'
if nobraces:
retstr = sep.join(itemstr_list)
if trailing_sep:
retstr += ','
else:
parts = [ut.indent(itemstr, ' ') for itemstr in itemstr_list]
body_str = sep.join(parts)
if trailing_sep:
body_str += ','
retstr = (lbr + '\n' + body_str + '\n' + rbr)
if align:
retstr = ut.align(retstr, ':')
else:
sep = ',' + itemsep if with_comma else itemsep
# hack away last trailing comma
sequence_str = sep.join(itemstr_list)
retstr = lbr + sequence_str + rbr
# Is there a way to make truncate for dict_str compatible with list_str?
return retstr | r"""
Makes a pretty printable / human-readable string representation of a
dictionary. In most cases this string could be evaled.
Args:
dict_ (dict_): a dictionary
Args:
dict_ (dict_): a dictionary
**dictkw: stritems, strkeys, strvals, nl, newlines, truncate, nobr,
nobraces, align, trailing_sep, explicit, itemsep,
truncatekw, sorted_, indent_, key_order, precision,
with_comma, key_order_metric, maxlen, recursive, use_numpy,
with_dtype, force_dtype, packed
Kwargs:
sorted_ (None): returns str sorted by a metric (default = None)
nl (int): preferred alias for newline. can be a countdown variable
(default = None)
key_order (None): overrides default ordering (default = None)
key_order_metric (str): special sorting of items. Accepted values:
None, 'strlen', 'val'
precision (int): (default = 8)
explicit (int): can be a countdown variable. if True, uses
dict(a=b) syntax instead of {'a': b}
nobr (bool): removes outer braces (default = False)
Ignore:
python -m utool.util_inspect recursive_parse_kwargs:2 --mod utool.util_str --func dict_str --verbinspect
CommandLine:
python -m utool.util_str --test-dict_str:1
python -m utool.util_str --test-dict_str --truncate=False --no-checkwant
python -m utool.util_str --test-dict_str --truncate=1 --no-checkwant
python -m utool.util_str --test-dict_str --truncate=2 --no-checkwant
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_str import dict_str, dict_itemstr_list
>>> import utool as ut
>>> dict_ = {'foo': {'spam': 'barbarbarbarbar' * 3, 'eggs': 'jam'},
>>> 'baz': 'barbarbarbarbar' * 3}
>>> truncate = ut.get_argval('--truncate', type_=None, default=1)
>>> result = dict_str(dict_, strvals=True, truncate=truncate,
>>> truncatekw={'maxlen': 20})
>>> print(result)
{
'baz': barbarbarbarbarbarbarbarbarbarbarbarbarbarbar,
'foo': {
'eggs': jam,
's ~~~TRUNCATED~~~ r,
},
}
Example:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> import numpy as np
>>> a, b, c = 'a', 'b', 'c'
>>> dict_ = {
>>> 'float': 2.3333,
>>> 'slice': slice(1, 2, None),
>>> 'arr': np.eye(3),
>>> 'list1': [1, 2, 3, 2.3333, a, b, c],
>>> 'dict1': {2.3333: 2.3333, a: b, c: [a, b]},
>>> 't': {c: {c: {c: {c : c}}}},
>>> 'set1': {c, a, b},
>>> 'set2': ut.oset([c, a, b]),
>>> 'list2': [
>>> {a: {c, a, b}, 1: slice(1, 2, 3)},
>>> [1, 2, {c, a, 2.333}, {a: [b], b: {c}, c: 2.333}]
>>> ],
>>> }
>>> dictkw = dict(stritems=True, itemsep='', precision=2, nl=1,
>>> nobr=True, explicit=True)
>>> result = ut.dict_str(dict_, **dictkw)
>>> print(result)
>>> dictkw = dict(stritems=0, precision=2, nl=True, nobr=False,
>>> explicit=0)
>>> result = ut.dict_str(dict_, **dictkw)
>>> print(result) | Below is the instruction that describes the task:
### Input:
r"""
Makes a pretty printable / human-readable string representation of a
dictionary. In most cases this string could be evaled.
Args:
dict_ (dict_): a dictionary
Args:
dict_ (dict_): a dictionary
**dictkw: stritems, strkeys, strvals, nl, newlines, truncate, nobr,
nobraces, align, trailing_sep, explicit, itemsep,
truncatekw, sorted_, indent_, key_order, precision,
with_comma, key_order_metric, maxlen, recursive, use_numpy,
with_dtype, force_dtype, packed
Kwargs:
sorted_ (None): returns str sorted by a metric (default = None)
nl (int): preferred alias for newline. can be a countdown variable
(default = None)
key_order (None): overrides default ordering (default = None)
key_order_metric (str): special sorting of items. Accepted values:
None, 'strlen', 'val'
precision (int): (default = 8)
explicit (int): can be a countdown variable. if True, uses
dict(a=b) syntax instead of {'a': b}
nobr (bool): removes outer braces (default = False)
Ignore:
python -m utool.util_inspect recursive_parse_kwargs:2 --mod utool.util_str --func dict_str --verbinspect
CommandLine:
python -m utool.util_str --test-dict_str:1
python -m utool.util_str --test-dict_str --truncate=False --no-checkwant
python -m utool.util_str --test-dict_str --truncate=1 --no-checkwant
python -m utool.util_str --test-dict_str --truncate=2 --no-checkwant
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_str import dict_str, dict_itemstr_list
>>> import utool as ut
>>> dict_ = {'foo': {'spam': 'barbarbarbarbar' * 3, 'eggs': 'jam'},
>>> 'baz': 'barbarbarbarbar' * 3}
>>> truncate = ut.get_argval('--truncate', type_=None, default=1)
>>> result = dict_str(dict_, strvals=True, truncate=truncate,
>>> truncatekw={'maxlen': 20})
>>> print(result)
{
'baz': barbarbarbarbarbarbarbarbarbarbarbarbarbarbar,
'foo': {
'eggs': jam,
's ~~~TRUNCATED~~~ r,
},
}
Example:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> import numpy as np
>>> a, b, c = 'a', 'b', 'c'
>>> dict_ = {
>>> 'float': 2.3333,
>>> 'slice': slice(1, 2, None),
>>> 'arr': np.eye(3),
>>> 'list1': [1, 2, 3, 2.3333, a, b, c],
>>> 'dict1': {2.3333: 2.3333, a: b, c: [a, b]},
>>> 't': {c: {c: {c: {c : c}}}},
>>> 'set1': {c, a, b},
>>> 'set2': ut.oset([c, a, b]),
>>> 'list2': [
>>> {a: {c, a, b}, 1: slice(1, 2, 3)},
>>> [1, 2, {c, a, 2.333}, {a: [b], b: {c}, c: 2.333}]
>>> ],
>>> }
>>> dictkw = dict(stritems=True, itemsep='', precision=2, nl=1,
>>> nobr=True, explicit=True)
>>> result = ut.dict_str(dict_, **dictkw)
>>> print(result)
>>> dictkw = dict(stritems=0, precision=2, nl=True, nobr=False,
>>> explicit=0)
>>> result = ut.dict_str(dict_, **dictkw)
>>> print(result)
### Response:
def dict_str(dict_, **dictkw):
r"""
Makes a pretty printable / human-readable string representation of a
dictionary. In most cases this string could be evaled.
Args:
dict_ (dict_): a dictionary
Args:
dict_ (dict_): a dictionary
**dictkw: stritems, strkeys, strvals, nl, newlines, truncate, nobr,
nobraces, align, trailing_sep, explicit, itemsep,
truncatekw, sorted_, indent_, key_order, precision,
with_comma, key_order_metric, maxlen, recursive, use_numpy,
with_dtype, force_dtype, packed
Kwargs:
sorted_ (None): returns str sorted by a metric (default = None)
nl (int): preferred alias for newline. can be a countdown variable
(default = None)
key_order (None): overrides default ordering (default = None)
key_order_metric (str): special sorting of items. Accepted values:
None, 'strlen', 'val'
precision (int): (default = 8)
explicit (int): can be a countdown variable. if True, uses
dict(a=b) syntax instead of {'a': b}
nobr (bool): removes outer braces (default = False)
Ignore:
python -m utool.util_inspect recursive_parse_kwargs:2 --mod utool.util_str --func dict_str --verbinspect
CommandLine:
python -m utool.util_str --test-dict_str:1
python -m utool.util_str --test-dict_str --truncate=False --no-checkwant
python -m utool.util_str --test-dict_str --truncate=1 --no-checkwant
python -m utool.util_str --test-dict_str --truncate=2 --no-checkwant
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_str import dict_str, dict_itemstr_list
>>> import utool as ut
>>> dict_ = {'foo': {'spam': 'barbarbarbarbar' * 3, 'eggs': 'jam'},
>>> 'baz': 'barbarbarbarbar' * 3}
>>> truncate = ut.get_argval('--truncate', type_=None, default=1)
>>> result = dict_str(dict_, strvals=True, truncate=truncate,
>>> truncatekw={'maxlen': 20})
>>> print(result)
{
'baz': barbarbarbarbarbarbarbarbarbarbarbarbarbarbar,
'foo': {
'eggs': jam,
's ~~~TRUNCATED~~~ r,
},
}
Example:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> import numpy as np
>>> a, b, c = 'a', 'b', 'c'
>>> dict_ = {
>>> 'float': 2.3333,
>>> 'slice': slice(1, 2, None),
>>> 'arr': np.eye(3),
>>> 'list1': [1, 2, 3, 2.3333, a, b, c],
>>> 'dict1': {2.3333: 2.3333, a: b, c: [a, b]},
>>> 't': {c: {c: {c: {c : c}}}},
>>> 'set1': {c, a, b},
>>> 'set2': ut.oset([c, a, b]),
>>> 'list2': [
>>> {a: {c, a, b}, 1: slice(1, 2, 3)},
>>> [1, 2, {c, a, 2.333}, {a: [b], b: {c}, c: 2.333}]
>>> ],
>>> }
>>> dictkw = dict(stritems=True, itemsep='', precision=2, nl=1,
>>> nobr=True, explicit=True)
>>> result = ut.dict_str(dict_, **dictkw)
>>> print(result)
>>> dictkw = dict(stritems=0, precision=2, nl=True, nobr=False,
>>> explicit=0)
>>> result = ut.dict_str(dict_, **dictkw)
>>> print(result)
"""
import utool as ut
stritems = dictkw.pop('si', dictkw.pop('stritems', False))
if stritems:
dictkw['strkeys'] = True
dictkw['strvals'] = True
dictkw['strkeys'] = dictkw.pop('sk', dictkw.pop('strkeys', False))
dictkw['strvals'] = dictkw.pop('sv', dictkw.pop('strvals', False))
newlines = dictkw.pop('nl', dictkw.pop('newlines', True))
truncate = dictkw.pop('truncate', False)
dictkw['nl'] = _rectify_countdown_or_bool(newlines)
dictkw['truncate'] = _rectify_countdown_or_bool(truncate)
nobraces = dictkw.pop('nobr', dictkw.pop('nobraces', False))
align = dictkw.pop('align', False)
# Doesn't actually put in trailing comma if on same line
trailing_sep = dictkw.get('trailing_sep', True)
explicit = dictkw.get('explicit', False)
with_comma = True
itemsep = dictkw.get('itemsep', ' ')
if len(dict_) == 0:
return 'dict()' if explicit else '{}'
itemstr_list = dict_itemstr_list(dict_, **dictkw)
do_truncate = truncate is not False and (truncate is True or truncate == 0)
if do_truncate:
truncatekw = dictkw.get('truncatekw', {})
itemstr_list = [truncate_str(item, **truncatekw) for item in itemstr_list]
if nobraces:
lbr, rbr = '', ''
elif explicit:
lbr, rbr = 'dict(', ')'
else:
lbr, rbr = '{', '}'
if newlines:
sep = ',\n' if with_comma else '\n'
if nobraces:
retstr = sep.join(itemstr_list)
if trailing_sep:
retstr += ','
else:
parts = [ut.indent(itemstr, ' ') for itemstr in itemstr_list]
body_str = sep.join(parts)
if trailing_sep:
body_str += ','
retstr = (lbr + '\n' + body_str + '\n' + rbr)
if align:
retstr = ut.align(retstr, ':')
else:
sep = ',' + itemsep if with_comma else itemsep
# hack away last trailing comma
sequence_str = sep.join(itemstr_list)
retstr = lbr + sequence_str + rbr
# Is there a way to make truncate for dict_str compatible with list_str?
return retstr |
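One small piece worth isolating is the keyword aliasing (`'nl'` vs `'newlines'`, `'sv'` vs `'strvals'`), which relies on chained `dict.pop` defaults. A minimal, utool-free illustration; `alias_pop` is a made-up helper name:
```python
def alias_pop(kw, short, long_, default):
    # the inner pop supplies the default for the outer one
    return kw.pop(short, kw.pop(long_, default))

opts = {'nl': 2, 'sv': True}
newlines = alias_pop(opts, 'nl', 'newlines', True)
strvals = alias_pop(opts, 'sv', 'strvals', False)
print(newlines, strvals, opts)   # 2 True {}
```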
def matches(self, _filter):
"""
Returns whether the instance matches the given filter text.
:param _filter: A regex filter. If it starts with `<identifier>:`, then
the part before the colon will be used as an attribute
and the part after will be applied to that attribute.
:type _filter: ``basestring``
:return: True if the entry matches the filter.
:rtype: ``bool``
"""
within_attrib = re.match(r'^([a-z_.]+):(.*)', _filter)
having_attrib = re.match(r'^([a-z_.]+)\?$', _filter)
if within_attrib is not None:
# Then we're matching against a specific attribute.
val = self._get_attrib(within_attrib.group(1))
sub_regex = within_attrib.group(2)
if len(sub_regex) > 0:
sub_regex = re.compile(sub_regex, re.IGNORECASE)
return _match_regex(sub_regex, val)
else:
# Then we are matching on the value being empty.
return val == '' or val is None or val == []
elif having_attrib is not None:
# Then we're searching for anything that has a specific attribute.
val = self._get_attrib(having_attrib.group(1))
return val != '' and val is not None and val != []
else:
regex = re.compile(_filter, re.IGNORECASE)
return _match_regex(regex, vars(self)) | Returns whether the instance matches the given filter text.
:param _filter: A regex filter. If it starts with `<identifier>:`, then
the part before the colon will be used as an attribute
and the part after will be applied to that attribute.
:type _filter: ``basestring``
:return: True if the entry matches the filter.
:rtype: ``bool`` | Below is the instruction that describes the task:
### Input:
Returns whether the instance matches the given filter text.
:param _filter: A regex filter. If it starts with `<identifier>:`, then
the part before the colon will be used as an attribute
and the part after will be applied to that attribute.
:type _filter: ``basestring``
:return: True if the entry matches the filter.
:rtype: ``bool``
### Response:
def matches(self, _filter):
"""
Returns whether the instance matches the given filter text.
:param _filter: A regex filter. If it starts with `<identifier>:`, then
the part before the colon will be used as an attribute
and the part after will be applied to that attribute.
:type _filter: ``basestring``
:return: True if the entry matches the filter.
:rtype: ``bool``
"""
within_attrib = re.match(r'^([a-z_.]+):(.*)', _filter)
having_attrib = re.match(r'^([a-z_.]+)\?$', _filter)
if within_attrib is not None:
# Then we're matching against a specific attribute.
val = self._get_attrib(within_attrib.group(1))
sub_regex = within_attrib.group(2)
if len(sub_regex) > 0:
sub_regex = re.compile(sub_regex, re.IGNORECASE)
return _match_regex(sub_regex, val)
else:
# Then we are matching on the value being empty.
return val == '' or val is None or val == []
elif having_attrib is not None:
# Then we're searching for anything that has a specific attribute.
val = self._get_attrib(having_attrib.group(1))
return val != '' and val is not None and val != []
else:
regex = re.compile(_filter, re.IGNORECASE)
return _match_regex(regex, vars(self)) |
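A compact, self-contained sketch of the three filter forms handled above (`attr:regex`, `attr?`, bare regex). The dict entry stands in for the real object, and `vars(self)` is approximated by the dict itself:
```python
import re

def matches(entry, _filter):
    within = re.match(r'^([a-z_.]+):(.*)', _filter)
    having = re.match(r'^([a-z_.]+)\?$', _filter)
    if within is not None:
        val = entry.get(within.group(1))
        sub = within.group(2)
        if sub:
            return re.search(sub, str(val), re.IGNORECASE) is not None
        return val in ('', None, [])
    if having is not None:
        val = entry.get(having.group(1))
        return val not in ('', None, [])
    return re.search(_filter, str(entry), re.IGNORECASE) is not None

entry = {'name': 'backup-job', 'owner': '', 'tags': ['nightly']}
print(matches(entry, 'name:backup'))   # True  (attribute regex)
print(matches(entry, 'owner:'))        # True  (attribute is empty)
print(matches(entry, 'tags?'))         # True  (attribute is non-empty)
print(matches(entry, 'nightly'))       # True  (bare regex over everything)
```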
def get_binding(self, schema, data):
""" For a given schema, get a binding mediator providing links to the
RDF terms matching that schema. """
schema = self.parent.get_schema(schema)
return Binding(schema, self.parent.resolver, data=data) | For a given schema, get a binding mediator providing links to the
RDF terms matching that schema. | Below is the instruction that describes the task:
### Input:
For a given schema, get a binding mediator providing links to the
RDF terms matching that schema.
### Response:
def get_binding(self, schema, data):
""" For a given schema, get a binding mediator providing links to the
RDF terms matching that schema. """
schema = self.parent.get_schema(schema)
return Binding(schema, self.parent.resolver, data=data) |
def set_s3_bucket(self, region, name, bucketName):
"""Sets the S3 bucket location for logfile delivery
Args:
region (`str`): Name of the AWS region
name (`str`): Name of the CloudTrail Trail
bucketName (`str`): Name of the S3 bucket to deliver log files to
Returns:
`None`
"""
ct = self.session.client('cloudtrail', region_name=region)
ct.update_trail(Name=name, S3BucketName=bucketName)
auditlog(
event='cloudtrail.set_s3_bucket',
actor=self.ns,
data={
'account': self.account.account_name,
'region': region
}
)
self.log.info('Updated S3BucketName to {} for {} in {}/{}'.format(
bucketName,
name,
self.account.account_name,
region
)) | Sets the S3 bucket location for logfile delivery
Args:
region (`str`): Name of the AWS region
name (`str`): Name of the CloudTrail Trail
bucketName (`str`): Name of the S3 bucket to deliver log files to
Returns:
`None` | Below is the instruction that describes the task:
### Input:
Sets the S3 bucket location for logfile delivery
Args:
region (`str`): Name of the AWS region
name (`str`): Name of the CloudTrail Trail
bucketName (`str`): Name of the S3 bucket to deliver log files to
Returns:
`None`
### Response:
def set_s3_bucket(self, region, name, bucketName):
"""Sets the S3 bucket location for logfile delivery
Args:
region (`str`): Name of the AWS region
name (`str`): Name of the CloudTrail Trail
bucketName (`str`): Name of the S3 bucket to deliver log files to
Returns:
`None`
"""
ct = self.session.client('cloudtrail', region_name=region)
ct.update_trail(Name=name, S3BucketName=bucketName)
auditlog(
event='cloudtrail.set_s3_bucket',
actor=self.ns,
data={
'account': self.account.account_name,
'region': region
}
)
self.log.info('Updated S3BucketName to {} for {} in {}/{}'.format(
bucketName,
name,
self.account.account_name,
region
)) |
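Stripped of the session wrapper and audit logging, the underlying boto3 call mirrors the one made above. The region, trail and bucket names are placeholders, and real AWS credentials and permissions are needed for the call to succeed:
```python
import boto3

session = boto3.session.Session()
ct = session.client('cloudtrail', region_name='us-east-1')
ct.update_trail(Name='example-trail', S3BucketName='example-log-bucket')
```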
def _index_audio_ibm(self, basename=None, replace_already_indexed=False,
continuous=True, model="en-US_BroadbandModel",
word_confidence=True, word_alternatives_threshold=0.9,
profanity_filter_for_US_results=False):
"""
Implements a search-suitable interface for Watson speech API.
Some explanation of the parameters here has been taken from [1]_
Parameters
----------
basename : str, optional
A specific basename to be indexed and is placed in src_dir
e.g `audio.wav`.
If `None` is selected, all the valid audio files would be indexed.
Default is `None`.
replace_already_indexed : bool
`True`, To reindex some audio file that's already in the
timestamps.
Default is `False`.
continuous : bool
Indicates whether multiple final results that represent consecutive
phrases separated by long pauses are returned.
If true, such phrases are returned; if false (the default),
recognition ends after the first end-of-speech (EOS) incident is
detected.
Default is `True`.
model : {
'ar-AR_BroadbandModel',
'en-UK_BroadbandModel'
'en-UK_NarrowbandModel',
'en-US_BroadbandModel', (the default)
'en-US_NarrowbandModel',
'es-ES_BroadbandModel',
'es-ES_NarrowbandModel',
'fr-FR_BroadbandModel',
'ja-JP_BroadbandModel',
'ja-JP_NarrowbandModel',
'pt-BR_BroadbandModel',
'pt-BR_NarrowbandModel',
'zh-CN_BroadbandModel',
'zh-CN_NarrowbandModel'
}
The identifier of the model to be used for the recognition
Default is 'en-US_BroadbandModel'
word_confidence : bool
Indicates whether a confidence measure in the range of 0 to 1 is
returned for each word.
The default is True. (It's False in the original)
word_alternatives_threshold : numeric
A confidence value that is the lower bound for identifying a
hypothesis as a possible word alternative (also known as
"Confusion Networks"). An alternative word is considered if its
confidence is greater than or equal to the threshold. Specify a
probability between 0 and 1 inclusive.
Default is `0.9`.
profanity_filter_for_US_results : bool
Indicates whether profanity filtering is performed on the
transcript. If true, the service filters profanity from all output
by replacing inappropriate words with a series of asterisks.
If false, the service returns results with no censoring. Applies
to US English transcription only.
Default is `False`.
References
----------
.. [1] : https://ibm.com/watson/developercloud/speech-to-text/api/v1/
"""
params = {'continuous': continuous,
'model': model,
'word_alternatives_threshold': word_alternatives_threshold,
'word_confidence': word_confidence,
'timestamps': True,
'inactivity_timeout': str(-1),
'profanity_filter': profanity_filter_for_US_results}
self._prepare_audio(basename=basename,
replace_already_indexed=replace_already_indexed)
for staging_audio_basename in self._list_audio_files(
sub_dir="staging"):
original_audio_name = ''.join(
staging_audio_basename.split('.')[:-1])[:-3]
with open("{}/staging/{}".format(
self.src_dir, staging_audio_basename), "rb") as f:
if self.get_verbosity():
print("Uploading {}...".format(staging_audio_basename))
response = requests.post(
url=("https://stream.watsonplatform.net/"
"speech-to-text/api/v1/recognize"),
auth=(self.get_username_ibm(), self.get_password_ibm()),
headers={'content-type': 'audio/wav'},
data=f.read(),
params=params)
if self.get_verbosity():
print("Indexing {}...".format(staging_audio_basename))
self.__timestamps_unregulated[
original_audio_name + ".wav"].append(
self._timestamp_extractor_ibm(
staging_audio_basename, json.loads(response.text)))
if self.get_verbosity():
print("Done indexing {}".format(staging_audio_basename))
self._timestamp_regulator()
if self.get_verbosity():
print("Indexing procedure finished") | Implements a search-suitable interface for Watson speech API.
Some explanation of the parameters here has been taken from [1]_
Parameters
----------
basename : str, optional
A specific basename to be indexed and is placed in src_dir
e.g `audio.wav`.
If `None` is selected, all the valid audio files would be indexed.
Default is `None`.
replace_already_indexed : bool
`True`, To reindex some audio file that's already in the
timestamps.
Default is `False`.
continuous : bool
Indicates whether multiple final results that represent consecutive
phrases separated by long pauses are returned.
If true, such phrases are returned; if false (the default),
recognition ends after the first end-of-speech (EOS) incident is
detected.
Default is `True`.
model : {
'ar-AR_BroadbandModel',
'en-UK_BroadbandModel'
'en-UK_NarrowbandModel',
'en-US_BroadbandModel', (the default)
'en-US_NarrowbandModel',
'es-ES_BroadbandModel',
'es-ES_NarrowbandModel',
'fr-FR_BroadbandModel',
'ja-JP_BroadbandModel',
'ja-JP_NarrowbandModel',
'pt-BR_BroadbandModel',
'pt-BR_NarrowbandModel',
'zh-CN_BroadbandModel',
'zh-CN_NarrowbandModel'
}
The identifier of the model to be used for the recognition
Default is 'en-US_BroadbandModel'
word_confidence : bool
Indicates whether a confidence measure in the range of 0 to 1 is
returned for each word.
The default is True. (It's False in the original)
word_alternatives_threshold : numeric
A confidence value that is the lower bound for identifying a
hypothesis as a possible word alternative (also known as
"Confusion Networks"). An alternative word is considered if its
confidence is greater than or equal to the threshold. Specify a
probability between 0 and 1 inclusive.
Default is `0.9`.
profanity_filter_for_US_results : bool
Indicates whether profanity filtering is performed on the
transcript. If true, the service filters profanity from all output
by replacing inappropriate words with a series of asterisks.
If false, the service returns results with no censoring. Applies
to US English transcription only.
Default is `False`.
References
----------
.. [1] : https://ibm.com/watson/developercloud/speech-to-text/api/v1/ | Below is the instruction that describes the task:
### Input:
Implements a search-suitable interface for Watson speech API.
Some explanation of the parameters here has been taken from [1]_
Parameters
----------
basename : str, optional
A specific basename to be indexed and is placed in src_dir
e.g `audio.wav`.
If `None` is selected, all the valid audio files would be indexed.
Default is `None`.
replace_already_indexed : bool
`True`, To reindex some audio file that's already in the
timestamps.
Default is `False`.
continuous : bool
Indicates whether multiple final results that represent consecutive
phrases separated by long pauses are returned.
If true, such phrases are returned; if false (the default),
recognition ends after the first end-of-speech (EOS) incident is
detected.
Default is `True`.
model : {
'ar-AR_BroadbandModel',
'en-UK_BroadbandModel'
'en-UK_NarrowbandModel',
'en-US_BroadbandModel', (the default)
'en-US_NarrowbandModel',
'es-ES_BroadbandModel',
'es-ES_NarrowbandModel',
'fr-FR_BroadbandModel',
'ja-JP_BroadbandModel',
'ja-JP_NarrowbandModel',
'pt-BR_BroadbandModel',
'pt-BR_NarrowbandModel',
'zh-CN_BroadbandModel',
'zh-CN_NarrowbandModel'
}
The identifier of the model to be used for the recognition
Default is 'en-US_BroadbandModel'
word_confidence : bool
Indicates whether a confidence measure in the range of 0 to 1 is
returned for each word.
The default is True. (It's False in the original)
word_alternatives_threshold : numeric
A confidence value that is the lower bound for identifying a
hypothesis as a possible word alternative (also known as
"Confusion Networks"). An alternative word is considered if its
confidence is greater than or equal to the threshold. Specify a
probability between 0 and 1 inclusive.
Default is `0.9`.
profanity_filter_for_US_results : bool
Indicates whether profanity filtering is performed on the
transcript. If true, the service filters profanity from all output
by replacing inappropriate words with a series of asterisks.
If false, the service returns results with no censoring. Applies
to US English transcription only.
Default is `False`.
References
----------
.. [1] : https://ibm.com/watson/developercloud/speech-to-text/api/v1/
### Response:
def _index_audio_ibm(self, basename=None, replace_already_indexed=False,
continuous=True, model="en-US_BroadbandModel",
word_confidence=True, word_alternatives_threshold=0.9,
profanity_filter_for_US_results=False):
"""
Implements a search-suitable interface for Watson speech API.
Some explanation of the parameters here has been taken from [1]_
Parameters
----------
basename : str, optional
A specific basename to be indexed and is placed in src_dir
e.g `audio.wav`.
If `None` is selected, all the valid audio files would be indexed.
Default is `None`.
replace_already_indexed : bool
`True`, To reindex some audio file that's already in the
timestamps.
Default is `False`.
continuous : bool
Indicates whether multiple final results that represent consecutive
phrases separated by long pauses are returned.
If true, such phrases are returned; if false (the default),
recognition ends after the first end-of-speech (EOS) incident is
detected.
Default is `True`.
model : {
'ar-AR_BroadbandModel',
'en-UK_BroadbandModel'
'en-UK_NarrowbandModel',
'en-US_BroadbandModel', (the default)
'en-US_NarrowbandModel',
'es-ES_BroadbandModel',
'es-ES_NarrowbandModel',
'fr-FR_BroadbandModel',
'ja-JP_BroadbandModel',
'ja-JP_NarrowbandModel',
'pt-BR_BroadbandModel',
'pt-BR_NarrowbandModel',
'zh-CN_BroadbandModel',
'zh-CN_NarrowbandModel'
}
The identifier of the model to be used for the recognition
Default is 'en-US_BroadbandModel'
word_confidence : bool
Indicates whether a confidence measure in the range of 0 to 1 is
returned for each word.
The default is True. (It's False in the original)
word_alternatives_threshold : numeric
A confidence value that is the lower bound for identifying a
hypothesis as a possible word alternative (also known as
"Confusion Networks"). An alternative word is considered if its
confidence is greater than or equal to the threshold. Specify a
probability between 0 and 1 inclusive.
Default is `0.9`.
profanity_filter_for_US_results : bool
Indicates whether profanity filtering is performed on the
transcript. If true, the service filters profanity from all output
by replacing inappropriate words with a series of asterisks.
If false, the service returns results with no censoring. Applies
to US English transcription only.
Default is `False`.
References
----------
.. [1] : https://ibm.com/watson/developercloud/speech-to-text/api/v1/
"""
params = {'continuous': continuous,
'model': model,
'word_alternatives_threshold': word_alternatives_threshold,
'word_confidence': word_confidence,
'timestamps': True,
'inactivity_timeout': str(-1),
'profanity_filter': profanity_filter_for_US_results}
self._prepare_audio(basename=basename,
replace_already_indexed=replace_already_indexed)
for staging_audio_basename in self._list_audio_files(
sub_dir="staging"):
original_audio_name = ''.join(
staging_audio_basename.split('.')[:-1])[:-3]
with open("{}/staging/{}".format(
self.src_dir, staging_audio_basename), "rb") as f:
if self.get_verbosity():
print("Uploading {}...".format(staging_audio_basename))
response = requests.post(
url=("https://stream.watsonplatform.net/"
"speech-to-text/api/v1/recognize"),
auth=(self.get_username_ibm(), self.get_password_ibm()),
headers={'content-type': 'audio/wav'},
data=f.read(),
params=params)
if self.get_verbosity():
print("Indexing {}...".format(staging_audio_basename))
self.__timestamps_unregulated[
original_audio_name + ".wav"].append(
self._timestamp_extractor_ibm(
staging_audio_basename, json.loads(response.text)))
if self.get_verbosity():
print("Done indexing {}".format(staging_audio_basename))
self._timestamp_regulator()
if self.get_verbosity():
print("Indexing procedure finished") |
def last_kstp_from_kper(hds,kper):
""" function to find the last time step (kstp) for a
given stress period (kper) in a modflow head save file.
Parameters
----------
hds : flopy.utils.HeadFile
kper : int
the zero-index stress period number
Returns
-------
kstp : int
the zero-based last time step during stress period
kper in the head save file
"""
#find the last kstp with this kper
kstp = -1
for kkstp,kkper in hds.kstpkper:
if kkper == kper+1 and kkstp > kstp:
kstp = kkstp
if kstp == -1:
raise Exception("kstp not found for kper {0}".format(kper))
kstp -= 1
return kstp | function to find the last time step (kstp) for a
given stress period (kper) in a modflow head save file.
Parameters
----------
hds : flopy.utils.HeadFile
kper : int
the zero-index stress period number
Returns
-------
kstp : int
the zero-based last time step during stress period
kper in the head save file | Below is the instruction that describes the task:
### Input:
function to find the last time step (kstp) for a
given stress period (kper) in a modflow head save file.
Parameters
----------
hds : flopy.utils.HeadFile
kper : int
the zero-index stress period number
Returns
-------
kstp : int
the zero-based last time step during stress period
kper in the head save file
### Response:
def last_kstp_from_kper(hds,kper):
""" function to find the last time step (kstp) for a
given stress period (kper) in a modflow head save file.
Parameters
----------
hds : flopy.utils.HeadFile
kper : int
the zero-index stress period number
Returns
-------
kstp : int
the zero-based last time step during stress period
kper in the head save file
"""
#find the last kstp with this kper
kstp = -1
for kkstp,kkper in hds.kstpkper:
if kkper == kper+1 and kkstp > kstp:
kstp = kkstp
if kstp == -1:
raise Exception("kstp not found for kper {0}".format(kper))
kstp -= 1
return kstp |
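The lookup only needs the `kstpkper` attribute, so a stand-in object is enough to exercise it; the (kstp, kper) pairs below are invented and 1-based, as in MODFLOW output, and flopy itself is not required:
```python
class FakeHeadFile:
    # 1-based (kstp, kper) pairs as reported by a head save file
    kstpkper = [(1, 1), (2, 1), (1, 2), (2, 2), (3, 2)]

def last_kstp_from_kper(hds, kper):
    kstp = -1
    for kkstp, kkper in hds.kstpkper:
        if kkper == kper + 1 and kkstp > kstp:
            kstp = kkstp
    if kstp == -1:
        raise Exception("kstp not found for kper {0}".format(kper))
    return kstp - 1

print(last_kstp_from_kper(FakeHeadFile(), 1))   # 2  (zero-based last step of kper 1)
```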
def write_if_allowed(filename: str,
content: str,
overwrite: bool = False,
mock: bool = False) -> None:
"""
Writes the contents to a file, if permitted.
Args:
filename: filename to write
content: contents to write
overwrite: permit overwrites?
mock: pretend to write, but don't
Raises:
RuntimeError: if file exists but overwriting not permitted
"""
# Check we're allowed
if not overwrite and exists(filename):
fail("File exists, not overwriting: {!r}".format(filename))
# Make the directory, if necessary
directory = dirname(filename)
if not mock:
mkdir_p(directory)
# Write the file
log.info("Writing to {!r}", filename)
if mock:
log.warning("Skipping writes as in mock mode")
else:
with open(filename, "wt") as outfile:
outfile.write(content) | Writes the contents to a file, if permitted.
Args:
filename: filename to write
content: contents to write
overwrite: permit overwrites?
mock: pretend to write, but don't
Raises:
RuntimeError: if file exists but overwriting not permitted | Below is the instruction that describes the task:
### Input:
Writes the contents to a file, if permitted.
Args:
filename: filename to write
content: contents to write
overwrite: permit overwrites?
mock: pretend to write, but don't
Raises:
RuntimeError: if file exists but overwriting not permitted
### Response:
def write_if_allowed(filename: str,
content: str,
overwrite: bool = False,
mock: bool = False) -> None:
"""
Writes the contents to a file, if permitted.
Args:
filename: filename to write
content: contents to write
overwrite: permit overwrites?
mock: pretend to write, but don't
Raises:
RuntimeError: if file exists but overwriting not permitted
"""
# Check we're allowed
if not overwrite and exists(filename):
fail("File exists, not overwriting: {!r}".format(filename))
# Make the directory, if necessary
directory = dirname(filename)
if not mock:
mkdir_p(directory)
# Write the file
log.info("Writing to {!r}", filename)
if mock:
log.warning("Skipping writes as in mock mode")
else:
with open(filename, "wt") as outfile:
outfile.write(content) |
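A hedged usage sketch for write_if_allowed; the path and contents are invented for illustration, and fail/mkdir_p/log are the module-level helpers referenced in the code above.

write_if_allowed("/tmp/report/output.txt", "hello\n", overwrite=True)
# mock=True logs the intended write but creates no directory and writes nothing.
write_if_allowed("/tmp/report/output.txt", "hello\n", overwrite=True, mock=True)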
def appendGuideline(self, position=None, angle=None, name=None, color=None, guideline=None):
"""
Append a new guideline to the font.
>>> guideline = font.appendGuideline((50, 0), 90)
>>> guideline = font.appendGuideline((0, 540), 0, name="overshoot",
>>> color=(0, 0, 0, 0.2))
**position** must be a :ref:`type-coordinate`
indicating the position of the guideline.
**angle** indicates the :ref:`type-angle` of
the guideline. **name** indicates the name
for the guideline. This must be a :ref:`type-string`
or ``None``. **color** indicates the color for
the guideline. This must be a :ref:`type-color`
or ``None``. This will return the newly created
:class:`BaseGuideline` object.
``guideline`` may be a :class:`BaseGuideline` object from which
attribute values will be copied. If ``position``, ``angle``, ``name``
or ``color`` are specified as arguments, those values will be used
instead of the values in the given guideline object.
"""
identifier = None
if guideline is not None:
guideline = normalizers.normalizeGuideline(guideline)
if position is None:
position = guideline.position
if angle is None:
angle = guideline.angle
if name is None:
name = guideline.name
if color is None:
color = guideline.color
if guideline.identifier is not None:
existing = set([g.identifier for g in self.guidelines if g.identifier is not None])
if guideline.identifier not in existing:
identifier = guideline.identifier
position = normalizers.normalizeCoordinateTuple(position)
angle = normalizers.normalizeRotationAngle(angle)
if name is not None:
name = normalizers.normalizeGuidelineName(name)
if color is not None:
color = normalizers.normalizeColor(color)
identifier = normalizers.normalizeIdentifier(identifier)
guideline = self._appendGuideline(position, angle, name=name, color=color, identifier=identifier)
guideline.font = self
return guideline | Append a new guideline to the font.
>>> guideline = font.appendGuideline((50, 0), 90)
>>> guideline = font.appendGuideline((0, 540), 0, name="overshoot",
>>> color=(0, 0, 0, 0.2))
**position** must be a :ref:`type-coordinate`
indicating the position of the guideline.
**angle** indicates the :ref:`type-angle` of
the guideline. **name** indicates the name
for the guideline. This must be a :ref:`type-string`
or ``None``. **color** indicates the color for
the guideline. This must be a :ref:`type-color`
or ``None``. This will return the newly created
:class:`BaseGuideline` object.
``guideline`` may be a :class:`BaseGuideline` object from which
attribute values will be copied. If ``position``, ``angle``, ``name``
or ``color`` are specified as arguments, those values will be used
instead of the values in the given guideline object. | Below is the instruction that describes the task:
### Input:
Append a new guideline to the font.
>>> guideline = font.appendGuideline((50, 0), 90)
>>> guideline = font.appendGuideline((0, 540), 0, name="overshoot",
>>> color=(0, 0, 0, 0.2))
**position** must be a :ref:`type-coordinate`
indicating the position of the guideline.
**angle** indicates the :ref:`type-angle` of
the guideline. **name** indicates the name
for the guideline. This must be a :ref:`type-string`
or ``None``. **color** indicates the color for
the guideline. This must be a :ref:`type-color`
or ``None``. This will return the newly created
:class:`BaseGuideline` object.
``guideline`` may be a :class:`BaseGuideline` object from which
attribute values will be copied. If ``position``, ``angle``, ``name``
or ``color`` are specified as arguments, those values will be used
instead of the values in the given guideline object.
### Response:
def appendGuideline(self, position=None, angle=None, name=None, color=None, guideline=None):
"""
Append a new guideline to the font.
>>> guideline = font.appendGuideline((50, 0), 90)
>>> guideline = font.appendGuideline((0, 540), 0, name="overshoot",
>>> color=(0, 0, 0, 0.2))
**position** must be a :ref:`type-coordinate`
indicating the position of the guideline.
**angle** indicates the :ref:`type-angle` of
the guideline. **name** indicates the name
for the guideline. This must be a :ref:`type-string`
or ``None``. **color** indicates the color for
the guideline. This must be a :ref:`type-color`
or ``None``. This will return the newly created
:class:`BaseGuideline` object.
``guideline`` may be a :class:`BaseGuideline` object from which
attribute values will be copied. If ``position``, ``angle``, ``name``
or ``color`` are specified as arguments, those values will be used
instead of the values in the given guideline object.
"""
identifier = None
if guideline is not None:
guideline = normalizers.normalizeGuideline(guideline)
if position is None:
position = guideline.position
if angle is None:
angle = guideline.angle
if name is None:
name = guideline.name
if color is None:
color = guideline.color
if guideline.identifier is not None:
existing = set([g.identifier for g in self.guidelines if g.identifier is not None])
if guideline.identifier not in existing:
identifier = guideline.identifier
position = normalizers.normalizeCoordinateTuple(position)
angle = normalizers.normalizeRotationAngle(angle)
if name is not None:
name = normalizers.normalizeGuidelineName(name)
if color is not None:
color = normalizers.normalizeColor(color)
identifier = normalizers.normalizeIdentifier(identifier)
guideline = self._appendGuideline(position, angle, name=name, color=color, identifier=identifier)
guideline.font = self
return guideline |
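A usage sketch for appendGuideline, assuming a fontParts environment; "MyFont.ufo" is a placeholder.

from fontParts.world import OpenFont

font = OpenFont("MyFont.ufo")
overshoot = font.appendGuideline((0, 540), 0, name="overshoot", color=(0, 0, 0, 0.2))
# Copy an existing guideline, overriding only its name.
duplicate = font.appendGuideline(guideline=overshoot, name="overshoot copy")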
def set_temperature(self, temp):
"""Set both the driver and passenger temperature to temp."""
temp = round(temp, 1)
self.__manual_update_time = time.time()
data = self._controller.command(self._id, 'set_temps',
{"driver_temp": temp,
"passenger_temp": temp},
wake_if_asleep=True)
if data['response']['result']:
self.__driver_temp_setting = temp
self.__passenger_temp_setting = temp | Set both the driver and passenger temperature to temp. | Below is the instruction that describes the task:
### Input:
Set both the driver and passenger temperature to temp.
### Response:
def set_temperature(self, temp):
"""Set both the driver and passenger temperature to temp."""
temp = round(temp, 1)
self.__manual_update_time = time.time()
data = self._controller.command(self._id, 'set_temps',
{"driver_temp": temp,
"passenger_temp": temp},
wake_if_asleep=True)
if data['response']['result']:
self.__driver_temp_setting = temp
self.__passenger_temp_setting = temp |
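Sketch only: set_temperature is a method of a vehicle climate object (the code matches a Tesla API wrapper such as teslajsonpy); the climate variable below is a placeholder for such an object, whose construction is not shown here.

climate.set_temperature(21.5)  # both driver and passenger set points become 21.5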
def get_foreign_key_base_declaration_sql(self, foreign_key):
"""
Obtains DBMS specific SQL code portion needed to set the FOREIGN KEY constraint
of a field declaration to be used in statements like CREATE TABLE.
:param foreign_key: The foreign key
:type foreign_key: ForeignKeyConstraint
:rtype: str
"""
sql = ""
if foreign_key.get_name():
sql += "CONSTRAINT %s " % foreign_key.get_quoted_name(self)
sql += "FOREIGN KEY ("
if not foreign_key.get_local_columns():
raise DBALException('Incomplete definition. "local" required.')
if not foreign_key.get_foreign_columns():
raise DBALException('Incomplete definition. "foreign" required.')
if not foreign_key.get_foreign_table_name():
raise DBALException('Incomplete definition. "foreign_table" required.')
sql += "%s) REFERENCES %s (%s)" % (
", ".join(foreign_key.get_quoted_local_columns(self)),
foreign_key.get_quoted_foreign_table_name(self),
", ".join(foreign_key.get_quoted_foreign_columns(self)),
)
return sql | Obtains DBMS specific SQL code portion needed to set the FOREIGN KEY constraint
of a field declaration to be used in statements like CREATE TABLE.
:param foreign_key: The foreign key
:type foreign_key: ForeignKeyConstraint
:rtype: str | Below is the instruction that describes the task:
### Input:
Obtains DBMS specific SQL code portion needed to set the FOREIGN KEY constraint
of a field declaration to be used in statements like CREATE TABLE.
:param foreign_key: The foreign key
:type foreign_key: ForeignKeyConstraint
:rtype: str
### Response:
def get_foreign_key_base_declaration_sql(self, foreign_key):
"""
Obtains DBMS specific SQL code portion needed to set the FOREIGN KEY constraint
of a field declaration to be used in statements like CREATE TABLE.
:param foreign_key: The foreign key
:type foreign_key: ForeignKeyConstraint
:rtype: str
"""
sql = ""
if foreign_key.get_name():
sql += "CONSTRAINT %s " % foreign_key.get_quoted_name(self)
sql += "FOREIGN KEY ("
if not foreign_key.get_local_columns():
raise DBALException('Incomplete definition. "local" required.')
if not foreign_key.get_foreign_columns():
raise DBALException('Incomplete definition. "foreign" required.')
if not foreign_key.get_foreign_table_name():
raise DBALException('Incomplete definition. "foreign_table" required.')
sql += "%s) REFERENCES %s (%s)" % (
", ".join(foreign_key.get_quoted_local_columns(self)),
foreign_key.get_quoted_foreign_table_name(self),
", ".join(foreign_key.get_quoted_foreign_columns(self)),
)
return sql |
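A hedged sketch for get_foreign_key_base_declaration_sql; platform stands for an instance of the class defining this method, and the ForeignKeyConstraint constructor ordering shown (local columns, foreign table, foreign columns, name) is an assumption based on the Doctrine-style API this code mirrors.

fk = ForeignKeyConstraint(["user_id"], "users", ["id"], "fk_user")
print(platform.get_foreign_key_base_declaration_sql(fk))
# Expected shape: CONSTRAINT fk_user FOREIGN KEY (user_id) REFERENCES users (id)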
def create_workspace(namespace, name, authorizationDomain="", attributes=None):
"""Create a new FireCloud Workspace.
Args:
namespace (str): project to which workspace belongs
name (str): Workspace name
protected (bool): If True, this workspace is protected by dbGaP
credentials. This option is only available if your FireCloud
account is linked to your NIH account.
attributes (dict): Workspace attributes as key value pairs
Swagger:
https://api.firecloud.org/#!/Workspaces/createWorkspace
"""
if not attributes:
attributes = dict()
body = {
"namespace": namespace,
"name": name,
"attributes": attributes
}
if authorizationDomain:
authDomain = [{"membersGroupName": authorizationDomain}]
else:
authDomain = []
body["authorizationDomain"] = authDomain
return __post("workspaces", json=body) | Create a new FireCloud Workspace.
Args:
namespace (str): project to which workspace belongs
name (str): Workspace name
protected (bool): If True, this workspace is protected by dbGaP
credentials. This option is only available if your FireCloud
account is linked to your NIH account.
attributes (dict): Workspace attributes as key value pairs
Swagger:
https://api.firecloud.org/#!/Workspaces/createWorkspace | Below is the instruction that describes the task:
### Input:
Create a new FireCloud Workspace.
Args:
namespace (str): project to which workspace belongs
name (str): Workspace name
protected (bool): If True, this workspace is protected by dbGaP
credentials. This option is only available if your FireCloud
account is linked to your NIH account.
attributes (dict): Workspace attributes as key value pairs
Swagger:
https://api.firecloud.org/#!/Workspaces/createWorkspace
### Response:
def create_workspace(namespace, name, authorizationDomain="", attributes=None):
"""Create a new FireCloud Workspace.
Args:
namespace (str): project to which workspace belongs
name (str): Workspace name
protected (bool): If True, this workspace is protected by dbGaP
credentials. This option is only available if your FireCloud
account is linked to your NIH account.
attributes (dict): Workspace attributes as key value pairs
Swagger:
https://api.firecloud.org/#!/Workspaces/createWorkspace
"""
if not attributes:
attributes = dict()
body = {
"namespace": namespace,
"name": name,
"attributes": attributes
}
if authorizationDomain:
authDomain = [{"membersGroupName": authorizationDomain}]
else:
authDomain = []
body["authorizationDomain"] = authDomain
return __post("workspaces", json=body) |