def show_toast(self, msg, long=True):
    """ Show a toast message for the given duration.
    This is an android specific api.
    Parameters
    -----------
    msg: str
        Text to display in the toast message
    long: bool
        Display for a long or short (system defined) duration
    """
    from .android_toast import Toast

    def on_toast(ref):
        t = Toast(__id__=ref)
        t.show()

    Toast.makeText(self, msg, 1 if long else 0).then(on_toast)
def set_fallback(self, target):
    """Sets a fallback configuration for section.
    Re-exec uWSGI with the specified config when exit code is 1.
    :param str|unicode|Section target: File path or Section to include.
    """
    if isinstance(target, Section):
        target = ':' + target.name
    self._set('fallback-config', target)
    return self
def _init_all_stages(self, config):
    '''Create stages that are used for the pipeline.
    :param dict config: `streamcorpus_pipeline` configuration
    :return: tuple of (reader, incremental transforms, batch
      transforms, post-batch incremental transforms, writers,
      temporary directory)
    '''
    reader = self._init_stage(config, 'reader')
    incremental_transforms = self._init_stages(
        config, 'incremental_transforms')
    batch_transforms = self._init_stages(config, 'batch_transforms')
    post_batch_incremental_transforms = self._init_stages(
        config, 'post_batch_incremental_transforms')
    writers = self._init_stages(config, 'writers')
    tmp_dir_path = os.path.join(config['tmp_dir_path'],
                                self.tmp_dir_suffix)
    return (reader, incremental_transforms, batch_transforms,
            post_batch_incremental_transforms, writers, tmp_dir_path)
def absolute_magnitude(self, richness=1, steps=1e4):
    """
    Calculate the absolute visual magnitude (Mv) from the richness
    by transforming the isochrone in the SDSS system and using the
    g,r -> V transform equations from Jester 2005
    [astro-ph/0506022].
    Parameters:
    -----------
    richness : isochrone normalization parameter
    steps : number of isochrone sampling steps
    Returns:
    --------
    abs_mag : Absolute magnitude (Mv)
    """
    # Using the SDSS g,r -> V from Jester 2005 [astro-ph/0506022]
    # for stars with R-I < 1.15
    # V = g_sdss - 0.59*(g_sdss - r_sdss) - 0.01
    # Create a copy of the isochrone in the SDSS system
    params = {k: v.value for k, v in self._params.items()}
    params.update(band_1='g', band_2='r', survey='sdss')
    iso = self.__class__(**params)
    # g, r are absolute magnitudes
    mass_init, mass_pdf, mass_act, sdss_g, sdss_r = iso.sample(mass_steps=steps)
    V = jester_mag_v(sdss_g, sdss_r)
    # Sum the V-band absolute magnitudes
    return sum_mags(V, weights=mass_pdf*richness)
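The helpers jester_mag_v and sum_mags are referenced but not shown here. A minimal sketch consistent with the Jester 2005 transform quoted in the comment might look like the following; the flux-summing convention in sum_mags is an assumption, not taken from this source.

import numpy as np

def jester_mag_v(g_sdss, r_sdss):
    # Jester et al. 2005 g,r -> V transform for stars with R-I < 1.15.
    return g_sdss - 0.59 * (g_sdss - r_sdss) - 0.01

def sum_mags(mags, weights=None):
    # Combine magnitudes by summing fluxes (assumed convention).
    mags = np.asarray(mags)
    weights = np.ones_like(mags) if weights is None else np.asarray(weights)
    flux = np.sum(weights * 10 ** (-0.4 * mags))
    return -2.5 * np.log10(flux)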
def write_const_tpl(name, tpl_file, suffix, zn_array=None, shape=None, spatial_reference=None,
                    longnames=False):
    """ write a constant (uniform) template file
    Parameters
    ----------
    name : str
        the base parameter name
    tpl_file : str
        the template file to write - include path
    zn_array : numpy.ndarray
        an array used to skip inactive cells
    Returns
    -------
    df : pandas.DataFrame
        a dataframe with parameter information
    """
    if shape is None and zn_array is None:
        raise Exception("must pass either zn_array or shape")
    elif shape is None:
        shape = zn_array.shape
    parnme = []
    with open(tpl_file, 'w') as f:
        f.write("ptf ~\n")
        for i in range(shape[0]):
            for j in range(shape[1]):
                if zn_array is not None and zn_array[i, j] < 1:
                    pname = " 1.0 "
                else:
                    if longnames:
                        pname = "const_{0}_{1}".format(name, suffix)
                    else:
                        pname = "{0}{1}".format(name, suffix)
                        if len(pname) > 12:
                            raise Exception("zone pname too long:{0}".format(pname))
                    parnme.append(pname)
                    pname = " ~ {0} ~".format(pname)
                f.write(pname)
            f.write("\n")
    df = pd.DataFrame({"parnme": parnme}, index=parnme)
    # df.loc[:,"pargp"] = "{0}{1}".format(self.cn_suffixname)
    df.loc[:, "pargp"] = "{0}_{1}".format(name, suffix.replace('_', ''))
    df.loc[:, "tpl"] = tpl_file
    return df
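A hedged usage sketch follows; the grid shape, parameter name, and file name are made up for illustration, and the function itself expects pandas to be imported as pd in the module.

import numpy as np

# Hypothetical 3x4 model grid; zeros mark inactive cells that keep a fixed value of 1.0.
zones = np.ones((3, 4), dtype=int)
zones[0, 0] = 0

df = write_const_tpl("hk", "hk_layer1.tpl", "_l1", zn_array=zones)
print(df[["parnme", "pargp", "tpl"]].head())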
def _calculate_column_widths(self):
    """Calculate width of column automatically based on data."""
    table_width = self.get_table_width()
    lpw, rpw = self._left_padding_widths, self._right_padding_widths
    pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]
    max_widths = [0 for index in range(self._column_count)]
    offset = table_width - sum(self._column_widths) + sum(pad_widths)
    self._max_table_width = max(self._max_table_width,
                                offset + self._column_count)
    for index, column in enumerate(zip(*self._table)):
        max_length = 0
        for i in column:
            for j in to_unicode(i).split('\n'):
                output_str = get_output_str(j, self.detect_numerics,
                                            self.numeric_precision,
                                            self.sign_mode.value)
                max_length = max(max_length, termwidth(output_str))
        for i in to_unicode(self._column_headers[index]).split('\n'):
            output_str = get_output_str(i, self.detect_numerics,
                                        self.numeric_precision,
                                        self.sign_mode.value)
            max_length = max(max_length, termwidth(output_str))
        max_widths[index] += max_length
    sum_ = sum(max_widths)
    desired_sum = self._max_table_width - offset
    # Set flag for columns that are within their fair share
    temp_sum = 0
    flag = [0] * len(max_widths)
    for i, width in enumerate(max_widths):
        if width <= int(desired_sum / self._column_count):
            temp_sum += width
            flag[i] = 1
        else:
            # Allocate at least 1 character width to the column
            temp_sum += 1
    avail_space = desired_sum - temp_sum
    actual_space = sum_ - temp_sum
    shrinked_columns = {}
    # Columns which exceed their fair share should be shrunk based on
    # how much space is left for the table
    for i, width in enumerate(max_widths):
        self.column_widths[i] = width
        if not flag[i]:
            new_width = 1 + int((width-1) * avail_space / actual_space)
            if new_width < width:
                self.column_widths[i] = new_width
                shrinked_columns[new_width] = i
    # Divide any remaining space among shrinked columns
    if shrinked_columns:
        extra = (self._max_table_width
                 - offset
                 - sum(self.column_widths))
        actual_space = sum(shrinked_columns)
        if extra > 0:
            for i, width in enumerate(sorted(shrinked_columns)):
                index = shrinked_columns[width]
                extra_width = int(width * extra / actual_space)
                self.column_widths[i] += extra_width
                if i == (len(shrinked_columns) - 1):
                    extra = (self._max_table_width
                             - offset
                             - sum(self.column_widths))
                    self.column_widths[index] += extra
    for i in range(self.column_count):
        self.column_widths[i] += pad_widths[i]
def fill_gaps(list_dicts):
    """
    Fill gaps in a list of dictionaries. Add empty keys to dictionaries in
    the list that don't contain other entries' keys
    :param list_dicts: A list of dictionaries
    :return: A list of field names, a list of dictionaries with identical keys
    """
    field_names = []  # not a set, because preserving order is better for output
    for datum in list_dicts:
        for key in datum.keys():
            if key not in field_names:
                field_names.append(key)
    for datum in list_dicts:
        for key in field_names:
            if key not in datum:
                datum[key] = ''
    return list(field_names), list_dicts
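A quick illustration of the gap-filling behaviour on made-up records:

rows = [{"name": "ada", "role": "engineer"}, {"name": "grace"}]
fields, filled = fill_gaps(rows)
print(fields)   # ['name', 'role']
print(filled)   # [{'name': 'ada', 'role': 'engineer'}, {'name': 'grace', 'role': ''}]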
def set_field(self, field_name, data):
    """Set property into the Dataset.
    Parameters
    ----------
    field_name : string
        The field name of the information.
    data : list, numpy 1-D array, pandas Series or None
        The array of data to be set.
    Returns
    -------
    self : Dataset
        Dataset with set property.
    """
    if self.handle is None:
        raise Exception("Cannot set %s before construct dataset" % field_name)
    if data is None:
        # set to None
        _safe_call(_LIB.LGBM_DatasetSetField(
            self.handle,
            c_str(field_name),
            None,
            ctypes.c_int(0),
            ctypes.c_int(FIELD_TYPE_MAPPER[field_name])))
        return self
    dtype = np.float32
    if field_name == 'group':
        dtype = np.int32
    elif field_name == 'init_score':
        dtype = np.float64
    data = list_to_1d_numpy(data, dtype, name=field_name)
    if data.dtype == np.float32 or data.dtype == np.float64:
        ptr_data, type_data, _ = c_float_array(data)
    elif data.dtype == np.int32:
        ptr_data, type_data, _ = c_int_array(data)
    else:
        raise TypeError("Expected np.float32/64 or np.int32, got type({})".format(data.dtype))
    if type_data != FIELD_TYPE_MAPPER[field_name]:
        raise TypeError("Input type error for set_field")
    _safe_call(_LIB.LGBM_DatasetSetField(
        self.handle,
        c_str(field_name),
        ptr_data,
        ctypes.c_int(len(data)),
        ctypes.c_int(type_data)))
    return self
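A hedged usage sketch, assuming a lightgbm.Dataset that has already been constructed; the data values are made up.

import numpy as np
import lightgbm as lgb

X = np.random.rand(100, 5)
y = np.random.randint(0, 2, 100)
ds = lgb.Dataset(X, label=y).construct()

# Override the per-row weights stored in the Dataset.
ds.set_field('weight', np.ones(100, dtype=np.float32))
print(ds.get_field('weight')[:5])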
def _schemaPrepareInsert(self, store):
    """
    Prepare each attribute in my schema for insertion into a given store,
    either by upgrade or by creation. This makes sure all references point
    to this store and all relative paths point to this store's files
    directory.
    """
    for name, atr in self.getSchema():
        atr.prepareInsert(self, store)
def AddFilesWithUnknownHashes(
        client_path_blob_refs,
        use_external_stores=True
):
    """Adds new files consisting of given blob references.
    Args:
      client_path_blob_refs: A dictionary mapping `db.ClientPath` instances to
        lists of blob references.
      use_external_stores: A flag indicating if the files should also be added to
        external file stores.
    Returns:
      A dictionary mapping `db.ClientPath` to hash ids of the file.
    Raises:
      BlobNotFoundError: If one of the referenced blobs cannot be found.
    """
    hash_id_blob_refs = dict()
    client_path_hash_id = dict()
    metadatas = dict()

    all_client_path_blob_refs = list()
    for client_path, blob_refs in iteritems(client_path_blob_refs):
        # In the special case where there is only one blob, we don't need to go to
        # the data store to read said blob and rehash it, we have all that
        # information already available. For empty files without blobs, we can just
        # hash the empty string instead.
        if len(blob_refs) <= 1:
            if blob_refs:
                hash_id = rdf_objects.SHA256HashID.FromBytes(
                    blob_refs[0].blob_id.AsBytes())
            else:
                hash_id = rdf_objects.SHA256HashID.FromData(b"")
            client_path_hash_id[client_path] = hash_id
            hash_id_blob_refs[hash_id] = blob_refs
            metadatas[hash_id] = FileMetadata(
                client_path=client_path, blob_refs=blob_refs)
        else:
            for blob_ref in blob_refs:
                all_client_path_blob_refs.append((client_path, blob_ref))

    client_path_offset = collections.defaultdict(lambda: 0)
    client_path_sha256 = collections.defaultdict(hashlib.sha256)
    verified_client_path_blob_refs = collections.defaultdict(list)

    client_path_blob_ref_batches = collection.Batch(
        items=all_client_path_blob_refs, size=_BLOBS_READ_BATCH_SIZE)
    for client_path_blob_ref_batch in client_path_blob_ref_batches:
        blob_id_batch = set(
            blob_ref.blob_id for _, blob_ref in client_path_blob_ref_batch)
        blobs = data_store.BLOBS.ReadBlobs(blob_id_batch)
        for client_path, blob_ref in client_path_blob_ref_batch:
            blob = blobs[blob_ref.blob_id]
            if blob is None:
                message = "Could not find one of referenced blobs: {}".format(
                    blob_ref.blob_id)
                raise BlobNotFoundError(message)

            offset = client_path_offset[client_path]
            if blob_ref.size != len(blob):
                raise ValueError(
                    "Got conflicting size information for blob %s: %d vs %d." %
                    (blob_ref.blob_id, blob_ref.size, len(blob)))
            if blob_ref.offset != offset:
                raise ValueError(
                    "Got conflicting offset information for blob %s: %d vs %d." %
                    (blob_ref.blob_id, blob_ref.offset, offset))

            verified_client_path_blob_refs[client_path].append(blob_ref)
            client_path_offset[client_path] = offset + len(blob)
            client_path_sha256[client_path].update(blob)

    for client_path in iterkeys(client_path_sha256):
        sha256 = client_path_sha256[client_path].digest()
        hash_id = rdf_objects.SHA256HashID.FromBytes(sha256)
        client_path_hash_id[client_path] = hash_id
        hash_id_blob_refs[hash_id] = verified_client_path_blob_refs[client_path]

    data_store.REL_DB.WriteHashBlobReferences(hash_id_blob_refs)

    if use_external_stores:
        for client_path in iterkeys(verified_client_path_blob_refs):
            metadatas[client_path_hash_id[client_path]] = FileMetadata(
                client_path=client_path,
                blob_refs=verified_client_path_blob_refs[client_path])
        EXTERNAL_FILE_STORE.AddFiles(metadatas)

    return client_path_hash_id
def setup_psd_pregenerated(workflow, tags=None):
    '''
    Setup CBC workflow to use pregenerated psd files.
    The file given in cp.get('workflow','pregenerated-psd-file-(ifo)') will
    be used as the --psd-file argument to geom_nonspinbank, geom_aligned_bank
    and pycbc_plot_psd_file.
    Parameters
    ----------
    workflow: pycbc.workflow.core.Workflow
        An instanced class that manages the constructed workflow.
    tags : list of strings
        If given these tags are used to uniquely name and identify output files
        that would be produced in multiple calls to this function.
    Returns
    --------
    psd_files : pycbc.workflow.core.FileList
        The FileList holding the PSD files
    '''
    if tags is None:
        tags = []
    psd_files = FileList([])

    cp = workflow.cp
    global_seg = workflow.analysis_time
    user_tag = "PREGEN_PSD"

    # Check for one psd for all ifos
    try:
        pre_gen_file = cp.get_opt_tags('workflow-psd',
                                       'psd-pregenerated-file', tags)
        pre_gen_file = resolve_url(pre_gen_file)
        file_url = urlparse.urljoin('file:',
                                    urllib.pathname2url(pre_gen_file))
        curr_file = File(workflow.ifos, user_tag, global_seg, file_url,
                         tags=tags)
        curr_file.PFN(file_url, site='local')
        psd_files.append(curr_file)
    except ConfigParser.Error:
        # Check for one psd per ifo
        for ifo in workflow.ifos:
            try:
                pre_gen_file = cp.get_opt_tags('workflow-psd',
                                               'psd-pregenerated-file-%s' % ifo.lower(),
                                               tags)
                pre_gen_file = resolve_url(pre_gen_file)
                file_url = urlparse.urljoin('file:',
                                            urllib.pathname2url(pre_gen_file))
                curr_file = File(ifo, user_tag, global_seg, file_url,
                                 tags=tags)
                curr_file.PFN(file_url, site='local')
                psd_files.append(curr_file)
            except ConfigParser.Error:
                # It's unlikely, but not impossible, that only some ifos
                # will have pregenerated PSDs
                logging.warn("No psd file specified for IFO %s." % (ifo,))
                pass

    return psd_files
def list_databases(self, like=None):
    """
    List databases in the Clickhouse cluster.
    Like the SHOW DATABASES command in the clickhouse-shell.
    Parameters
    ----------
    like : string, default None
        SQL LIKE pattern, e.g. 'foo%' to match all databases starting
        with 'foo'
    Returns
    -------
    databases : list of strings
    """
    statement = 'SELECT name FROM system.databases'
    if like:
        statement += " WHERE name LIKE '{0}'".format(like)
    data, _, _ = self.raw_sql(statement, results=True)
    return data[0]
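A hedged usage sketch, assuming con is an already-connected ClickHouse client object that exposes this method:

print(con.list_databases())              # all database names
print(con.list_databases(like='prod%'))  # only names starting with 'prod'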
def put(self, job, result):
    "Perform a job by a member in the pool and return the result."
    self.job.put(job)
    r = result.get()
    return r
def create():
    """Create a new post for the current user."""
    if request.method == "POST":
        title = request.form["title"]
        body = request.form["body"]
        error = None

        if not title:
            error = "Title is required."

        if error is not None:
            flash(error)
        else:
            db.session.add(Post(title=title, body=body, author=g.user))
            db.session.commit()
            return redirect(url_for("blog.index"))

    return render_template("blog/create.html")
def pop_context(self):
    """Pops the last set of keyword arguments provided to the processor."""
    processor = getattr(self, 'processor', None)
    if processor is not None:
        pop_context = getattr(processor, 'pop_context', None)
        if pop_context is None:
            pop_context = getattr(processor, 'pop', None)
        if pop_context is not None:
            return pop_context()
    if self._pop_next:
        self._pop_next = False
def guess_content_type_and_encoding(path):
    """Guess the content type of a path, using ``mimetypes``.
    Falls back to "application/binary" if no content type is found.
    Args:
        path (str): the path to guess the mimetype of
    Returns:
        tuple: the content type and encoding of the file
    """
    for ext, content_type in _EXTENSION_TO_MIME_TYPE.items():
        if path.endswith(ext):
            return content_type
    content_type, encoding = mimetypes.guess_type(path)
    content_type = content_type or "application/binary"
    return content_type, encoding
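A small illustration, assuming _EXTENSION_TO_MIME_TYPE is a module-level mapping from extensions to (content_type, encoding) pairs; its exact contents are not shown here, so the first result is indicative only.

print(guess_content_type_and_encoding("logs/live_backing.log"))  # e.g. ('text/plain', None) if '.log' is in the map
print(guess_content_type_and_encoding("build/target.tar.gz"))    # ('application/x-tar', 'gzip') via mimetypes
print(guess_content_type_and_encoding("artifact.unknown"))       # ('application/binary', None) fallback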
def load_variable(self, var=None, start_date=None, end_date=None,
                  time_offset=None, grid_attrs=None, **DataAttrs):
    """Load a DataArray for requested variable and time range.
    Automatically renames all grid attributes to match aospy conventions.
    Parameters
    ----------
    var : Var
        aospy Var object
    start_date : datetime.datetime
        start date for interval
    end_date : datetime.datetime
        end date for interval
    time_offset : dict
        Option to add a time offset to the time coordinate to correct for
        incorrect metadata.
    grid_attrs : dict (optional)
        Overriding dictionary of grid attributes mapping aospy internal
        names to names of grid attributes used in a particular model.
    **DataAttrs
        Attributes needed to identify a unique set of files to load from
    Returns
    -------
    da : DataArray
        DataArray for the specified variable, date range, and interval in
    """
    file_set = self._generate_file_set(var=var, start_date=start_date,
                                       end_date=end_date, **DataAttrs)
    ds = _load_data_from_disk(
        file_set, self.preprocess_func, data_vars=self.data_vars,
        coords=self.coords, start_date=start_date, end_date=end_date,
        time_offset=time_offset, grid_attrs=grid_attrs, **DataAttrs
    )
    if var.def_time:
        ds = _prep_time_data(ds)
        start_date = times.maybe_convert_to_index_date_type(
            ds.indexes[TIME_STR], start_date)
        end_date = times.maybe_convert_to_index_date_type(
            ds.indexes[TIME_STR], end_date)
    ds = set_grid_attrs_as_coords(ds)
    da = _sel_var(ds, var, self.upcast_float32)
    if var.def_time:
        da = self._maybe_apply_time_shift(da, time_offset, **DataAttrs)
        return times.sel_time(da, start_date, end_date).load()
    else:
        return da.load()
def breakfast(self, message="Breakfast is ready", shout: bool = False):
    """Say something in the morning"""
    return self.helper.output(message, shout)
def _index_range(self, version, symbol, date_range=None, **kwargs):
    """ Given a version, read the segment_index and return the chunks associated
    with the date_range. As the segment index is (id -> last datetime)
    we need to take care in choosing the correct chunks. """
    if date_range and 'segment_index' in version:
        # index is read-only but it's never written to
        index = np.frombuffer(decompress(version['segment_index']), dtype=INDEX_DTYPE)
        dtcol = self._datetime64_index(index)
        if dtcol and len(index):
            dts = index[dtcol]
            start, end = _start_end(date_range, dts)
            if start > dts[-1]:
                return -1, -1
            idxstart = min(np.searchsorted(dts, start), len(dts) - 1)
            idxend = min(np.searchsorted(dts, end, side='right'), len(dts) - 1)
            return int(index['index'][idxstart]), int(index['index'][idxend] + 1)
    return super(PandasStore, self)._index_range(version, symbol, **kwargs)
def render_template(self, plain, rich=None, **context):
    '''Render the body of the message from a template. The plain
    body will be rendered from a template named ``plain`` or
    ``plain + '.txt'`` (in that order of preference). The rich
    body will be rendered from ``rich`` if given, or else from
    ``plain + '.html'``. If neither exists, then the message will
    have no rich body.'''
    self.plain = render_template([plain, plain + '.txt'], **context)
    if rich is not None:
        self.rich = render_template(rich, **context)
    else:
        try:
            self.rich = render_template(plain + '.html', **context)
        except TemplateNotFound:
            pass
def stop(self, key):
    """
    Stop a concurrent operation.
    This gets the concurrency limiter for the given key (creating it if
    necessary) and stops a concurrent operation on it. If the concurrency
    limiter is empty, it is deleted.
    """
    self._get_limiter(key).stop()
    self._cleanup_limiter(key)
def setname(self, dim_name):
    """Set the dimension name.
    Args::
        dim_name    dimension name; setting 2 dimensions to the same
                    name makes the dimensions "shared"; in order to be
                    shared, the dimensions must be defined similarly.
    Returns::
        None
    C library equivalent : SDsetdimname
    """
    status = _C.SDsetdimname(self._id, dim_name)
    _checkErr('setname', status, 'cannot execute')
def pack_ihex(type_, address, size, data):
    """Create an Intel HEX record of given data.
    """
    line = '{:02X}{:04X}{:02X}'.format(size, address, type_)
    if data:
        line += binascii.hexlify(data).decode('ascii').upper()
    return ':{}{:02X}'.format(line, crc_ihex(line))
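The checksum helper crc_ihex is not shown here. A plausible sketch, assuming the standard Intel HEX convention (two's complement of the byte sum of the record), plus an example record; the exact output depends on the real crc_ihex.

import binascii

def crc_ihex(line):
    # Assumed: standard Intel HEX checksum over the record bytes.
    total = sum(bytearray(binascii.unhexlify(line)))
    return (-total) & 0xff

# Data record (type 0) with three bytes at address 0x0030.
print(pack_ihex(0, 0x0030, 3, b'\x02\x33\x7a'))  # ':0300300002337A1E'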
def get_model(self, opt_fn, emb_sz, n_hid, n_layers, **kwargs):
    """ Method returns a RNN_Learner object, that wraps an instance of the RNN_Encoder module.
    Args:
        opt_fn (Optimizer): the torch optimizer function to use
        emb_sz (int): embedding size
        n_hid (int): number of hidden inputs
        n_layers (int): number of hidden layers
        kwargs: other arguments
    Returns:
        An instance of the RNN_Learner class.
    """
    m = get_language_model(self.nt, emb_sz, n_hid, n_layers, self.pad_idx, **kwargs)
    model = SingleModel(to_gpu(m))
    return RNN_Learner(self, model, opt_fn=opt_fn)
def tokens(self, si, k):
    '''`si` is a stream item and `k` is a key in this feature. The purpose
    of this method is to dereference the token pointers with
    respect to the given stream item. That is, it translates each
    sequence of token pointers to a sequence of `Token`.
    '''
    for tokens in self[k]:
        yield [si.body.sentences[tagid][sid].tokens[tid]
               for tagid, sid, tid in tokens]
def _parse_plt_segment(self, fptr):
    """Parse the PLT segment.
    The packet headers are not parsed, i.e. they remain uninterpreted raw
    data buffers.
    Parameters
    ----------
    fptr : file
        Open file object.
    Returns
    -------
    PLTSegment
        The current PLT segment.
    """
    offset = fptr.tell() - 2

    read_buffer = fptr.read(3)
    length, zplt = struct.unpack('>HB', read_buffer)

    numbytes = length - 3
    read_buffer = fptr.read(numbytes)
    iplt = np.frombuffer(read_buffer, dtype=np.uint8)

    packet_len = []
    plen = 0
    for byte in iplt:
        plen |= (byte & 0x7f)
        if byte & 0x80:
            # Continue by or-ing in the next byte.
            plen <<= 7
        else:
            packet_len.append(plen)
            plen = 0

    iplt = packet_len
    return PLTsegment(zplt, iplt, length, offset)
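The Iplt field is a sequence of 7-bit groups in which the high bit flags continuation. A tiny standalone illustration of the same decoding loop on made-up bytes:

raw = [0x85, 0x02, 0x7f]  # hypothetical Iplt bytes: one two-byte length, then one one-byte length

packet_len, plen = [], 0
for byte in raw:
    plen |= (byte & 0x7f)
    if byte & 0x80:
        plen <<= 7      # high bit set: more bytes follow
    else:
        packet_len.append(plen)
        plen = 0

print(packet_len)  # [642, 127]; 0x85,0x02 decodes to (5 << 7) | 2 = 642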
def in_download_archive(track):
    """
    Returns True if a track_id exists in the download archive
    """
    global arguments
    if not arguments['--download-archive']:
        return

    archive_filename = arguments.get('--download-archive')
    try:
        with open(archive_filename, 'a+', encoding='utf-8') as file:
            logger.debug('Contents of {0}:'.format(archive_filename))
            file.seek(0)
            track_id = '{0}'.format(track['id'])
            for line in file:
                logger.debug('"' + line.strip() + '"')
                if line.strip() == track_id:
                    return True
    except IOError as ioe:
        logger.error('Error trying to read download archive...')
        logger.debug(ioe)
    return False
def CountHuntLogEntries(self, hunt_id, cursor=None):
"""Returns number of hunt log entries of a given hunt."""
hunt_id_int = db_utils.HuntIDToInt(hunt_id)
query = ("SELECT COUNT(*) FROM flow_log_entries "
"FORCE INDEX(flow_log_entries_by_hunt) "
"WHERE hunt_id = %s AND flow_id = hunt_id")
cursor.execute(query, [hunt_id_int])
return cursor.fetchone()[0] | Returns number of hunt log entries of a given hunt. | Below is the the instruction that describes the task:
### Input:
Returns number of hunt log entries of a given hunt.
### Response:
def CountHuntLogEntries(self, hunt_id, cursor=None):
"""Returns number of hunt log entries of a given hunt."""
hunt_id_int = db_utils.HuntIDToInt(hunt_id)
query = ("SELECT COUNT(*) FROM flow_log_entries "
"FORCE INDEX(flow_log_entries_by_hunt) "
"WHERE hunt_id = %s AND flow_id = hunt_id")
cursor.execute(query, [hunt_id_int])
return cursor.fetchone()[0] |
def plot_ebands(self, **kwargs):
"""
Plot the band structure. kwargs are passed to the plot method of :class:`ElectronBands`.
Returns:
`matplotlib` figure
"""
with self.nscf_task.open_gsr() as gsr:
return gsr.ebands.plot(**kwargs) | Plot the band structure. kwargs are passed to the plot method of :class:`ElectronBands`.
Returns:
`matplotlib` figure | Below is the the instruction that describes the task:
### Input:
Plot the band structure. kwargs are passed to the plot method of :class:`ElectronBands`.
Returns:
`matplotlib` figure
### Response:
def plot_ebands(self, **kwargs):
"""
Plot the band structure. kwargs are passed to the plot method of :class:`ElectronBands`.
Returns:
`matplotlib` figure
"""
with self.nscf_task.open_gsr() as gsr:
return gsr.ebands.plot(**kwargs) |
def enable_node(self, service_name, node_name):
"""
Enables a given node name for the given service name via the
"enable server" HAProxy command.
"""
logger.info("Enabling server %s/%s", service_name, node_name)
return self.send_command(
"enable server %s/%s" % (service_name, node_name)
) | Enables a given node name for the given service name via the
"enable server" HAProxy command. | Below is the the instruction that describes the task:
### Input:
Enables a given node name for the given service name via the
"enable server" HAProxy command.
### Response:
def enable_node(self, service_name, node_name):
"""
Enables a given node name for the given service name via the
"enable server" HAProxy command.
"""
logger.info("Enabling server %s/%s", service_name, node_name)
return self.send_command(
"enable server %s/%s" % (service_name, node_name)
) |
def linkage(self):
"""Return the linkage of this cursor."""
if not hasattr(self, '_linkage'):
self._linkage = conf.lib.clang_getCursorLinkage(self)
return LinkageKind.from_id(self._linkage) | Return the linkage of this cursor. | Below is the the instruction that describes the task:
### Input:
Return the linkage of this cursor.
### Response:
def linkage(self):
"""Return the linkage of this cursor."""
if not hasattr(self, '_linkage'):
self._linkage = conf.lib.clang_getCursorLinkage(self)
return LinkageKind.from_id(self._linkage) |
def context(self, line):
"""
Return the context for a given 1-offset line number.
"""
# XXX due to a limitation in Visitor,
# non-python code after the last python code
# in a file is not added to self.lines, so we
# have to guard against IndexErrors.
idx = line - 1
if idx >= len(self.lines):
return self.prefix
return self.lines[idx] | Return the context for a given 1-offset line number. | Below is the the instruction that describes the task:
### Input:
Return the context for a given 1-offset line number.
### Response:
def context(self, line):
"""
Return the context for a given 1-offset line number.
"""
# XXX due to a limitation in Visitor,
# non-python code after the last python code
# in a file is not added to self.lines, so we
# have to guard against IndexErrors.
idx = line - 1
if idx >= len(self.lines):
return self.prefix
return self.lines[idx] |
def max_size(self):
"""
Gets the largest size of the object over all timesteps.
Returns:
Maximum size of the object in pixels
"""
sizes = np.array([m.sum() for m in self.masks])
return sizes.max() | Gets the largest size of the object over all timesteps.
Returns:
Maximum size of the object in pixels | Below is the the instruction that describes the task:
### Input:
Gets the largest size of the object over all timesteps.
Returns:
Maximum size of the object in pixels
### Response:
def max_size(self):
"""
Gets the largest size of the object over all timesteps.
Returns:
Maximum size of the object in pixels
"""
sizes = np.array([m.sum() for m in self.masks])
return sizes.max() |
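A minimal sketch of the same computation, with plain numpy arrays standing in for a tracked object's masks attribute (the values are made up):
import numpy as np
masks = [np.array([[1, 0], [1, 1]]), np.array([[0, 0], [1, 0]])]
sizes = np.array([m.sum() for m in masks])
print(sizes.max())  # -> 3, the largest per-timestep pixel count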
def ip_to_geojson(ipaddress, name="Point"):
"""Generate GeoJSON for given IP address"""
geo = ip_to_geo(ipaddress)
point = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {
"name": name
},
"geometry": {
"type": "Point",
"coordinates": [
geo["longitude"],
geo["latitude"]
]
}
}
]
}
return point | Generate GeoJSON for given IP address | Below is the the instruction that describes the task:
### Input:
Generate GeoJSON for given IP address
### Response:
def ip_to_geojson(ipaddress, name="Point"):
"""Generate GeoJSON for given IP address"""
geo = ip_to_geo(ipaddress)
point = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {
"name": name
},
"geometry": {
"type": "Point",
"coordinates": [
geo["longitude"],
geo["latitude"]
]
}
}
]
}
return point |
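A hedged usage sketch; it assumes ip_to_geo() (defined elsewhere in the same module) returns a dict with "latitude" and "longitude" keys, and the address below is only an example:
feature_collection = ip_to_geojson("8.8.8.8", name="Example resolver")
# GeoJSON stores coordinates longitude-first:
print(feature_collection["features"][0]["geometry"]["coordinates"])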
def match(self, table, nomatch=0):
"""
Make a vector of the positions of (first) matches of its first argument in its second.
Only applicable to single-column categorical/string frames.
:param List table: the list of items to match against
:param int nomatch: value that should be returned when there is no match.
:returns: a new H2OFrame containing for each cell from the source frame the index where
the pattern ``table`` first occurs within that cell.
"""
return H2OFrame._expr(expr=ExprNode("match", self, table, nomatch, None)) | Make a vector of the positions of (first) matches of its first argument in its second.
Only applicable to single-column categorical/string frames.
:param List table: the list of items to match against
:param int nomatch: value that should be returned when there is no match.
:returns: a new H2OFrame containing for each cell from the source frame the index where
the pattern ``table`` first occurs within that cell. | Below is the the instruction that describes the task:
### Input:
Make a vector of the positions of (first) matches of its first argument in its second.
Only applicable to single-column categorical/string frames.
:param List table: the list of items to match against
:param int nomatch: value that should be returned when there is no match.
:returns: a new H2OFrame containing for each cell from the source frame the index where
the pattern ``table`` first occurs within that cell.
### Response:
def match(self, table, nomatch=0):
"""
Make a vector of the positions of (first) matches of its first argument in its second.
Only applicable to single-column categorical/string frames.
:param List table: the list of items to match against
:param int nomatch: value that should be returned when there is no match.
:returns: a new H2OFrame containing for each cell from the source frame the index where
the pattern ``table`` first occurs within that cell.
"""
return H2OFrame._expr(expr=ExprNode("match", self, table, nomatch, None)) |
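A rough usage sketch, assuming a reachable H2O backend; the column values below are illustrative only:
import h2o
h2o.init()  # requires a local or remote H2O cluster
frame = h2o.H2OFrame({"species": ["setosa", "virginica", "setosa"]})
positions = frame["species"].match(["setosa", "virginica"])
positions.show()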
def copy(self):
"""Make a copy of this instance.
Copies the local data stored as simple types and copies the client
attached to this instance.
:rtype: :class:`~google.cloud.spanner_v1.instance.Instance`
:returns: A copy of the current instance.
"""
new_client = self._client.copy()
return self.__class__(
self.instance_id,
new_client,
self.configuration_name,
node_count=self.node_count,
display_name=self.display_name,
) | Make a copy of this instance.
Copies the local data stored as simple types and copies the client
attached to this instance.
:rtype: :class:`~google.cloud.spanner_v1.instance.Instance`
:returns: A copy of the current instance. | Below is the the instruction that describes the task:
### Input:
Make a copy of this instance.
Copies the local data stored as simple types and copies the client
attached to this instance.
:rtype: :class:`~google.cloud.spanner_v1.instance.Instance`
:returns: A copy of the current instance.
### Response:
def copy(self):
"""Make a copy of this instance.
Copies the local data stored as simple types and copies the client
attached to this instance.
:rtype: :class:`~google.cloud.spanner_v1.instance.Instance`
:returns: A copy of the current instance.
"""
new_client = self._client.copy()
return self.__class__(
self.instance_id,
new_client,
self.configuration_name,
node_count=self.node_count,
display_name=self.display_name,
) |
def readQuotes(self, start, end):
''' read quotes from Yahoo Financial'''
if self.symbol is None:
LOG.debug('Symbol is None')
return []
return self.__yf.getQuotes(self.symbol, start, end) | read quotes from Yahoo Financial | Below is the the instruction that describes the task:
### Input:
read quotes from Yahoo Financial
### Response:
def readQuotes(self, start, end):
''' read quotes from Yahoo Financial'''
if self.symbol is None:
LOG.debug('Symbol is None')
return []
return self.__yf.getQuotes(self.symbol, start, end) |
def send_reset_password_instructions(self, user):
"""
Sends the reset password instructions email for the specified user.
Sends signal `reset_password_instructions_sent`.
:param user: The user to send the instructions to.
"""
token = self.security_utils_service.generate_reset_password_token(user)
reset_link = url_for('security_controller.reset_password',
token=token, _external=True)
self.send_mail(
_('flask_unchained.bundles.security:email_subject.reset_password_instructions'),
to=user.email,
template='security/email/reset_password_instructions.html',
user=user,
reset_link=reset_link)
reset_password_instructions_sent.send(app._get_current_object(),
user=user, token=token) | Sends the reset password instructions email for the specified user.
Sends signal `reset_password_instructions_sent`.
:param user: The user to send the instructions to. | Below is the the instruction that describes the task:
### Input:
Sends the reset password instructions email for the specified user.
Sends signal `reset_password_instructions_sent`.
:param user: The user to send the instructions to.
### Response:
def send_reset_password_instructions(self, user):
"""
Sends the reset password instructions email for the specified user.
Sends signal `reset_password_instructions_sent`.
:param user: The user to send the instructions to.
"""
token = self.security_utils_service.generate_reset_password_token(user)
reset_link = url_for('security_controller.reset_password',
token=token, _external=True)
self.send_mail(
_('flask_unchained.bundles.security:email_subject.reset_password_instructions'),
to=user.email,
template='security/email/reset_password_instructions.html',
user=user,
reset_link=reset_link)
reset_password_instructions_sent.send(app._get_current_object(),
user=user, token=token) |
def as_xml(self, parent = None):
"""Make an XML element from self.
:Parameters:
- `parent`: Parent element
:Types:
- `parent`: :etree:`ElementTree.Element`
"""
if parent is not None:
element = ElementTree.SubElement(parent, ITEM_TAG)
else:
element = ElementTree.Element(ITEM_TAG)
element.set("jid", unicode(self.jid))
if self.name is not None:
element.set("name", self.name)
if self.subscription is not None:
element.set("subscription", self.subscription)
if self.ask:
element.set("ask", self.ask)
if self.approved:
element.set("approved", "true")
for group in self.groups:
ElementTree.SubElement(element, GROUP_TAG).text = group
return element | Make an XML element from self.
:Parameters:
- `parent`: Parent element
:Types:
- `parent`: :etree:`ElementTree.Element` | Below is the the instruction that describes the task:
### Input:
Make an XML element from self.
:Parameters:
- `parent`: Parent element
:Types:
- `parent`: :etree:`ElementTree.Element`
### Response:
def as_xml(self, parent = None):
"""Make an XML element from self.
:Parameters:
- `parent`: Parent element
:Types:
- `parent`: :etree:`ElementTree.Element`
"""
if parent is not None:
element = ElementTree.SubElement(parent, ITEM_TAG)
else:
element = ElementTree.Element(ITEM_TAG)
element.set("jid", unicode(self.jid))
if self.name is not None:
element.set("name", self.name)
if self.subscription is not None:
element.set("subscription", self.subscription)
if self.ask:
element.set("ask", self.ask)
if self.approved:
element.set("approved", "true")
for group in self.groups:
ElementTree.SubElement(element, GROUP_TAG).text = group
return element |
def compose(*fs):
"""
Compose functions together in order:
compose(f, g, h) = lambda n: f(g(h(n)))
"""
# Pull the iterator out into a tuple so we can call `composed`
# more than once.
rs = tuple(reversed(fs))
def composed(n):
return reduce(lambda a, b: b(a), rs, n)
# Attempt to make the function look pretty with
# a fresh docstring and name.
try:
composed.__doc__ = 'lambda n: ' + _composed_doc(fs)
except AttributeError:
# One of our callables does not have a `__name__`, whatever.
pass
else:
# We already know that for all `f` in `fs`, there exists `f.__name__`
composed.__name__ = '_of_'.join(f.__name__ for f in fs)
return composed | Compose functions together in order:
compose(f, g, h) = lambda n: f(g(h(n))) | Below is the the instruction that describes the task:
### Input:
Compose functions together in order:
compose(f, g, h) = lambda n: f(g(h(n)))
### Response:
def compose(*fs):
"""
Compose functions together in order:
compose(f, g, h) = lambda n: f(g(h(n)))
"""
# Pull the iterator out into a tuple so we can call `composed`
# more than once.
rs = tuple(reversed(fs))
def composed(n):
return reduce(lambda a, b: b(a), rs, n)
# Attempt to make the function look pretty with
# a fresh docstring and name.
try:
composed.__doc__ = 'lambda n: ' + _composed_doc(fs)
except AttributeError:
# One of our callables does not have a `__name__`, whatever.
pass
else:
# We already know that for all `f` in `fs`, there exists `f.__name__`
composed.__name__ = '_of_'.join(f.__name__ for f in fs)
return composed |
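A small usage sketch of the helper above; it is pure Python and only assumes compose is importable alongside the functools.reduce it already relies on:
def double(n):
    return n * 2
def increment(n):
    return n + 1
double_after_increment = compose(double, increment)
print(double_after_increment(10))       # double(increment(10)) -> 22
print(double_after_increment.__name__)  # -> 'double_of_increment'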
def search_manager_from_config(config, **default_init):
"""Get a `SearchManager` instance dynamically based on config.
`config` is a dictionary containing ``class`` and ``init`` keys as defined
in :mod:`birding.config`.
"""
manager_cls = import_name(config['class'], default_ns='birding.search')
init = {}
init.update(default_init)
init.update(config['init'])
manager = manager_cls(**init)
return manager | Get a `SearchManager` instance dynamically based on config.
`config` is a dictionary containing ``class`` and ``init`` keys as defined
in :mod:`birding.config`. | Below is the the instruction that describes the task:
### Input:
Get a `SearchManager` instance dynamically based on config.
`config` is a dictionary containing ``class`` and ``init`` keys as defined
in :mod:`birding.config`.
### Response:
def search_manager_from_config(config, **default_init):
"""Get a `SearchManager` instance dynamically based on config.
`config` is a dictionary containing ``class`` and ``init`` keys as defined
in :mod:`birding.config`.
"""
manager_cls = import_name(config['class'], default_ns='birding.search')
init = {}
init.update(default_init)
init.update(config['init'])
manager = manager_cls(**init)
return manager |
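A hypothetical config in the ``class``/``init`` shape the helper expects; the class name and keyword arguments below are placeholders rather than a documented birding API:
config = {
    "class": "SearchManager",   # placeholder, resolved against the birding.search namespace
    "init": {"timeout": 30},    # placeholder constructor kwargs
}
# manager = search_manager_from_config(config)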
def _replace_with_specific_page(page, menu_item):
"""
If ``page`` is a vanilla ``Page`` object, replace it with a 'specific'
version of itself. Also update ``menu_item``, depending on whether it's
a ``MenuItem`` object or a ``Page`` object.
"""
if type(page) is Page:
page = page.specific
if isinstance(menu_item, MenuItem):
menu_item.link_page = page
else:
menu_item = page
return page, menu_item | If ``page`` is a vanilla ``Page`` object, replace it with a 'specific'
version of itself. Also update ``menu_item``, depending on whether it's
a ``MenuItem`` object or a ``Page`` object. | Below is the the instruction that describes the task:
### Input:
If ``page`` is a vanilla ``Page`` object, replace it with a 'specific'
version of itself. Also update ``menu_item``, depending on whether it's
a ``MenuItem`` object or a ``Page`` object.
### Response:
def _replace_with_specific_page(page, menu_item):
"""
If ``page`` is a vanilla ``Page` object, replace it with a 'specific'
version of itself. Also update ``menu_item``, depending on whether it's
a ``MenuItem`` object or a ``Page`` object.
"""
if type(page) is Page:
page = page.specific
if isinstance(menu_item, MenuItem):
menu_item.link_page = page
else:
menu_item = page
return page, menu_item |
def _key_question(self, text):
"""Action for '?'"""
if self.get_current_line_to_cursor():
last_obj = self.get_last_obj()
if last_obj and not last_obj.isdigit():
self.show_object_info(last_obj)
self.insert_text(text)
# In case calltip and completion are shown at the same time:
if self.is_completion_widget_visible():
self.completion_text += '?' | Action for '?' | Below is the the instruction that describes the task:
### Input:
Action for '?'
### Response:
def _key_question(self, text):
"""Action for '?'"""
if self.get_current_line_to_cursor():
last_obj = self.get_last_obj()
if last_obj and not last_obj.isdigit():
self.show_object_info(last_obj)
self.insert_text(text)
# In case calltip and completion are shown at the same time:
if self.is_completion_widget_visible():
self.completion_text += '?' |
def get_means_and_scales_from_q(self):
"""
Gets the mean and scales for normal approximating parameters
"""
means = np.zeros(len(self.q))
scale = np.zeros(len(self.q))
for i in range(len(self.q)):
means[i] = self.q[i].mu0
scale[i] = self.q[i].sigma0
return means, scale | Gets the mean and scales for normal approximating parameters | Below is the the instruction that describes the task:
### Input:
Gets the mean and scales for normal approximating parameters
### Response:
def get_means_and_scales_from_q(self):
"""
Gets the mean and scales for normal approximating parameters
"""
means = np.zeros(len(self.q))
scale = np.zeros(len(self.q))
for i in range(len(self.q)):
means[i] = self.q[i].mu0
scale[i] = self.q[i].sigma0
return means, scale |
def wrapper(vertices_resources, vertices_applications,
nets, net_keys,
machine, constraints=[],
reserve_monitor=True, align_sdram=True,
place=default_place, place_kwargs={},
allocate=default_allocate, allocate_kwargs={},
route=default_route, route_kwargs={},
core_resource=Cores, sdram_resource=SDRAM):
"""Wrapper for core place-and-route tasks for the common case.
At a high level this function essentially takes a set of vertices and nets
and produces placements, memory allocations, routing tables and application
loading information.
.. warning::
This function is deprecated. New users should use
:py:func:`.place_and_route_wrapper` along with
:py:meth:`rig.machine_control.MachineController.get_system_info` in
place of this function. The new wrapper automatically reserves cores
and SDRAM already in use in the target machine, improving on the
behaviour of this wrapper which blindly reserves certain ranges of
resources presuming only core 0 (the monitor processor) is not idle.
Parameters
----------
vertices_resources : {vertex: {resource: quantity, ...}, ...}
A dictionary from vertex to the required resources for that vertex.
This dictionary must include an entry for every vertex in the
application.
Resource requirements are specified by a dictionary `{resource:
quantity, ...}` where `resource` is some resource identifier and
`quantity` is a non-negative integer representing the quantity of that
resource required.
vertices_applications : {vertex: application, ...}
A dictionary from vertices to the application binary to load
onto cores associated with that vertex. Applications are given as a
string containing the file name of the binary to load.
nets : [:py:class:`~rig.netlist.Net`, ...]
A list (in no particular order) defining the nets connecting vertices.
net_keys : {:py:class:`~rig.netlist.Net`: (key, mask), ...}
A dictionary from nets to (key, mask) tuples to be used in SpiNNaker
routing tables for routes implementing this net. The key and mask
should be given as 32-bit integers.
machine : :py:class:`rig.place_and_route.Machine`
A data structure which defines the resources available in the target
SpiNNaker machine.
constraints : [constraint, ...]
A list of constraints on placement, allocation and routing. Available
constraints are provided in the
:py:mod:`rig.place_and_route.constraints` module.
reserve_monitor : bool (Default: True)
**Optional.** If True, reserve core zero since it will be used as the
monitor processor using a
:py:class:`rig.place_and_route.constraints.ReserveResourceConstraint`.
align_sdram : bool (Default: True)
**Optional.** If True, SDRAM allocations will be aligned to 4-byte
addresses. Specifically, the supplied constraints will be augmented
with an `AlignResourceConstraint(sdram_resource, 4)`.
place : function (Default: :py:func:`rig.place_and_route.place`)
**Optional.** Placement algorithm to use.
place_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the placer.
allocate : function (Default: :py:func:`rig.place_and_route.allocate`)
**Optional.** Allocation algorithm to use.
allocate_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the allocator.
route : function (Default: :py:func:`rig.place_and_route.route`)
**Optional.** Routing algorithm to use.
route_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the router.
core_resource : resource (Default: :py:data:`~rig.place_and_route.Cores`)
**Optional.** The resource identifier used for cores.
sdram_resource : resource (Default: :py:data:`~rig.place_and_route.SDRAM`)
**Optional.** The resource identifier used for SDRAM.
Returns
-------
placements : {vertex: (x, y), ...}
A dictionary from vertices to the chip coordinate produced by
placement.
allocations : {vertex: {resource: slice, ...}, ...}
A dictionary from vertices to the resources allocated to it. Resource
allocations are dictionaries from resources to a :py:class:`slice`
defining the range of the given resource type allocated to the vertex.
These :py:class:`slice` objects have `start` <= `end` and `step` set to
None.
application_map : {application: {(x, y): set([core_num, ...]), ...}, ...}
A dictionary from application to the set of cores it should be loaded
onto. The set of cores is given as a dictionary from chip to sets of
core numbers.
routing_tables : {(x, y): \
[:py:class:`~rig.routing_table.RoutingTableEntry`, \
...], ...}
The generated routing tables. Provided as a dictionary from chip to a
list of routing table entries.
"""
warnings.warn("rig.place_and_route.wrapper is deprecated "
"use rig.place_and_route.place_and_route_wrapper instead in "
"new applications.",
DeprecationWarning)
constraints = constraints[:]
# Augment constraints with (historically) commonly used constraints
if reserve_monitor:
constraints.append(
ReserveResourceConstraint(core_resource, slice(0, 1)))
if align_sdram:
constraints.append(AlignResourceConstraint(sdram_resource, 4))
# Place/Allocate/Route
placements = place(vertices_resources, nets, machine, constraints,
**place_kwargs)
allocations = allocate(vertices_resources, nets, machine, constraints,
placements, **allocate_kwargs)
routes = route(vertices_resources, nets, machine, constraints, placements,
allocations, core_resource, **route_kwargs)
# Build data-structures ready to feed to the machine loading functions
application_map = build_application_map(vertices_applications, placements,
allocations, core_resource)
# Build data-structures ready to feed to the machine loading functions
from rig.place_and_route.utils import build_routing_tables
routing_tables = build_routing_tables(routes, net_keys)
return placements, allocations, application_map, routing_tables | Wrapper for core place-and-route tasks for the common case.
At a high level this function essentially takes a set of vertices and nets
and produces placements, memory allocations, routing tables and application
loading information.
.. warning::
This function is deprecated. New users should use
:py:func:`.place_and_route_wrapper` along with
:py:meth:`rig.machine_control.MachineController.get_system_info` in
place of this function. The new wrapper automatically reserves cores
and SDRAM already in use in the target machine, improving on the
behaviour of this wrapper which blindly reserves certain ranges of
resources presuming only core 0 (the monitor processor) is not idle.
Parameters
----------
vertices_resources : {vertex: {resource: quantity, ...}, ...}
A dictionary from vertex to the required resources for that vertex.
This dictionary must include an entry for every vertex in the
application.
Resource requirements are specified by a dictionary `{resource:
quantity, ...}` where `resource` is some resource identifier and
`quantity` is a non-negative integer representing the quantity of that
resource required.
vertices_applications : {vertex: application, ...}
A dictionary from vertices to the application binary to load
onto cores associated with that vertex. Applications are given as a
string containing the file name of the binary to load.
nets : [:py:class:`~rig.netlist.Net`, ...]
A list (in no particular order) defining the nets connecting vertices.
net_keys : {:py:class:`~rig.netlist.Net`: (key, mask), ...}
A dictionary from nets to (key, mask) tuples to be used in SpiNNaker
routing tables for routes implementing this net. The key and mask
should be given as 32-bit integers.
machine : :py:class:`rig.place_and_route.Machine`
A data structure which defines the resources available in the target
SpiNNaker machine.
constraints : [constraint, ...]
A list of constraints on placement, allocation and routing. Available
constraints are provided in the
:py:mod:`rig.place_and_route.constraints` module.
reserve_monitor : bool (Default: True)
**Optional.** If True, reserve core zero since it will be used as the
monitor processor using a
:py:class:`rig.place_and_route.constraints.ReserveResourceConstraint`.
align_sdram : bool (Default: True)
**Optional.** If True, SDRAM allocations will be aligned to 4-byte
addresses. Specifically, the supplied constraints will be augmented
with an `AlignResourceConstraint(sdram_resource, 4)`.
place : function (Default: :py:func:`rig.place_and_route.place`)
**Optional.** Placement algorithm to use.
place_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the placer.
allocate : function (Default: :py:func:`rig.place_and_route.allocate`)
**Optional.** Allocation algorithm to use.
allocate_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the allocator.
route : function (Default: :py:func:`rig.place_and_route.route`)
**Optional.** Routing algorithm to use.
route_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the router.
core_resource : resource (Default: :py:data:`~rig.place_and_route.Cores`)
**Optional.** The resource identifier used for cores.
sdram_resource : resource (Default: :py:data:`~rig.place_and_route.SDRAM`)
**Optional.** The resource identifier used for SDRAM.
Returns
-------
placements : {vertex: (x, y), ...}
A dictionary from vertices to the chip coordinate produced by
placement.
allocations : {vertex: {resource: slice, ...}, ...}
A dictionary from vertices to the resources allocated to it. Resource
allocations are dictionaries from resources to a :py:class:`slice`
defining the range of the given resource type allocated to the vertex.
These :py:class:`slice` objects have `start` <= `end` and `step` set to
None.
application_map : {application: {(x, y): set([core_num, ...]), ...}, ...}
A dictionary from application to the set of cores it should be loaded
onto. The set of cores is given as a dictionary from chip to sets of
core numbers.
routing_tables : {(x, y): \
[:py:class:`~rig.routing_table.RoutingTableEntry`, \
...], ...}
The generated routing tables. Provided as a dictionary from chip to a
list of routing table entries. | Below is the the instruction that describes the task:
### Input:
Wrapper for core place-and-route tasks for the common case.
At a high level this function essentially takes a set of vertices and nets
and produces placements, memory allocations, routing tables and application
loading information.
.. warning::
This function is deprecated. New users should use
:py:func:`.place_and_route_wrapper` along with
:py:meth:`rig.machine_control.MachineController.get_system_info` in
place of this function. The new wrapper automatically reserves cores
and SDRAM already in use in the target machine, improving on the
behaviour of this wrapper which blindly reserves certain ranges of
resources presuming only core 0 (the monitor processor) is not idle.
Parameters
----------
vertices_resources : {vertex: {resource: quantity, ...}, ...}
A dictionary from vertex to the required resources for that vertex.
This dictionary must include an entry for every vertex in the
application.
Resource requirements are specified by a dictionary `{resource:
quantity, ...}` where `resource` is some resource identifier and
`quantity` is a non-negative integer representing the quantity of that
resource required.
vertices_applications : {vertex: application, ...}
A dictionary from vertices to the application binary to load
onto cores associated with that vertex. Applications are given as a
string containing the file name of the binary to load.
nets : [:py:class:`~rig.netlist.Net`, ...]
A list (in no particular order) defining the nets connecting vertices.
net_keys : {:py:class:`~rig.netlist.Net`: (key, mask), ...}
A dictionary from nets to (key, mask) tuples to be used in SpiNNaker
routing tables for routes implementing this net. The key and mask
should be given as 32-bit integers.
machine : :py:class:`rig.place_and_route.Machine`
A data structure which defines the resources available in the target
SpiNNaker machine.
constraints : [constraint, ...]
A list of constraints on placement, allocation and routing. Available
constraints are provided in the
:py:mod:`rig.place_and_route.constraints` module.
reserve_monitor : bool (Default: True)
**Optional.** If True, reserve core zero since it will be used as the
monitor processor using a
:py:class:`rig.place_and_route.constraints.ReserveResourceConstraint`.
align_sdram : bool (Default: True)
**Optional.** If True, SDRAM allocations will be aligned to 4-byte
addresses. Specifically, the supplied constraints will be augmented
with an `AlignResourceConstraint(sdram_resource, 4)`.
place : function (Default: :py:func:`rig.place_and_route.place`)
**Optional.** Placement algorithm to use.
place_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the placer.
allocate : function (Default: :py:func:`rig.place_and_route.allocate`)
**Optional.** Allocation algorithm to use.
allocate_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the allocator.
route : function (Default: :py:func:`rig.place_and_route.route`)
**Optional.** Routing algorithm to use.
route_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the router.
core_resource : resource (Default: :py:data:`~rig.place_and_route.Cores`)
**Optional.** The resource identifier used for cores.
sdram_resource : resource (Default: :py:data:`~rig.place_and_route.SDRAM`)
**Optional.** The resource identifier used for SDRAM.
Returns
-------
placements : {vertex: (x, y), ...}
A dictionary from vertices to the chip coordinate produced by
placement.
allocations : {vertex: {resource: slice, ...}, ...}
A dictionary from vertices to the resources allocated to it. Resource
allocations are dictionaries from resources to a :py:class:`slice`
defining the range of the given resource type allocated to the vertex.
These :py:class:`slice` objects have `start` <= `end` and `step` set to
None.
application_map : {application: {(x, y): set([core_num, ...]), ...}, ...}
A dictionary from application to the set of cores it should be loaded
onto. The set of cores is given as a dictionary from chip to sets of
core numbers.
routing_tables : {(x, y): \
[:py:class:`~rig.routing_table.RoutingTableEntry`, \
...], ...}
The generated routing tables. Provided as a dictionary from chip to a
list of routing table entries.
### Response:
def wrapper(vertices_resources, vertices_applications,
nets, net_keys,
machine, constraints=[],
reserve_monitor=True, align_sdram=True,
place=default_place, place_kwargs={},
allocate=default_allocate, allocate_kwargs={},
route=default_route, route_kwargs={},
core_resource=Cores, sdram_resource=SDRAM):
"""Wrapper for core place-and-route tasks for the common case.
At a high level this function essentially takes a set of vertices and nets
and produces placements, memory allocations, routing tables and application
loading information.
.. warning::
This function is deprecated. New users should use
:py:func:`.place_and_route_wrapper` along with
:py:meth:`rig.machine_control.MachineController.get_system_info` in
place of this function. The new wrapper automatically reserves cores
and SDRAM already in use in the target machine, improving on the
behaviour of this wrapper which blindly reserves certain ranges of
resources presuming only core 0 (the monitor processor) is not idle.
Parameters
----------
vertices_resources : {vertex: {resource: quantity, ...}, ...}
A dictionary from vertex to the required resources for that vertex.
This dictionary must include an entry for every vertex in the
application.
Resource requirements are specified by a dictionary `{resource:
quantity, ...}` where `resource` is some resource identifier and
`quantity` is a non-negative integer representing the quantity of that
resource required.
vertices_applications : {vertex: application, ...}
A dictionary from vertices to the application binary to load
onto cores associated with that vertex. Applications are given as a
string containing the file name of the binary to load.
nets : [:py:class:`~rig.netlist.Net`, ...]
A list (in no particular order) defining the nets connecting vertices.
net_keys : {:py:class:`~rig.netlist.Net`: (key, mask), ...}
A dictionary from nets to (key, mask) tuples to be used in SpiNNaker
routing tables for routes implementing this net. The key and mask
should be given as 32-bit integers.
machine : :py:class:`rig.place_and_route.Machine`
A data structure which defines the resources available in the target
SpiNNaker machine.
constraints : [constraint, ...]
A list of constraints on placement, allocation and routing. Available
constraints are provided in the
:py:mod:`rig.place_and_route.constraints` module.
reserve_monitor : bool (Default: True)
**Optional.** If True, reserve core zero since it will be used as the
monitor processor using a
:py:class:`rig.place_and_route.constraints.ReserveResourceConstraint`.
align_sdram : bool (Default: True)
**Optional.** If True, SDRAM allocations will be aligned to 4-byte
addresses. Specifically, the supplied constraints will be augmented
with an `AlignResourceConstraint(sdram_resource, 4)`.
place : function (Default: :py:func:`rig.place_and_route.place`)
**Optional.** Placement algorithm to use.
place_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the placer.
allocate : function (Default: :py:func:`rig.place_and_route.allocate`)
**Optional.** Allocation algorithm to use.
allocate_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the allocator.
route : function (Default: :py:func:`rig.place_and_route.route`)
**Optional.** Routing algorithm to use.
route_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the router.
core_resource : resource (Default: :py:data:`~rig.place_and_route.Cores`)
**Optional.** The resource identifier used for cores.
sdram_resource : resource (Default: :py:data:`~rig.place_and_route.SDRAM`)
**Optional.** The resource identifier used for SDRAM.
Returns
-------
placements : {vertex: (x, y), ...}
A dictionary from vertices to the chip coordinate produced by
placement.
allocations : {vertex: {resource: slice, ...}, ...}
A dictionary from vertices to the resources allocated to it. Resource
allocations are dictionaries from resources to a :py:class:`slice`
defining the range of the given resource type allocated to the vertex.
These :py:class:`slice` objects have `start` <= `end` and `step` set to
None.
application_map : {application: {(x, y): set([core_num, ...]), ...}, ...}
A dictionary from application to the set of cores it should be loaded
onto. The set of cores is given as a dictionary from chip to sets of
core numbers.
routing_tables : {(x, y): \
[:py:class:`~rig.routing_table.RoutingTableEntry`, \
...], ...}
The generated routing tables. Provided as a dictionary from chip to a
list of routing table entries.
"""
warnings.warn("rig.place_and_route.wrapper is deprecated "
"use rig.place_and_route.place_and_route_wrapper instead in "
"new applications.",
DeprecationWarning)
constraints = constraints[:]
# Augment constraints with (historically) commonly used constraints
if reserve_monitor:
constraints.append(
ReserveResourceConstraint(core_resource, slice(0, 1)))
if align_sdram:
constraints.append(AlignResourceConstraint(sdram_resource, 4))
# Place/Allocate/Route
placements = place(vertices_resources, nets, machine, constraints,
**place_kwargs)
allocations = allocate(vertices_resources, nets, machine, constraints,
placements, **allocate_kwargs)
routes = route(vertices_resources, nets, machine, constraints, placements,
allocations, core_resource, **route_kwargs)
# Build data-structures ready to feed to the machine loading functions
application_map = build_application_map(vertices_applications, placements,
allocations, core_resource)
# Build data-structures ready to feed to the machine loading functions
from rig.place_and_route.utils import build_routing_tables
routing_tables = build_routing_tables(routes, net_keys)
return placements, allocations, application_map, routing_tables |
def get_word_from_offset(self, offset):
"""Return the word value at the given file offset. (little endian)"""
if offset+2 > len(self.__data__):
return None
return self.get_word_from_data(self.__data__[offset:offset+2], 0) | Return the word value at the given file offset. (little endian) | Below is the the instruction that describes the task:
### Input:
Return the word value at the given file offset. (little endian)
### Response:
def get_word_from_offset(self, offset):
"""Return the word value at the given file offset. (little endian)"""
if offset+2 > len(self.__data__):
return None
return self.get_word_from_data(self.__data__[offset:offset+2], 0) |
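The method above delegates to get_word_from_data (not shown here); a standalone sketch of the little-endian 16-bit read it performs:
import struct
data = b"\x34\x12\xff"
print(hex(struct.unpack("<H", data[0:2])[0]))  # -> 0x1234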
def main(dbg=None, sys_argv=list(sys.argv)):
"""Routine which gets run if we were invoked directly"""
global __title__
# Save the original just for use in the restart that works via exec.
orig_sys_argv = list(sys_argv)
opts, dbg_opts, sys_argv = Moptions.process_options(__title__,
VERSION,
sys_argv)
if opts.server is not None:
if opts.server == 'tcp':
connection_opts={'IO': 'TCP', 'PORT': opts.port}
else:
connection_opts={'IO': 'FIFO'}
intf = Mserver.ServerInterface(connection_opts=connection_opts)
dbg_opts['interface'] = intf
if 'FIFO' == intf.server_type:
print('Starting FIFO server for process %s.' % os.getpid())
elif 'TCP' == intf.server_type:
print('Starting TCP server listening on port %s.' %
intf.inout.PORT)
pass
elif opts.client:
Mclient.run(opts, sys_argv)
return
dbg_opts['orig_sys_argv'] = orig_sys_argv
if dbg is None:
dbg = Mdebugger.Trepan(dbg_opts)
dbg.core.add_ignore(main)
pass
Moptions._postprocess_options(dbg, opts)
# process_options has munged sys.argv to remove any options that
# belong to this debugger. The original options to
# invoke the debugger and script are in global sys_argv
if len(sys_argv) == 0:
# No program given to debug. Set to go into a command loop
# anyway
mainpyfile = None
else:
mainpyfile = sys_argv[0] # Get script filename.
if not osp.isfile(mainpyfile):
mainpyfile=Mclifns.whence_file(mainpyfile)
is_readable = Mfile.readable(mainpyfile)
if is_readable is None:
print("%s: Python script file '%s' does not exist"
% (__title__, mainpyfile,))
sys.exit(1)
elif not is_readable:
print("%s: Can't read Python script file '%s'"
% (__title__, mainpyfile, ))
sys.exit(1)
return
if Mfile.is_compiled_py(mainpyfile):
try:
from xdis import load_module, PYTHON_VERSION, IS_PYPY
(python_version, timestamp, magic_int, co, is_pypy,
source_size) = load_module(mainpyfile, code_objects=None,
fast_load=True)
assert is_pypy == IS_PYPY
assert python_version == PYTHON_VERSION, \
"bytecode is for version %s but we are version %s" % (
python_version, PYTHON_VERSION)
# We should check version magic_int
py_file = co.co_filename
if osp.isabs(py_file):
try_file = py_file
else:
mainpydir = osp.dirname(mainpyfile)
tag = sys.implementation.cache_tag
dirnames = [osp.join(mainpydir, tag),
mainpydir] + os.environ['PATH'].split(osp.pathsep) + ['.']
try_file = Mclifns.whence_file(py_file, dirnames)
if osp.isfile(try_file):
mainpyfile = try_file
pass
else:
# Move onto the except branch
raise IOError("Python file name embedded in code %s not found" % try_file)
except IOError:
try:
from uncompyle6 import decompile_file
except ImportError:
print("%s: Compiled python file '%s', but uncompyle6 not found"
% (__title__, mainpyfile), file=sys.stderr)
sys.exit(1)
return
short_name = osp.basename(mainpyfile).strip('.pyc')
fd = tempfile.NamedTemporaryFile(suffix='.py',
prefix=short_name + "_",
delete=False)
old_write = fd.file.write
def write_wrapper(*args, **kwargs):
if isinstance(args[0], str):
new_args = list(args)
new_args[0] = args[0].encode('utf-8')
old_write(*new_args, **kwargs)
else:
old_write(*args, **kwargs)
fd.file.write = write_wrapper
# from io import StringIO
# linemap_io = StringIO()
try:
decompile_file(mainpyfile, fd.file, mapstream=fd)
except:
print("%s: error decompiling '%s'"
% (__title__, mainpyfile), file=sys.stderr)
sys.exit(1)
return
# # Get the line associations between the original and
# # decompiled program
# mapline = linemap_io.getvalue()
# fd.write(mapline + "\n\n")
# linemap = eval(mapline[3:])
mainpyfile = fd.name
fd.close()
# Since we are actually running the recreated source,
# there is little to no need to remap line numbers.
# The mapping is given at the end of the file.
# However we should consider adding this information
# and original file name.
print("%s: couldn't find Python source so we recreated it at '%s'"
% (__title__, mainpyfile), file=sys.stderr)
pass
# If mainpyfile is an optimized Python script try to find and
# use non-optimized alternative.
mainpyfile_noopt = pyficache.pyc2py(mainpyfile)
if mainpyfile != mainpyfile_noopt \
and Mfile.readable(mainpyfile_noopt):
print("%s: Compiled Python script given and we can't use that."
% __title__)
print("%s: Substituting non-compiled name: %s" % (
__title__, mainpyfile_noopt,))
mainpyfile = mainpyfile_noopt
pass
# Replace trepan's dir with script's dir in front of
# module search path.
sys.path[0] = dbg.main_dirname = osp.dirname(mainpyfile)
# XXX If a signal has been received we continue in the loop, otherwise
# the loop exits for some reason.
dbg.sig_received = False
# if not mainpyfile:
# print('For now, you need to specify a Python script name!')
# sys.exit(2)
# pass
while True:
# Run the debugged script over and over again until we get it
# right.
try:
if dbg.program_sys_argv and mainpyfile:
normal_termination = dbg.run_script(mainpyfile)
if not normal_termination: break
else:
dbg.core.execution_status = 'No program'
dbg.core.processor.process_commands()
pass
dbg.core.execution_status = 'Terminated'
dbg.intf[-1].msg("The program finished - quit or restart")
dbg.core.processor.process_commands()
except Mexcept.DebuggerQuit:
break
except Mexcept.DebuggerRestart:
dbg.core.execution_status = 'Restart requested'
if dbg.program_sys_argv:
sys.argv = list(dbg.program_sys_argv)
part1 = ('Restarting %s with arguments:' %
dbg.core.filename(mainpyfile))
args = ' '.join(dbg.program_sys_argv[1:])
dbg.intf[-1].msg(
Mmisc.wrapped_lines(part1, args,
dbg.settings['width']))
else: break
except SystemExit:
# In most cases SystemExit does not warrant a post-mortem session.
break
pass
# Restore old sys.argv
sys.argv = orig_sys_argv
return | Routine which gets run if we were invoked directly | Below is the the instruction that describes the task:
### Input:
Routine which gets run if we were invoked directly
### Response:
def main(dbg=None, sys_argv=list(sys.argv)):
"""Routine which gets run if we were invoked directly"""
global __title__
# Save the original just for use in the restart that works via exec.
orig_sys_argv = list(sys_argv)
opts, dbg_opts, sys_argv = Moptions.process_options(__title__,
VERSION,
sys_argv)
if opts.server is not None:
if opts.server == 'tcp':
connection_opts={'IO': 'TCP', 'PORT': opts.port}
else:
connection_opts={'IO': 'FIFO'}
intf = Mserver.ServerInterface(connection_opts=connection_opts)
dbg_opts['interface'] = intf
if 'FIFO' == intf.server_type:
print('Starting FIFO server for process %s.' % os.getpid())
elif 'TCP' == intf.server_type:
print('Starting TCP server listening on port %s.' %
intf.inout.PORT)
pass
elif opts.client:
Mclient.run(opts, sys_argv)
return
dbg_opts['orig_sys_argv'] = orig_sys_argv
if dbg is None:
dbg = Mdebugger.Trepan(dbg_opts)
dbg.core.add_ignore(main)
pass
Moptions._postprocess_options(dbg, opts)
# process_options has munged sys.argv to remove any options that
# belong to this debugger. The original options to
# invoke the debugger and script are in global sys_argv
if len(sys_argv) == 0:
# No program given to debug. Set to go into a command loop
# anyway
mainpyfile = None
else:
mainpyfile = sys_argv[0] # Get script filename.
if not osp.isfile(mainpyfile):
mainpyfile=Mclifns.whence_file(mainpyfile)
is_readable = Mfile.readable(mainpyfile)
if is_readable is None:
print("%s: Python script file '%s' does not exist"
% (__title__, mainpyfile,))
sys.exit(1)
elif not is_readable:
print("%s: Can't read Python script file '%s'"
% (__title__, mainpyfile, ))
sys.exit(1)
return
if Mfile.is_compiled_py(mainpyfile):
try:
from xdis import load_module, PYTHON_VERSION, IS_PYPY
(python_version, timestamp, magic_int, co, is_pypy,
source_size) = load_module(mainpyfile, code_objects=None,
fast_load=True)
assert is_pypy == IS_PYPY
assert python_version == PYTHON_VERSION, \
"bytecode is for version %s but we are version %s" % (
python_version, PYTHON_VERSION)
# We should check version magic_int
py_file = co.co_filename
if osp.isabs(py_file):
try_file = py_file
else:
mainpydir = osp.dirname(mainpyfile)
tag = sys.implementation.cache_tag
dirnames = [osp.join(mainpydir, tag),
mainpydir] + os.environ['PATH'].split(osp.pathsep) + ['.']
try_file = Mclifns.whence_file(py_file, dirnames)
if osp.isfile(try_file):
mainpyfile = try_file
pass
else:
# Move onto the except branch
raise IOError("Python file name embedded in code %s not found" % try_file)
except IOError:
try:
from uncompyle6 import decompile_file
except ImportError:
print("%s: Compiled python file '%s', but uncompyle6 not found"
% (__title__, mainpyfile), file=sys.stderr)
sys.exit(1)
return
short_name = osp.basename(mainpyfile).strip('.pyc')
fd = tempfile.NamedTemporaryFile(suffix='.py',
prefix=short_name + "_",
delete=False)
old_write = fd.file.write
def write_wrapper(*args, **kwargs):
if isinstance(args[0], str):
new_args = list(args)
new_args[0] = args[0].encode('utf-8')
old_write(*new_args, **kwargs)
else:
old_write(*args, **kwargs)
fd.file.write = write_wrapper
# from io import StringIO
# linemap_io = StringIO()
try:
decompile_file(mainpyfile, fd.file, mapstream=fd)
except:
print("%s: error decompiling '%s'"
% (__title__, mainpyfile), file=sys.stderr)
sys.exit(1)
return
# # Get the line associations between the original and
# # decompiled program
# mapline = linemap_io.getvalue()
# fd.write(mapline + "\n\n")
# linemap = eval(mapline[3:])
mainpyfile = fd.name
fd.close()
# Since we are actually running the recreated source,
# there is little to no need to remap line numbers.
# The mapping is given at the end of the file.
# However we should consider adding this information
# and original file name.
print("%s: couldn't find Python source so we recreated it at '%s'"
% (__title__, mainpyfile), file=sys.stderr)
pass
# If mainpyfile is an optimized Python script try to find and
# use non-optimized alternative.
mainpyfile_noopt = pyficache.pyc2py(mainpyfile)
if mainpyfile != mainpyfile_noopt \
and Mfile.readable(mainpyfile_noopt):
print("%s: Compiled Python script given and we can't use that."
% __title__)
print("%s: Substituting non-compiled name: %s" % (
__title__, mainpyfile_noopt,))
mainpyfile = mainpyfile_noopt
pass
# Replace trepan's dir with script's dir in front of
# module search path.
sys.path[0] = dbg.main_dirname = osp.dirname(mainpyfile)
# XXX If a signal has been received we continue in the loop, otherwise
# the loop exits for some reason.
dbg.sig_received = False
# if not mainpyfile:
# print('For now, you need to specify a Python script name!')
# sys.exit(2)
# pass
while True:
# Run the debugged script over and over again until we get it
# right.
try:
if dbg.program_sys_argv and mainpyfile:
normal_termination = dbg.run_script(mainpyfile)
if not normal_termination: break
else:
dbg.core.execution_status = 'No program'
dbg.core.processor.process_commands()
pass
dbg.core.execution_status = 'Terminated'
dbg.intf[-1].msg("The program finished - quit or restart")
dbg.core.processor.process_commands()
except Mexcept.DebuggerQuit:
break
except Mexcept.DebuggerRestart:
dbg.core.execution_status = 'Restart requested'
if dbg.program_sys_argv:
sys.argv = list(dbg.program_sys_argv)
part1 = ('Restarting %s with arguments:' %
dbg.core.filename(mainpyfile))
args = ' '.join(dbg.program_sys_argv[1:])
dbg.intf[-1].msg(
Mmisc.wrapped_lines(part1, args,
dbg.settings['width']))
else: break
except SystemExit:
# In most cases SystemExit does not warrant a post-mortem session.
break
pass
# Restore old sys.argv
sys.argv = orig_sys_argv
return |
def parse(self, fn):
"""
Parses a file into a lxml.etree structure with namespaces remove. This tree is added to self.iocs.
:param fn: File to parse.
:return:
"""
ioc_xml = xmlutils.read_xml_no_ns(fn)
if not ioc_xml:
return False
root = ioc_xml.getroot()
iocid = root.get('id', None)
if not iocid:
return False
self.iocs[iocid] = ioc_xml
return True | Parses a file into a lxml.etree structure with namespaces remove. This tree is added to self.iocs.
:param fn: File to parse.
:return: | Below is the the instruction that describes the task:
### Input:
Parses a file into a lxml.etree structure with namespaces remove. This tree is added to self.iocs.
:param fn: File to parse.
:return:
### Response:
def parse(self, fn):
"""
Parses a file into a lxml.etree structure with namespaces remove. This tree is added to self.iocs.
:param fn: File to parse.
:return:
"""
ioc_xml = xmlutils.read_xml_no_ns(fn)
if not ioc_xml:
return False
root = ioc_xml.getroot()
iocid = root.get('id', None)
if not iocid:
return False
self.iocs[iocid] = ioc_xml
return True |
def parallel_assimilate(self, rootpath):
"""
Assimilate the entire subdirectory structure in rootpath.
"""
logger.info('Scanning for valid paths...')
valid_paths = []
for (parent, subdirs, files) in os.walk(rootpath):
valid_paths.extend(self._drone.get_valid_paths((parent, subdirs,
files)))
manager = Manager()
data = manager.list()
status = manager.dict()
status['count'] = 0
status['total'] = len(valid_paths)
logger.info('{} valid paths found.'.format(len(valid_paths)))
p = Pool(self._num_drones)
p.map(order_assimilation, ((path, self._drone, data, status)
for path in valid_paths))
for d in data:
self._data.append(json.loads(d, cls=MontyDecoder)) | Assimilate the entire subdirectory structure in rootpath. | Below is the the instruction that describes the task:
### Input:
Assimilate the entire subdirectory structure in rootpath.
### Response:
def parallel_assimilate(self, rootpath):
"""
Assimilate the entire subdirectory structure in rootpath.
"""
logger.info('Scanning for valid paths...')
valid_paths = []
for (parent, subdirs, files) in os.walk(rootpath):
valid_paths.extend(self._drone.get_valid_paths((parent, subdirs,
files)))
manager = Manager()
data = manager.list()
status = manager.dict()
status['count'] = 0
status['total'] = len(valid_paths)
logger.info('{} valid paths found.'.format(len(valid_paths)))
p = Pool(self._num_drones)
p.map(order_assimilation, ((path, self._drone, data, status)
for path in valid_paths))
for d in data:
self._data.append(json.loads(d, cls=MontyDecoder)) |
def usaheatindex(temp, humidity, dew=None):
"""Calculate Heat Index as per USA National Weather Service Standards
See http://en.wikipedia.org/wiki/Heat_index, formula 1. The
formula is not valid for T < 26.7C, Dew Point < 12C, or RH < 40%
"""
if temp is None or humidity is None:
return None
if dew is None:
dew = dew_point(temp, humidity)
if temp < 26.7 or humidity < 40 or dew < 12.0:
return temp
T = (temp * 1.8) + 32.0
R = humidity
c_1 = -42.379
c_2 = 2.04901523
c_3 = 10.14333127
c_4 = -0.22475541
c_5 = -0.00683783
c_6 = -0.05481717
c_7 = 0.00122874
c_8 = 0.00085282
c_9 = -0.00000199
return ((c_1 + (c_2 * T) + (c_3 * R) + (c_4 * T * R) + (c_5 * (T**2)) +
(c_6 * (R**2)) + (c_7 * (T**2) * R) + (c_8 * T * (R**2)) +
(c_9 * (T**2) * (R**2))) - 32.0) / 1.8 | Calculate Heat Index as per USA National Weather Service Standards
See http://en.wikipedia.org/wiki/Heat_index, formula 1. The
formula is not valid for T < 26.7C, Dew Point < 12C, or RH < 40% | Below is the the instruction that describes the task:
### Input:
Calculate Heat Index as per USA National Weather Service Standards
See http://en.wikipedia.org/wiki/Heat_index, formula 1. The
formula is not valid for T < 26.7C, Dew Point < 12C, or RH < 40%
### Response:
def usaheatindex(temp, humidity, dew=None):
"""Calculate Heat Index as per USA National Weather Service Standards
See http://en.wikipedia.org/wiki/Heat_index, formula 1. The
formula is not valid for T < 26.7C, Dew Point < 12C, or RH < 40%
"""
if temp is None or humidity is None:
return None
if dew is None:
dew = dew_point(temp, humidity)
if temp < 26.7 or humidity < 40 or dew < 12.0:
return temp
T = (temp * 1.8) + 32.0
R = humidity
c_1 = -42.379
c_2 = 2.04901523
c_3 = 10.14333127
c_4 = -0.22475541
c_5 = -0.00683783
c_6 = -0.05481717
c_7 = 0.00122874
c_8 = 0.00085282
c_9 = -0.00000199
return ((c_1 + (c_2 * T) + (c_3 * R) + (c_4 * T * R) + (c_5 * (T**2)) +
(c_6 * (R**2)) + (c_7 * (T**2) * R) + (c_8 * T * (R**2)) +
(c_9 * (T**2) * (R**2))) - 32.0) / 1.8 |
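A quick sanity check with made-up readings; the dew point is passed explicitly so the sketch does not depend on the dew_point() helper referenced above:
apparent = usaheatindex(35.0, 70.0, dew=28.0)  # 35 C air temperature at 70% RH
print(round(apparent, 1))  # roughly 50 C, well above the air temperature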
def get_table(table, table_file, path=None, target=None, key=None, key_items=None,
filters=None, template_args=None):
'''
Retrieve data from a Junos device using Tables/Views
table (required)
Name of PyEZ Table
table_file (required)
YAML file that has the table specified in table parameter
path:
Path of location of the YAML file.
defaults to op directory in jnpr.junos.op
target:
if command need to run on FPC, can specify fpc target
key:
To overwrite key provided in YAML
key_items:
To select only given key items
filters:
To select only filter for the dictionary from columns
template_args:
key/value pair which should render Jinja template command
CLI Example:
.. code-block:: bash
salt 'device_name' junos.get_table
'''
conn = __proxy__['junos.conn']()
ret = {}
ret['out'] = True
ret['hostname'] = conn._hostname
ret['tablename'] = table
get_kvargs = {}
if target is not None:
get_kvargs['target'] = target
if key is not None:
get_kvargs['key'] = key
if key_items is not None:
get_kvargs['key_items'] = key_items
if filters is not None:
get_kvargs['filters'] = filters
if template_args is not None and isinstance(template_args, dict):
get_kvargs['args'] = template_args
pyez_tables_path = os.path.dirname(os.path.abspath(tables_dir.__file__))
try:
if path is not None:
file_loc = glob.glob(os.path.join(path, '{}'.format(table_file)))
else:
file_loc = glob.glob(os.path.join(pyez_tables_path, '{}'.format(table_file)))
if len(file_loc) == 1:
file_name = file_loc[0]
else:
ret['message'] = 'Given table file {} cannot be located'.format(table_file)
ret['out'] = False
return ret
try:
with salt.utils.files.fopen(file_name) as fp:
ret['table'] = yaml.load(fp.read(),
Loader=yamlordereddictloader.Loader)
globals().update(FactoryLoader().load(ret['table']))
except IOError as err:
ret['message'] = 'Uncaught exception during YAML Load - please ' \
'report: {0}'.format(six.text_type(err))
ret['out'] = False
return ret
try:
data = globals()[table](conn)
data.get(**get_kvargs)
except KeyError as err:
ret['message'] = 'Uncaught exception during get API call - please ' \
'report: {0}'.format(six.text_type(err))
ret['out'] = False
return ret
except ConnectClosedError:
ret['message'] = 'Got ConnectClosedError exception. Connection lost ' \
'with {}'.format(conn)
ret['out'] = False
return ret
ret['reply'] = json.loads(data.to_json())
if data.__class__.__bases__[0] == OpTable:
# Sets key value if not present in YAML. To be used by returner
if ret['table'][table].get('key') is None:
ret['table'][table]['key'] = data.ITEM_NAME_XPATH
# If key is provided from salt state file.
if key is not None:
ret['table'][table]['key'] = data.KEY
else:
if target is not None:
ret['table'][table]['target'] = data.TARGET
if key is not None:
ret['table'][table]['key'] = data.KEY
if key_items is not None:
ret['table'][table]['key_items'] = data.KEY_ITEMS
if template_args is not None:
ret['table'][table]['args'] = data.CMD_ARGS
ret['table'][table]['command'] = data.GET_CMD
except Exception as err:
ret['message'] = 'Uncaught exception - please report: {0}'.format(
str(err))
traceback.print_exc()
ret['out'] = False
return ret
return ret | Retrieve data from a Junos device using Tables/Views
table (required)
Name of PyEZ Table
table_file (required)
YAML file that has the table specified in table parameter
path:
Path of location of the YAML file.
defaults to op directory in jnpr.junos.op
target:
if command need to run on FPC, can specify fpc target
key:
To overwrite key provided in YAML
key_items:
To select only given key items
filters:
To select only filter for the dictionary from columns
template_args:
key/value pair which should render Jinja template command
CLI Example:
.. code-block:: bash
salt 'device_name' junos.get_table | Below is the instruction that describes the task:
### Input:
Retrieve data from a Junos device using Tables/Views
table (required)
Name of PyEZ Table
table_file (required)
YAML file that has the table specified in table parameter
path:
Path of location of the YAML file.
defaults to op directory in jnpr.junos.op
target:
if command need to run on FPC, can specify fpc target
key:
To overwrite key provided in YAML
key_items:
To select only given key items
filters:
To select only filter for the dictionary from columns
template_args:
key/value pair which should render Jinja template command
CLI Example:
.. code-block:: bash
salt 'device_name' junos.get_table
### Response:
def get_table(table, table_file, path=None, target=None, key=None, key_items=None,
filters=None, template_args=None):
'''
Retrieve data from a Junos device using Tables/Views
table (required)
Name of PyEZ Table
table_file (required)
YAML file that has the table specified in table parameter
path:
Path of location of the YAML file.
defaults to op directory in jnpr.junos.op
target:
if command need to run on FPC, can specify fpc target
key:
To overwrite key provided in YAML
key_items:
To select only given key items
filters:
To select only filter for the dictionary from columns
template_args:
key/value pair which should render Jinja template command
CLI Example:
.. code-block:: bash
salt 'device_name' junos.get_table
'''
conn = __proxy__['junos.conn']()
ret = {}
ret['out'] = True
ret['hostname'] = conn._hostname
ret['tablename'] = table
get_kvargs = {}
if target is not None:
get_kvargs['target'] = target
if key is not None:
get_kvargs['key'] = key
if key_items is not None:
get_kvargs['key_items'] = key_items
if filters is not None:
get_kvargs['filters'] = filters
if template_args is not None and isinstance(template_args, dict):
get_kvargs['args'] = template_args
pyez_tables_path = os.path.dirname(os.path.abspath(tables_dir.__file__))
try:
if path is not None:
file_loc = glob.glob(os.path.join(path, '{}'.format(table_file)))
else:
file_loc = glob.glob(os.path.join(pyez_tables_path, '{}'.format(table_file)))
if len(file_loc) == 1:
file_name = file_loc[0]
else:
ret['message'] = 'Given table file {} cannot be located'.format(table_file)
ret['out'] = False
return ret
try:
with salt.utils.files.fopen(file_name) as fp:
ret['table'] = yaml.load(fp.read(),
Loader=yamlordereddictloader.Loader)
globals().update(FactoryLoader().load(ret['table']))
except IOError as err:
ret['message'] = 'Uncaught exception during YAML Load - please ' \
'report: {0}'.format(six.text_type(err))
ret['out'] = False
return ret
try:
data = globals()[table](conn)
data.get(**get_kvargs)
except KeyError as err:
ret['message'] = 'Uncaught exception during get API call - please ' \
'report: {0}'.format(six.text_type(err))
ret['out'] = False
return ret
except ConnectClosedError:
ret['message'] = 'Got ConnectClosedError exception. Connection lost ' \
'with {}'.format(conn)
ret['out'] = False
return ret
ret['reply'] = json.loads(data.to_json())
if data.__class__.__bases__[0] == OpTable:
# Sets key value if not present in YAML. To be used by returner
if ret['table'][table].get('key') is None:
ret['table'][table]['key'] = data.ITEM_NAME_XPATH
# If key is provided from salt state file.
if key is not None:
ret['table'][table]['key'] = data.KEY
else:
if target is not None:
ret['table'][table]['target'] = data.TARGET
if key is not None:
ret['table'][table]['key'] = data.KEY
if key_items is not None:
ret['table'][table]['key_items'] = data.KEY_ITEMS
if template_args is not None:
ret['table'][table]['args'] = data.CMD_ARGS
ret['table'][table]['command'] = data.GET_CMD
except Exception as err:
ret['message'] = 'Uncaught exception - please report: {0}'.format(
str(err))
traceback.print_exc()
ret['out'] = False
return ret
return ret |
def cluster_resources(self):
"""Get the current total cluster resources.
Note that this information can grow stale as nodes are added to or
removed from the cluster.
Returns:
A dictionary mapping resource name to the total quantity of that
resource in the cluster.
"""
resources = defaultdict(int)
clients = self.client_table()
for client in clients:
# Only count resources from live clients.
if client["IsInsertion"]:
for key, value in client["Resources"].items():
resources[key] += value
return dict(resources) | Get the current total cluster resources.
Note that this information can grow stale as nodes are added to or
removed from the cluster.
Returns:
A dictionary mapping resource name to the total quantity of that
resource in the cluster. | Below is the instruction that describes the task:
### Input:
Get the current total cluster resources.
Note that this information can grow stale as nodes are added to or
removed from the cluster.
Returns:
A dictionary mapping resource name to the total quantity of that
resource in the cluster.
### Response:
def cluster_resources(self):
"""Get the current total cluster resources.
Note that this information can grow stale as nodes are added to or
removed from the cluster.
Returns:
A dictionary mapping resource name to the total quantity of that
resource in the cluster.
"""
resources = defaultdict(int)
clients = self.client_table()
for client in clients:
# Only count resources from live clients.
if client["IsInsertion"]:
for key, value in client["Resources"].items():
resources[key] += value
return dict(resources) |
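A standalone sketch of the aggregation pattern used above, run against a hand-written stand-in for client_table() output; only the two fields the method reads are included.

```python
from collections import defaultdict

# Assumed sample of what client_table() might return.
clients = [
    {"IsInsertion": True, "Resources": {"CPU": 4, "GPU": 1}},
    {"IsInsertion": True, "Resources": {"CPU": 8}},
    {"IsInsertion": False, "Resources": {"CPU": 16}},  # removed node, ignored
]

resources = defaultdict(int)
for client in clients:
    if client["IsInsertion"]:
        for key, value in client["Resources"].items():
            resources[key] += value

print(dict(resources))  # {'CPU': 12, 'GPU': 1}
```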
def rest_verbs(http_method_names=None):
"""
Decorator that converts a function-based view into an RestView subclass.
Takes a list of allowed methods for the view as an argument.
"""
http_method_names = ['GET'] if (http_method_names is None) else http_method_names
def decorator(func):
WrappedRestView = type(
six.PY3 and 'WrappedRestView' or b'WrappedRestView',
(RestView,),
{'__doc__': func.__doc__}
)
# Note, the above allows us to set the docstring.
# It is the equivalent of:
#
# class WrappedRestView(RestView):
# pass
# WrappedRestView.__doc__ = func.doc <--- Not possible to do this
# api_view applied without (method_names)
assert not(isinstance(http_method_names, types.FunctionType)), \
'@api_view missing list of allowed HTTP methods'
# api_view applied with eg. string instead of list of strings
assert isinstance(http_method_names, (list, tuple)), \
'@api_view expected a list of strings, received %s' % type(http_method_names).__name__
allowed_methods = set(http_method_names) | set(('options',))
WrappedRestView.http_method_names = [method.lower() for method in allowed_methods]
def handler(self, *args, **kwargs):
return func(*args, **kwargs)
for method in http_method_names:
setattr(WrappedRestView, method.lower(), handler)
WrappedRestView.__name__ = func.__name__
WrappedRestView.renderer_classes = getattr(func, 'renderer_classes',
RestView.renderer_classes)
WrappedRestView.parser_classes = getattr(func, 'parser_classes',
RestView.parser_classes)
WrappedRestView.authentication_classes = getattr(func, 'authentication_classes',
RestView.authentication_classes)
WrappedRestView.throttle_classes = getattr(func, 'throttle_classes',
RestView.throttle_classes)
WrappedRestView.permission_classes = getattr(func, 'permission_classes',
RestView.permission_classes)
return WrappedRestView.as_view()
return decorator | Decorator that converts a function-based view into an RestView subclass.
Takes a list of allowed methods for the view as an argument. | Below is the instruction that describes the task:
### Input:
Decorator that converts a function-based view into an RestView subclass.
Takes a list of allowed methods for the view as an argument.
### Response:
def rest_verbs(http_method_names=None):
"""
Decorator that converts a function-based view into an RestView subclass.
Takes a list of allowed methods for the view as an argument.
"""
http_method_names = ['GET'] if (http_method_names is None) else http_method_names
def decorator(func):
WrappedRestView = type(
six.PY3 and 'WrappedRestView' or b'WrappedRestView',
(RestView,),
{'__doc__': func.__doc__}
)
# Note, the above allows us to set the docstring.
# It is the equivalent of:
#
# class WrappedRestView(RestView):
# pass
# WrappedRestView.__doc__ = func.doc <--- Not possible to do this
# api_view applied without (method_names)
assert not(isinstance(http_method_names, types.FunctionType)), \
'@api_view missing list of allowed HTTP methods'
# api_view applied with eg. string instead of list of strings
assert isinstance(http_method_names, (list, tuple)), \
'@api_view expected a list of strings, received %s' % type(http_method_names).__name__
allowed_methods = set(http_method_names) | set(('options',))
WrappedRestView.http_method_names = [method.lower() for method in allowed_methods]
def handler(self, *args, **kwargs):
return func(*args, **kwargs)
for method in http_method_names:
setattr(WrappedRestView, method.lower(), handler)
WrappedRestView.__name__ = func.__name__
WrappedRestView.renderer_classes = getattr(func, 'renderer_classes',
RestView.renderer_classes)
WrappedRestView.parser_classes = getattr(func, 'parser_classes',
RestView.parser_classes)
WrappedRestView.authentication_classes = getattr(func, 'authentication_classes',
RestView.authentication_classes)
WrappedRestView.throttle_classes = getattr(func, 'throttle_classes',
RestView.throttle_classes)
WrappedRestView.permission_classes = getattr(func, 'permission_classes',
RestView.permission_classes)
return WrappedRestView.as_view()
return decorator |
def plot(self, sig_style='', title=None, figsize=None,
return_fig=False):
"""
Plot the comparison of two sets of annotations, possibly
overlaid on their original signal.
Parameters
----------
sig_style : str, optional
The matplotlib style of the signal
title : str, optional
The title of the plot
figsize: tuple, optional
Tuple pair specifying the width, and height of the figure.
It is the 'figsize' argument passed into matplotlib.pyplot's
`figure` function.
return_fig : bool, optional
Whether the figure is to be returned as an output argument.
"""
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(1, 1, 1)
legend = ['Signal',
'Matched Reference Annotations (%d/%d)' % (self.tp, self.n_ref),
'Unmatched Reference Annotations (%d/%d)' % (self.fn, self.n_ref),
'Matched Test Annotations (%d/%d)' % (self.tp, self.n_test),
'Unmatched Test Annotations (%d/%d)' % (self.fp, self.n_test)
]
# Plot the signal if any
if self.signal is not None:
ax.plot(self.signal, sig_style)
# Plot reference annotations
ax.plot(self.matched_ref_sample,
self.signal[self.matched_ref_sample], 'ko')
ax.plot(self.unmatched_ref_sample,
self.signal[self.unmatched_ref_sample], 'ko',
fillstyle='none')
# Plot test annotations
ax.plot(self.matched_test_sample,
self.signal[self.matched_test_sample], 'g+')
ax.plot(self.unmatched_test_sample,
self.signal[self.unmatched_test_sample], 'rx')
ax.legend(legend)
# Just plot annotations
else:
# Plot reference annotations
ax.plot(self.matched_ref_sample, np.ones(self.tp), 'ko')
ax.plot(self.unmatched_ref_sample, np.ones(self.fn), 'ko',
fillstyle='none')
# Plot test annotations
ax.plot(self.matched_test_sample, 0.5 * np.ones(self.tp), 'g+')
ax.plot(self.unmatched_test_sample, 0.5 * np.ones(self.fp), 'rx')
ax.legend(legend[1:])
if title:
ax.set_title(title)
ax.set_xlabel('time/sample')
fig.show()
if return_fig:
return fig, ax | Plot the comparison of two sets of annotations, possibly
overlaid on their original signal.
Parameters
----------
sig_style : str, optional
The matplotlib style of the signal
title : str, optional
The title of the plot
figsize: tuple, optional
Tuple pair specifying the width, and height of the figure.
It is the 'figsize' argument passed into matplotlib.pyplot's
`figure` function.
return_fig : bool, optional
Whether the figure is to be returned as an output argument. | Below is the instruction that describes the task:
### Input:
Plot the comparison of two sets of annotations, possibly
overlaid on their original signal.
Parameters
----------
sig_style : str, optional
The matplotlib style of the signal
title : str, optional
The title of the plot
figsize: tuple, optional
Tuple pair specifying the width, and height of the figure.
It is the 'figsize' argument passed into matplotlib.pyplot's
`figure` function.
return_fig : bool, optional
Whether the figure is to be returned as an output argument.
### Response:
def plot(self, sig_style='', title=None, figsize=None,
return_fig=False):
"""
Plot the comparison of two sets of annotations, possibly
overlaid on their original signal.
Parameters
----------
sig_style : str, optional
The matplotlib style of the signal
title : str, optional
The title of the plot
figsize: tuple, optional
Tuple pair specifying the width, and height of the figure.
It is the 'figsize' argument passed into matplotlib.pyplot's
`figure` function.
return_fig : bool, optional
Whether the figure is to be returned as an output argument.
"""
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(1, 1, 1)
legend = ['Signal',
'Matched Reference Annotations (%d/%d)' % (self.tp, self.n_ref),
'Unmatched Reference Annotations (%d/%d)' % (self.fn, self.n_ref),
'Matched Test Annotations (%d/%d)' % (self.tp, self.n_test),
'Unmatched Test Annotations (%d/%d)' % (self.fp, self.n_test)
]
# Plot the signal if any
if self.signal is not None:
ax.plot(self.signal, sig_style)
# Plot reference annotations
ax.plot(self.matched_ref_sample,
self.signal[self.matched_ref_sample], 'ko')
ax.plot(self.unmatched_ref_sample,
self.signal[self.unmatched_ref_sample], 'ko',
fillstyle='none')
# Plot test annotations
ax.plot(self.matched_test_sample,
self.signal[self.matched_test_sample], 'g+')
ax.plot(self.unmatched_test_sample,
self.signal[self.unmatched_test_sample], 'rx')
ax.legend(legend)
# Just plot annotations
else:
# Plot reference annotations
ax.plot(self.matched_ref_sample, np.ones(self.tp), 'ko')
ax.plot(self.unmatched_ref_sample, np.ones(self.fn), 'ko',
fillstyle='none')
# Plot test annotations
ax.plot(self.matched_test_sample, 0.5 * np.ones(self.tp), 'g+')
ax.plot(self.unmatched_test_sample, 0.5 * np.ones(self.fp), 'rx')
ax.legend(legend[1:])
if title:
ax.set_title(title)
ax.set_xlabel('time/sample')
fig.show()
if return_fig:
return fig, ax |
def alignment_to_partials(alignment, missing_data=None):
""" Generate a partials dictionary from a treeCl.Alignment """
partials_dict = {}
for (name, sequence) in alignment.get_sequences():
datatype = 'dna' if alignment.is_dna() else 'protein'
partials_dict[name] = seq_to_partials(sequence, datatype)
if missing_data is not None:
l = len(alignment)
for name in missing_data:
if name not in partials_dict:
partials_dict[name] = seq_to_partials('-'*l, datatype)
return partials_dict | Generate a partials dictionary from a treeCl.Alignment | Below is the instruction that describes the task:
### Input:
Generate a partials dictionary from a treeCl.Alignment
### Response:
def alignment_to_partials(alignment, missing_data=None):
""" Generate a partials dictionary from a treeCl.Alignment """
partials_dict = {}
for (name, sequence) in alignment.get_sequences():
datatype = 'dna' if alignment.is_dna() else 'protein'
partials_dict[name] = seq_to_partials(sequence, datatype)
if missing_data is not None:
l = len(alignment)
for name in missing_data:
if name not in partials_dict:
partials_dict[name] = seq_to_partials('-'*l, datatype)
return partials_dict |
def wait(self=None, period=10, callback=None, *args, **kwargs):
"""Wait until task is complete
:param period: Time in seconds between reloads
:param callback: Function to call after the task has finished,
arguments and keyword arguments can be provided for it
:return: Return value of provided callback function or None if a
callback function was not provided
"""
while self.status not in [
TaskStatus.COMPLETED,
TaskStatus.FAILED,
TaskStatus.ABORTED
]:
self.reload()
time.sleep(period)
if callback:
return callback(*args, **kwargs) | Wait until task is complete
:param period: Time in seconds between reloads
:param callback: Function to call after the task has finished,
arguments and keyword arguments can be provided for it
:return: Return value of provided callback function or None if a
callback function was not provided | Below is the instruction that describes the task:
### Input:
Wait until task is complete
:param period: Time in seconds between reloads
:param callback: Function to call after the task has finished,
arguments and keyword arguments can be provided for it
:return: Return value of provided callback function or None if a
callback function was not provided
### Response:
def wait(self=None, period=10, callback=None, *args, **kwargs):
"""Wait until task is complete
:param period: Time in seconds between reloads
:param callback: Function to call after the task has finished,
arguments and keyword arguments can be provided for it
:return: Return value of provided callback function or None if a
callback function was not provided
"""
while self.status not in [
TaskStatus.COMPLETED,
TaskStatus.FAILED,
TaskStatus.ABORTED
]:
self.reload()
time.sleep(period)
if callback:
return callback(*args, **kwargs) |
def writeInfo(self, stream):
""" Write information about diffs into a file stream for use later. """
for (fromUUID, size) in Diff.theKnownSizes[self.uuid].iteritems():
self.writeInfoLine(stream, fromUUID, size) | Write information about diffs into a file stream for use later. | Below is the instruction that describes the task:
### Input:
Write information about diffs into a file stream for use later.
### Response:
def writeInfo(self, stream):
""" Write information about diffs into a file stream for use later. """
for (fromUUID, size) in Diff.theKnownSizes[self.uuid].iteritems():
self.writeInfoLine(stream, fromUUID, size) |
def pop_back(self):
'''Remove the last element from the :class:`Sequence`.'''
backend = self.backend
return backend.execute(backend.structure(self).pop_back(),
self.value_pickler.loads) | Remove the last element from the :class:`Sequence`. | Below is the instruction that describes the task:
### Input:
Remove the last element from the :class:`Sequence`.
### Response:
def pop_back(self):
'''Remove the last element from the :class:`Sequence`.'''
backend = self.backend
return backend.execute(backend.structure(self).pop_back(),
self.value_pickler.loads) |
def _add_path(dir_name, payload_info_list):
"""Add a key with the path to each payload_info_dict."""
for payload_info_dict in payload_info_list:
file_name = payload_info_dict['filename'] or payload_info_dict['pid']
payload_info_dict['path'] = d1_common.utils.filesystem.gen_safe_path(
dir_name, 'data', file_name
) | Add a key with the path to each payload_info_dict. | Below is the instruction that describes the task:
### Input:
Add a key with the path to each payload_info_dict.
### Response:
def _add_path(dir_name, payload_info_list):
"""Add a key with the path to each payload_info_dict."""
for payload_info_dict in payload_info_list:
file_name = payload_info_dict['filename'] or payload_info_dict['pid']
payload_info_dict['path'] = d1_common.utils.filesystem.gen_safe_path(
dir_name, 'data', file_name
) |
def escape_latex(s):
r"""Escape characters that are special in latex.
Args
----
s : `str`, `NoEscape` or anything that can be converted to string
The string to be escaped. If this is not a string, it will be converted
to a string using `str`. If it is a `NoEscape` string, it will pass
through unchanged.
Returns
-------
NoEscape
The string, with special characters in latex escaped.
Examples
--------
>>> escape_latex("Total cost: $30,000")
'Total cost: \$30,000'
>>> escape_latex("Issue #5 occurs in 30% of all cases")
'Issue \#5 occurs in 30\% of all cases'
>>> print(escape_latex("Total cost: $30,000"))
References
----------
* http://tex.stackexchange.com/a/34586/43228
* http://stackoverflow.com/a/16264094/2570866
"""
if isinstance(s, NoEscape):
return s
return NoEscape(''.join(_latex_special_chars.get(c, c) for c in str(s))) | r"""Escape characters that are special in latex.
Args
----
s : `str`, `NoEscape` or anything that can be converted to string
The string to be escaped. If this is not a string, it will be converted
to a string using `str`. If it is a `NoEscape` string, it will pass
through unchanged.
Returns
-------
NoEscape
The string, with special characters in latex escaped.
Examples
--------
>>> escape_latex("Total cost: $30,000")
'Total cost: \$30,000'
>>> escape_latex("Issue #5 occurs in 30% of all cases")
'Issue \#5 occurs in 30\% of all cases'
>>> print(escape_latex("Total cost: $30,000"))
References
----------
* http://tex.stackexchange.com/a/34586/43228
* http://stackoverflow.com/a/16264094/2570866 | Below is the instruction that describes the task:
### Input:
r"""Escape characters that are special in latex.
Args
----
s : `str`, `NoEscape` or anything that can be converted to string
The string to be escaped. If this is not a string, it will be converted
to a string using `str`. If it is a `NoEscape` string, it will pass
through unchanged.
Returns
-------
NoEscape
The string, with special characters in latex escaped.
Examples
--------
>>> escape_latex("Total cost: $30,000")
'Total cost: \$30,000'
>>> escape_latex("Issue #5 occurs in 30% of all cases")
'Issue \#5 occurs in 30\% of all cases'
>>> print(escape_latex("Total cost: $30,000"))
References
----------
* http://tex.stackexchange.com/a/34586/43228
* http://stackoverflow.com/a/16264094/2570866
### Response:
def escape_latex(s):
r"""Escape characters that are special in latex.
Args
----
s : `str`, `NoEscape` or anything that can be converted to string
The string to be escaped. If this is not a string, it will be converted
to a string using `str`. If it is a `NoEscape` string, it will pass
through unchanged.
Returns
-------
NoEscape
The string, with special characters in latex escaped.
Examples
--------
>>> escape_latex("Total cost: $30,000")
'Total cost: \$30,000'
>>> escape_latex("Issue #5 occurs in 30% of all cases")
'Issue \#5 occurs in 30\% of all cases'
>>> print(escape_latex("Total cost: $30,000"))
References
----------
* http://tex.stackexchange.com/a/34586/43228
* http://stackoverflow.com/a/16264094/2570866
"""
if isinstance(s, NoEscape):
return s
return NoEscape(''.join(_latex_special_chars.get(c, c) for c in str(s))) |
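The function above leans on two names that are not shown in this record, NoEscape and _latex_special_chars. A minimal pair of stand-ins (assumed for illustration only; the real PyLaTeX definitions cover more characters) would be:

```python
# Assumed stand-ins, not the actual PyLaTeX source.
class NoEscape(str):
    """Marks a string that must not be escaped again."""

_latex_special_chars = {
    '&': r'\&', '%': r'\%', '$': r'\$', '#': r'\#',
    '_': r'\_', '{': r'\{', '}': r'\}',
    '~': r'\textasciitilde{}', '^': r'\^{}', '\\': r'\textbackslash{}',
}
```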
def fetch_post_data(self):
'''
fetch post accessed data. post_data, and ext_dic.
'''
post_data = {}
ext_dic = {}
for key in self.request.arguments:
if key.startswith('ext_') or key.startswith('tag_'):
ext_dic[key] = self.get_argument(key)
else:
post_data[key] = self.get_arguments(key)[0]
post_data['user_name'] = self.userinfo.user_name
post_data['kind'] = self.kind
# append external infor.
if 'tags' in post_data:
ext_dic['def_tag_arr'] = [x.strip() for x
in post_data['tags'].strip().strip(',').split(',')]
ext_dic = dict(ext_dic, **self.ext_post_data(postdata=post_data))
return (post_data, ext_dic) | fetch post accessed data. post_data, and ext_dic. | Below is the instruction that describes the task:
### Input:
fetch post accessed data. post_data, and ext_dic.
### Response:
def fetch_post_data(self):
'''
fetch post accessed data. post_data, and ext_dic.
'''
post_data = {}
ext_dic = {}
for key in self.request.arguments:
if key.startswith('ext_') or key.startswith('tag_'):
ext_dic[key] = self.get_argument(key)
else:
post_data[key] = self.get_arguments(key)[0]
post_data['user_name'] = self.userinfo.user_name
post_data['kind'] = self.kind
# append external infor.
if 'tags' in post_data:
ext_dic['def_tag_arr'] = [x.strip() for x
in post_data['tags'].strip().strip(',').split(',')]
ext_dic = dict(ext_dic, **self.ext_post_data(postdata=post_data))
return (post_data, ext_dic) |
def pop_identity(self):
"""
:returns: SimpleIdentifierCollection
"""
popped = None
stack = self.get_run_as_identifiers_stack()
if (stack):
popped = stack.pop()
if (stack):
# persist the changed stack to the session
session = self.get_session()
session.set_internal_attribute(self.run_as_identifiers_session_key, stack)
else:
# stack is empty, remove it from the session:
self.clear_run_as_identities()
return popped | :returns: SimpleIdentifierCollection | Below is the instruction that describes the task:
### Input:
:returns: SimpleIdentifierCollection
### Response:
def pop_identity(self):
"""
:returns: SimpleIdentifierCollection
"""
popped = None
stack = self.get_run_as_identifiers_stack()
if (stack):
popped = stack.pop()
if (stack):
# persist the changed stack to the session
session = self.get_session()
session.set_internal_attribute(self.run_as_identifiers_session_key, stack)
else:
# stack is empty, remove it from the session:
self.clear_run_as_identities()
return popped |
def chrome_getdata_view(request):
"""Get the data of the last notification sent to the current user.
This is needed because Chrome, as of version 44, doesn't support
sending a data payload to a notification. Thus, information on what
the notification is actually for must be manually fetched.
"""
data = {}
if request.user.is_authenticated:
# authenticated session
notifs = GCMNotification.objects.filter(sent_to__user=request.user).order_by("-time")
if notifs.count() > 0:
notif = notifs.first()
ndata = notif.data
if "title" in ndata and "text" in ndata:
data = {
"title": ndata['title'] if 'title' in ndata else '',
"text": ndata['text'] if 'text' in ndata else '',
"url": ndata['url'] if 'url' in ndata else ''
}
else:
schedule_chk = chrome_getdata_check(request)
if schedule_chk:
data = schedule_chk
else:
schedule_chk = chrome_getdata_check(request)
if schedule_chk:
data = schedule_chk
else:
return HttpResponse("null", content_type="text/json")
else:
schedule_chk = chrome_getdata_check(request)
if schedule_chk:
data = schedule_chk
else:
data = {"title": "Check Intranet", "text": "You have a new notification that couldn't be loaded right now."}
j = json.dumps(data)
return HttpResponse(j, content_type="text/json") | Get the data of the last notification sent to the current user.
This is needed because Chrome, as of version 44, doesn't support
sending a data payload to a notification. Thus, information on what
the notification is actually for must be manually fetched. | Below is the instruction that describes the task:
### Input:
Get the data of the last notification sent to the current user.
This is needed because Chrome, as of version 44, doesn't support
sending a data payload to a notification. Thus, information on what
the notification is actually for must be manually fetched.
### Response:
def chrome_getdata_view(request):
"""Get the data of the last notification sent to the current user.
This is needed because Chrome, as of version 44, doesn't support
sending a data payload to a notification. Thus, information on what
the notification is actually for must be manually fetched.
"""
data = {}
if request.user.is_authenticated:
# authenticated session
notifs = GCMNotification.objects.filter(sent_to__user=request.user).order_by("-time")
if notifs.count() > 0:
notif = notifs.first()
ndata = notif.data
if "title" in ndata and "text" in ndata:
data = {
"title": ndata['title'] if 'title' in ndata else '',
"text": ndata['text'] if 'text' in ndata else '',
"url": ndata['url'] if 'url' in ndata else ''
}
else:
schedule_chk = chrome_getdata_check(request)
if schedule_chk:
data = schedule_chk
else:
schedule_chk = chrome_getdata_check(request)
if schedule_chk:
data = schedule_chk
else:
return HttpResponse("null", content_type="text/json")
else:
schedule_chk = chrome_getdata_check(request)
if schedule_chk:
data = schedule_chk
else:
data = {"title": "Check Intranet", "text": "You have a new notification that couldn't be loaded right now."}
j = json.dumps(data)
return HttpResponse(j, content_type="text/json") |
def parse_str(self, s):
"""
Parse string and return relevant object
:param s: string to parse
:type s: str
:return: Parsed object
"""
self.object = self.parsed_class()
in_section = None # Holds name of FEH file section while traversing through file.
for line in s.split('\n'):
if line.lower().startswith('[end]'):
# Leave section
in_section = None
elif line.startswith('['):
# Enter section, sanitise `[Section Name]` to `section_name`
in_section = line.strip().strip('[]').lower().replace(' ', '_')
elif in_section:
try:
# Call method `_section_section_name(line)`
getattr(self, '_section_' + in_section)(line.strip())
except AttributeError:
pass # Skip unsupported section
return self.object | Parse string and return relevant object
:param s: string to parse
:type s: str
:return: Parsed object | Below is the instruction that describes the task:
### Input:
Parse string and return relevant object
:param s: string to parse
:type s: str
:return: Parsed object
### Response:
def parse_str(self, s):
"""
Parse string and return relevant object
:param s: string to parse
:type s: str
:return: Parsed object
"""
self.object = self.parsed_class()
in_section = None # Holds name of FEH file section while traversing through file.
for line in s.split('\n'):
if line.lower().startswith('[end]'):
# Leave section
in_section = None
elif line.startswith('['):
# Enter section, sanitise `[Section Name]` to `section_name`
in_section = line.strip().strip('[]').lower().replace(' ', '_')
elif in_section:
try:
# Call method `_section_section_name(line)`
getattr(self, '_section_' + in_section)(line.strip())
except AttributeError:
pass # Skip unsupported section
return self.object |
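A self-contained sketch of the same section-dispatch idea with a made-up handler, showing how the getattr('_section_' + name) lookup routes lines and silently skips unknown sections.

```python
class TinyParser:
    """Toy parser reusing the [Section] / [END] dispatch pattern above."""

    def __init__(self):
        self.seen = {}

    def _section_site_details(self, line):
        self.seen.setdefault('site_details', []).append(line)

    def parse_str(self, s):
        in_section = None
        for line in s.split('\n'):
            if line.lower().startswith('[end]'):
                in_section = None
            elif line.startswith('['):
                in_section = line.strip().strip('[]').lower().replace(' ', '_')
            elif in_section:
                try:
                    getattr(self, '_section_' + in_section)(line.strip())
                except AttributeError:
                    pass  # no handler for this section, skip it
        return self.seen

text = "[Site Details]\nLatitude=51.5\n[END]\n[Unknown]\nignored=1\n[END]"
print(TinyParser().parse_str(text))  # {'site_details': ['Latitude=51.5']}
```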
def AddKeys(self, key_list):
"""Mark additional columns as being part of the superkey.
Supplements the Keys already extracted from the FSM template.
Useful when adding new columns to existing tables.
Note: This will impact attempts to further 'extend' the table as the
superkey must be common between tables for successful extension.
Args:
key_list: list of header entries to be included in the superkey.
Raises:
KeyError: If any entry in list is not a valid header entry.
"""
for keyname in key_list:
if keyname not in self.header:
raise KeyError("'%s'" % keyname)
self._keys = self._keys.union(set(key_list)) | Mark additional columns as being part of the superkey.
Supplements the Keys already extracted from the FSM template.
Useful when adding new columns to existing tables.
Note: This will impact attempts to further 'extend' the table as the
superkey must be common between tables for successful extension.
Args:
key_list: list of header entries to be included in the superkey.
Raises:
KeyError: If any entry in list is not a valid header entry. | Below is the instruction that describes the task:
### Input:
Mark additional columns as being part of the superkey.
Supplements the Keys already extracted from the FSM template.
Useful when adding new columns to existing tables.
Note: This will impact attempts to further 'extend' the table as the
superkey must be common between tables for successful extension.
Args:
key_list: list of header entries to be included in the superkey.
Raises:
KeyError: If any entry in list is not a valid header entry.
### Response:
def AddKeys(self, key_list):
"""Mark additional columns as being part of the superkey.
Supplements the Keys already extracted from the FSM template.
Useful when adding new columns to existing tables.
Note: This will impact attempts to further 'extend' the table as the
superkey must be common between tables for successful extension.
Args:
key_list: list of header entries to be included in the superkey.
Raises:
KeyError: If any entry in list is not a valid header entry.
"""
for keyname in key_list:
if keyname not in self.header:
raise KeyError("'%s'" % keyname)
self._keys = self._keys.union(set(key_list)) |
def parse_cookie(cookie: str) -> Dict[str, str]:
"""Parse a ``Cookie`` HTTP header into a dict of name/value pairs.
This function attempts to mimic browser cookie parsing behavior;
it specifically does not follow any of the cookie-related RFCs
(because browsers don't either).
The algorithm used is identical to that used by Django version 1.9.10.
.. versionadded:: 4.4.2
"""
cookiedict = {}
for chunk in cookie.split(str(";")):
if str("=") in chunk:
key, val = chunk.split(str("="), 1)
else:
# Assume an empty name per
# https://bugzilla.mozilla.org/show_bug.cgi?id=169091
key, val = str(""), chunk
key, val = key.strip(), val.strip()
if key or val:
# unquote using Python's algorithm.
cookiedict[key] = _unquote_cookie(val)
return cookiedict | Parse a ``Cookie`` HTTP header into a dict of name/value pairs.
This function attempts to mimic browser cookie parsing behavior;
it specifically does not follow any of the cookie-related RFCs
(because browsers don't either).
The algorithm used is identical to that used by Django version 1.9.10.
.. versionadded:: 4.4.2 | Below is the instruction that describes the task:
### Input:
Parse a ``Cookie`` HTTP header into a dict of name/value pairs.
This function attempts to mimic browser cookie parsing behavior;
it specifically does not follow any of the cookie-related RFCs
(because browsers don't either).
The algorithm used is identical to that used by Django version 1.9.10.
.. versionadded:: 4.4.2
### Response:
def parse_cookie(cookie: str) -> Dict[str, str]:
"""Parse a ``Cookie`` HTTP header into a dict of name/value pairs.
This function attempts to mimic browser cookie parsing behavior;
it specifically does not follow any of the cookie-related RFCs
(because browsers don't either).
The algorithm used is identical to that used by Django version 1.9.10.
.. versionadded:: 4.4.2
"""
cookiedict = {}
for chunk in cookie.split(str(";")):
if str("=") in chunk:
key, val = chunk.split(str("="), 1)
else:
# Assume an empty name per
# https://bugzilla.mozilla.org/show_bug.cgi?id=169091
key, val = str(""), chunk
key, val = key.strip(), val.strip()
if key or val:
# unquote using Python's algorithm.
cookiedict[key] = _unquote_cookie(val)
return cookiedict |
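A quick usage sketch, assuming the function above is importable (the module name shown is made up); the empty-name case mirrors the browser behaviour described in the docstring.

```python
# Hypothetical import path for illustration only.
from httputil_example import parse_cookie

header = "theme=dark; sessionid=abc123; =orphan"
print(parse_cookie(header))
# {'theme': 'dark', 'sessionid': 'abc123', '': 'orphan'}
```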
def addToDefinition(self, json_dict):
"""
The addToDefinition operation supports adding a definition
property to a hosted feature service. The result of this
operation is a response indicating success or failure with error
code and description.
This function will allow users to change add additional values
to an already published service.
Input:
json_dict - part to add to host service. The part format can
be derived from the asDictionary property. For
layer level modifications, run updates on each
individual feature service layer object.
Output:
JSON message as dictionary
"""
params = {
"f" : "json",
"addToDefinition" : json.dumps(json_dict),
"async" : False
}
uURL = self._url + "/addToDefinition"
res = self._post(url=uURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
self.refresh()
return res | The addToDefinition operation supports adding a definition
property to a hosted feature service. The result of this
operation is a response indicating success or failure with error
code and description.
This function will allow users to change add additional values
to an already published service.
Input:
json_dict - part to add to host service. The part format can
be derived from the asDictionary property. For
layer level modifications, run updates on each
individual feature service layer object.
Output:
JSON message as dictionary | Below is the instruction that describes the task:
### Input:
The addToDefinition operation supports adding a definition
property to a hosted feature service. The result of this
operation is a response indicating success or failure with error
code and description.
This function will allow users to change add additional values
to an already published service.
Input:
json_dict - part to add to host service. The part format can
be derived from the asDictionary property. For
layer level modifications, run updates on each
individual feature service layer object.
Output:
JSON message as dictionary
### Response:
def addToDefinition(self, json_dict):
"""
The addToDefinition operation supports adding a definition
property to a hosted feature service. The result of this
operation is a response indicating success or failure with error
code and description.
This function will allow users to change add additional values
to an already published service.
Input:
json_dict - part to add to host service. The part format can
be derived from the asDictionary property. For
layer level modifications, run updates on each
individual feature service layer object.
Output:
JSON message as dictionary
"""
params = {
"f" : "json",
"addToDefinition" : json.dumps(json_dict),
"async" : False
}
uURL = self._url + "/addToDefinition"
res = self._post(url=uURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
self.refresh()
return res |
def diff(
cobertura_file1, cobertura_file2,
color, format, output, source1, source2,
source_prefix1, source_prefix2, source):
"""compare coverage of two Cobertura reports"""
cobertura1 = Cobertura(
cobertura_file1,
source=source1,
source_prefix=source_prefix1
)
cobertura2 = Cobertura(
cobertura_file2,
source=source2,
source_prefix=source_prefix2
)
Reporter = delta_reporters[format]
reporter_args = [cobertura1, cobertura2]
reporter_kwargs = {'show_source': source}
isatty = True if output is None else output.isatty()
if format == 'text':
color = isatty if color is None else color is True
reporter_kwargs['color'] = color
reporter = Reporter(*reporter_args, **reporter_kwargs)
report = reporter.generate()
if not isinstance(report, bytes):
report = report.encode('utf-8')
click.echo(report, file=output, nl=isatty, color=color)
exit_code = get_exit_code(reporter.differ, source)
raise SystemExit(exit_code) | compare coverage of two Cobertura reports | Below is the instruction that describes the task:
### Input:
compare coverage of two Cobertura reports
### Response:
def diff(
cobertura_file1, cobertura_file2,
color, format, output, source1, source2,
source_prefix1, source_prefix2, source):
"""compare coverage of two Cobertura reports"""
cobertura1 = Cobertura(
cobertura_file1,
source=source1,
source_prefix=source_prefix1
)
cobertura2 = Cobertura(
cobertura_file2,
source=source2,
source_prefix=source_prefix2
)
Reporter = delta_reporters[format]
reporter_args = [cobertura1, cobertura2]
reporter_kwargs = {'show_source': source}
isatty = True if output is None else output.isatty()
if format == 'text':
color = isatty if color is None else color is True
reporter_kwargs['color'] = color
reporter = Reporter(*reporter_args, **reporter_kwargs)
report = reporter.generate()
if not isinstance(report, bytes):
report = report.encode('utf-8')
click.echo(report, file=output, nl=isatty, color=color)
exit_code = get_exit_code(reporter.differ, source)
raise SystemExit(exit_code) |
def send_document(self, *args, **kwargs):
"""See :func:`send_document`"""
return send_document(*args, **self._merge_overrides(**kwargs)).run() | See :func:`send_document` | Below is the instruction that describes the task:
### Input:
See :func:`send_document`
### Response:
def send_document(self, *args, **kwargs):
"""See :func:`send_document`"""
return send_document(*args, **self._merge_overrides(**kwargs)).run() |
def printrdf(wflow, ctx, style): # type: (Process, ContextType, Text) -> Text
"""Serialize the CWL document into a string, ready for printing."""
rdf = gather(wflow, ctx).serialize(format=style, encoding='utf-8')
if not rdf:
return u""
return rdf.decode('utf-8') | Serialize the CWL document into a string, ready for printing. | Below is the instruction that describes the task:
### Input:
Serialize the CWL document into a string, ready for printing.
### Response:
def printrdf(wflow, ctx, style): # type: (Process, ContextType, Text) -> Text
"""Serialize the CWL document into a string, ready for printing."""
rdf = gather(wflow, ctx).serialize(format=style, encoding='utf-8')
if not rdf:
return u""
return rdf.decode('utf-8') |
def str_to_inet(address):
"""Convert an a string IP address to a inet struct
Args:
address (str): String representation of address
Returns:
inet: Inet network address
"""
# First try ipv4 and then ipv6
try:
return socket.inet_pton(socket.AF_INET, address)
except socket.error:
return socket.inet_pton(socket.AF_INET6, address) | Convert an a string IP address to a inet struct
Args:
address (str): String representation of address
Returns:
inet: Inet network address | Below is the instruction that describes the task:
### Input:
Convert an a string IP address to a inet struct
Args:
address (str): String representation of address
Returns:
inet: Inet network address
### Response:
def str_to_inet(address):
"""Convert an a string IP address to a inet struct
Args:
address (str): String representation of address
Returns:
inet: Inet network address
"""
# First try ipv4 and then ipv6
try:
return socket.inet_pton(socket.AF_INET, address)
except socket.error:
return socket.inet_pton(socket.AF_INET6, address) |
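A runnable round-trip using the same stdlib calls the helper wraps, converting back with inet_ntop to show what the packed form contains.

```python
import socket

packed = socket.inet_pton(socket.AF_INET, "192.0.2.1")
print(packed)                                    # b'\xc0\x00\x02\x01'
print(socket.inet_ntop(socket.AF_INET, packed))  # 192.0.2.1

# IPv6 addresses fall through to the second branch and pack to 16 bytes.
print(len(socket.inet_pton(socket.AF_INET6, "2001:db8::1")))  # 16
```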
def eval_function(value):
""" Evaluate a timestamp function """
name, args = value[0], value[1:]
if name == "NOW":
return datetime.utcnow().replace(tzinfo=tzutc())
elif name in ["TIMESTAMP", "TS"]:
return parse(unwrap(args[0])).replace(tzinfo=tzlocal())
elif name in ["UTCTIMESTAMP", "UTCTS"]:
return parse(unwrap(args[0])).replace(tzinfo=tzutc())
elif name == "MS":
return 1000 * resolve(args[0])
else:
raise SyntaxError("Unrecognized function %r" % name) | Evaluate a timestamp function | Below is the instruction that describes the task:
### Input:
Evaluate a timestamp function
### Response:
def eval_function(value):
""" Evaluate a timestamp function """
name, args = value[0], value[1:]
if name == "NOW":
return datetime.utcnow().replace(tzinfo=tzutc())
elif name in ["TIMESTAMP", "TS"]:
return parse(unwrap(args[0])).replace(tzinfo=tzlocal())
elif name in ["UTCTIMESTAMP", "UTCTS"]:
return parse(unwrap(args[0])).replace(tzinfo=tzutc())
elif name == "MS":
return 1000 * resolve(args[0])
else:
raise SyntaxError("Unrecognized function %r" % name) |
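The timezone handling above comes from python-dateutil; a small sketch of the same calls, assuming dateutil is installed, shows what the TS and UTCTS branches produce.

```python
from dateutil.parser import parse
from dateutil.tz import tzlocal, tzutc

# "TS" branch: parse a naive timestamp and pin it to the local zone.
print(parse("2019-03-01 12:00:00").replace(tzinfo=tzlocal()))

# "UTCTS" branch: same parse, pinned to UTC instead.
print(parse("2019-03-01 12:00:00").replace(tzinfo=tzutc()))
```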
def _parse_version_reply(self):
"waiting for a version reply"
if len(self._data) >= 2:
reply = self._data[:2]
self._data = self._data[2:]
(version, method) = struct.unpack('BB', reply)
if version == 5 and method in [0x00, 0x02]:
self.version_reply(method)
else:
if version != 5:
self.version_error(SocksError(
"Expected version 5, got {}".format(version)))
else:
self.version_error(SocksError(
"Wanted method 0 or 2, got {}".format(method))) | waiting for a version reply | Below is the instruction that describes the task:
### Input:
waiting for a version reply
### Response:
def _parse_version_reply(self):
"waiting for a version reply"
if len(self._data) >= 2:
reply = self._data[:2]
self._data = self._data[2:]
(version, method) = struct.unpack('BB', reply)
if version == 5 and method in [0x00, 0x02]:
self.version_reply(method)
else:
if version != 5:
self.version_error(SocksError(
"Expected version 5, got {}".format(version)))
else:
self.version_error(SocksError(
"Wanted method 0 or 2, got {}".format(method))) |
def _static_value(
self,
serie_node,
value,
x,
y,
metadata,
align_text='left',
classes=None
):
"""Write the print value"""
label = metadata and metadata.get('label')
classes = classes and [classes] or []
if self.print_labels and label:
label_cls = classes + ['label']
if self.print_values:
y -= self.style.value_font_size / 2
self.svg.node(
serie_node['text_overlay'],
'text',
class_=' '.join(label_cls),
x=x,
y=y + self.style.value_font_size / 3
).text = label
y += self.style.value_font_size
if self.print_values or self.dynamic_print_values:
val_cls = classes + ['value']
if self.dynamic_print_values:
val_cls.append('showable')
self.svg.node(
serie_node['text_overlay'],
'text',
class_=' '.join(val_cls),
x=x,
y=y + self.style.value_font_size / 3,
attrib={
'text-anchor': align_text
}
).text = value if self.print_zeroes or value != '0' else '' | Write the print value | Below is the instruction that describes the task:
### Input:
Write the print value
### Response:
def _static_value(
self,
serie_node,
value,
x,
y,
metadata,
align_text='left',
classes=None
):
"""Write the print value"""
label = metadata and metadata.get('label')
classes = classes and [classes] or []
if self.print_labels and label:
label_cls = classes + ['label']
if self.print_values:
y -= self.style.value_font_size / 2
self.svg.node(
serie_node['text_overlay'],
'text',
class_=' '.join(label_cls),
x=x,
y=y + self.style.value_font_size / 3
).text = label
y += self.style.value_font_size
if self.print_values or self.dynamic_print_values:
val_cls = classes + ['value']
if self.dynamic_print_values:
val_cls.append('showable')
self.svg.node(
serie_node['text_overlay'],
'text',
class_=' '.join(val_cls),
x=x,
y=y + self.style.value_font_size / 3,
attrib={
'text-anchor': align_text
}
).text = value if self.print_zeroes or value != '0' else '' |
def _visit_value_and_its_immediate_references(obj, visitor):
''' Recurse down Models, HasProps, and Python containers
The ordering in this function is to optimize performance. We check the
most common types (int, float, str) first so that we can quickly return in
the common case. We avoid isinstance and issubclass checks in a couple
places with `type` checks because isinstance checks can be slow.
'''
typ = type(obj)
if typ in _common_types: # short circuit on common base types
return
if typ is list or issubclass(typ, (list, tuple)): # check common containers
for item in obj:
_visit_value_and_its_immediate_references(item, visitor)
elif issubclass(typ, dict):
for key, value in iteritems(obj):
_visit_value_and_its_immediate_references(key, visitor)
_visit_value_and_its_immediate_references(value, visitor)
elif issubclass(typ, HasProps):
if issubclass(typ, Model):
visitor(obj)
else:
# this isn't a Model, so recurse into it
_visit_immediate_value_references(obj, visitor) | Recurse down Models, HasProps, and Python containers
The ordering in this function is to optimize performance. We check the
most common types (int, float, str) first so that we can quickly return in
the common case. We avoid isinstance and issubclass checks in a couple
places with `type` checks because isinstance checks can be slow. | Below is the instruction that describes the task:
### Input:
Recurse down Models, HasProps, and Python containers
The ordering in this function is to optimize performance. We check the
most common types (int, float, str) first so that we can quickly return in
the common case. We avoid isinstance and issubclass checks in a couple
places with `type` checks because isinstance checks can be slow.
### Response:
def _visit_value_and_its_immediate_references(obj, visitor):
''' Recurse down Models, HasProps, and Python containers
The ordering in this function is to optimize performance. We check the
most common types (int, float, str) first so that we can quickly return in
the common case. We avoid isinstance and issubclass checks in a couple
places with `type` checks because isinstance checks can be slow.
'''
typ = type(obj)
if typ in _common_types: # short circuit on common base types
return
if typ is list or issubclass(typ, (list, tuple)): # check common containers
for item in obj:
_visit_value_and_its_immediate_references(item, visitor)
elif issubclass(typ, dict):
for key, value in iteritems(obj):
_visit_value_and_its_immediate_references(key, visitor)
_visit_value_and_its_immediate_references(value, visitor)
elif issubclass(typ, HasProps):
if issubclass(typ, Model):
visitor(obj)
else:
# this isn't a Model, so recurse into it
_visit_immediate_value_references(obj, visitor) |
def profile_config_get(name, config_key, remote_addr=None,
cert=None, key=None, verify_cert=True):
''' Get a profile config item.
name :
The name of the profile to get the config item from.
config_key :
The key for the item to retrieve.
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert, this is by default True
but in most cases you want to set it off as LXD
normally uses self-signed certificates.
CLI Example:
.. code-block:: bash
$ salt '*' lxd.profile_config_get autostart boot.autostart
'''
profile = profile_get(
name,
remote_addr,
cert,
key,
verify_cert,
_raw=True
)
return _get_property_dict_item(profile, 'config', config_key) | Get a profile config item.
name :
The name of the profile to get the config item from.
config_key :
The key for the item to retrieve.
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert, this is by default True
but in most cases you want to set it off as LXD
normally uses self-signed certificates.
CLI Example:
.. code-block:: bash
$ salt '*' lxd.profile_config_get autostart boot.autostart | Below is the instruction that describes the task:
### Input:
Get a profile config item.
name :
The name of the profile to get the config item from.
config_key :
The key for the item to retrieve.
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert, this is by default True
but in most cases you want to set it off as LXD
normally uses self-signed certificates.
CLI Example:
.. code-block:: bash
$ salt '*' lxd.profile_config_get autostart boot.autostart
### Response:
def profile_config_get(name, config_key, remote_addr=None,
cert=None, key=None, verify_cert=True):
''' Get a profile config item.
name :
The name of the profile to get the config item from.
config_key :
The key for the item to retrieve.
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert, this is by default True
but in most cases you want to set it off as LXD
normally uses self-signed certificates.
CLI Example:
.. code-block:: bash
$ salt '*' lxd.profile_config_get autostart boot.autostart
'''
profile = profile_get(
name,
remote_addr,
cert,
key,
verify_cert,
_raw=True
)
return _get_property_dict_item(profile, 'config', config_key) |
def import_class(import_str):
"""Returns a class from a string including module and class."""
mod_str, _sep, class_str = import_str.rpartition(".")
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str) | Returns a class from a string including module and class. | Below is the instruction that describes the task:
### Input:
Returns a class from a string including module and class.
### Response:
def import_class(import_str):
"""Returns a class from a string including module and class."""
mod_str, _sep, class_str = import_str.rpartition(".")
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str) |
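A usage sketch that repeats the short helper so it runs on its own; any importable dotted path works, shown here with the standard library.

```python
import sys

def import_class(import_str):
    """Returns a class from a string including module and class."""
    mod_str, _sep, class_str = import_str.rpartition(".")
    __import__(mod_str)
    return getattr(sys.modules[mod_str], class_str)

OrderedDict = import_class("collections.OrderedDict")
print(OrderedDict([("a", 1)]))  # OrderedDict([('a', 1)])
```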
def cut_action_callback(self, *event):
"""Add a copy and cut all selected row dict value pairs to the clipboard"""
if react_to_event(self.view, self.tree_view, event) and self.active_entry_widget is None:
_, dict_paths = self.get_view_selection()
stored_data_list = []
for dict_path_as_list in dict_paths:
if dict_path_as_list:
value = self.model.state.semantic_data
for path_element in dict_path_as_list:
value = value[path_element]
stored_data_list.append((path_element, value))
self.model.state.remove_semantic_data(dict_path_as_list)
rafcon.gui.clipboard.global_clipboard.set_semantic_dictionary_list(stored_data_list)
self.reload_tree_store_data() | Add a copy and cut all selected row dict value pairs to the clipboard | Below is the instruction that describes the task:
### Input:
Add a copy and cut all selected row dict value pairs to the clipboard
### Response:
def cut_action_callback(self, *event):
"""Add a copy and cut all selected row dict value pairs to the clipboard"""
if react_to_event(self.view, self.tree_view, event) and self.active_entry_widget is None:
_, dict_paths = self.get_view_selection()
stored_data_list = []
for dict_path_as_list in dict_paths:
if dict_path_as_list:
value = self.model.state.semantic_data
for path_element in dict_path_as_list:
value = value[path_element]
stored_data_list.append((path_element, value))
self.model.state.remove_semantic_data(dict_path_as_list)
rafcon.gui.clipboard.global_clipboard.set_semantic_dictionary_list(stored_data_list)
self.reload_tree_store_data() |
def add_workflow(self, workflow, commit=False):
"""
Add a new workflow and optionally commit it to the database
:param workflow: The workflow
:param commit: Whether to commit the workflow to the database
:type workflow: Workflow
:type commit: bool
:return: None
"""
if workflow.workflow_id in self.workflows:
raise KeyError("Workflow with id {} already exists".format(workflow.workflow_id))
self.workflows[workflow.workflow_id] = workflow
logging.info("Added workflow {} to workflow manager".format(workflow.workflow_id))
# Optionally also save the workflow to database
if commit:
self.commit_workflow(workflow.workflow_id)
else:
self.uncommitted_workflows.add(workflow.workflow_id) | Add a new workflow and optionally commit it to the database
:param workflow: The workflow
:param commit: Whether to commit the workflow to the database
:type workflow: Workflow
:type commit: bool
:return: None | Below is the instruction that describes the task:
### Input:
Add a new workflow and optionally commit it to the database
:param workflow: The workflow
:param commit: Whether to commit the workflow to the database
:type workflow: Workflow
:type commit: bool
:return: None
### Response:
def add_workflow(self, workflow, commit=False):
"""
Add a new workflow and optionally commit it to the database
:param workflow: The workflow
:param commit: Whether to commit the workflow to the database
:type workflow: Workflow
:type commit: bool
:return: None
"""
if workflow.workflow_id in self.workflows:
raise KeyError("Workflow with id {} already exists".format(workflow.workflow_id))
self.workflows[workflow.workflow_id] = workflow
logging.info("Added workflow {} to workflow manager".format(workflow.workflow_id))
# Optionally also save the workflow to database
if commit:
self.commit_workflow(workflow.workflow_id)
else:
self.uncommitted_workflows.add(workflow.workflow_id) |
def collect_population_best(self, best_chromosome, best_fitness_function):
"""!
@brief Stores the best chromosome for current specific iteration and its fitness function's value.
@param[in] best_chromosome (list): The best chromosome on specific iteration.
@param[in] best_fitness_function (float): Fitness function value of the chromosome.
"""
if not self._need_population_best:
return
self._best_population_result['chromosome'].append(best_chromosome)
self._best_population_result['fitness_function'].append(best_fitness_function) | !
@brief Stores the best chromosome for current specific iteration and its fitness function's value.
@param[in] best_chromosome (list): The best chromosome on specific iteration.
@param[in] best_fitness_function (float): Fitness function value of the chromosome. | Below is the the instruction that describes the task:
### Input:
!
@brief Stores the best chromosome for current specific iteration and its fitness function's value.
@param[in] best_chromosome (list): The best chromosome on specific iteration.
@param[in] best_fitness_function (float): Fitness function value of the chromosome.
### Response:
def collect_population_best(self, best_chromosome, best_fitness_function):
"""!
@brief Stores the best chromosome for current specific iteration and its fitness function's value.
@param[in] best_chromosome (list): The best chromosome on specific iteration.
@param[in] best_fitness_function (float): Fitness function value of the chromosome.
"""
if not self._need_population_best:
return
self._best_population_result['chromosome'].append(best_chromosome)
self._best_population_result['fitness_function'].append(best_fitness_function) |
def strike(channel, nick, rest):
"Strike last <n> statements from the record"
yield NoLog
rest = rest.strip()
if not rest:
count = 1
else:
if not rest.isdigit():
yield "Strike how many? Argument must be a positive integer."
            return  # PEP 479: raising StopIteration inside a generator is an error; return ends it
count = int(rest)
try:
struck = Logger.store.strike(channel, nick, count)
tmpl = (
"Isn't undo great? Last %d statement%s "
"by %s were stricken from the record."
)
yield tmpl % (struck, 's' if struck > 1 else '', nick)
except Exception:
traceback.print_exc()
yield "Hmm.. I didn't find anything of yours to strike!" | Strike last <n> statements from the record | Below is the the instruction that describes the task:
### Input:
Strike last <n> statements from the record
### Response:
def strike(channel, nick, rest):
"Strike last <n> statements from the record"
yield NoLog
rest = rest.strip()
if not rest:
count = 1
else:
if not rest.isdigit():
yield "Strike how many? Argument must be a positive integer."
            return  # PEP 479: raising StopIteration inside a generator is an error; return ends it
count = int(rest)
try:
struck = Logger.store.strike(channel, nick, count)
tmpl = (
"Isn't undo great? Last %d statement%s "
"by %s were stricken from the record."
)
yield tmpl % (struck, 's' if struck > 1 else '', nick)
except Exception:
traceback.print_exc()
yield "Hmm.. I didn't find anything of yours to strike!" |
def get_library_value(self, key: str) -> typing.Any:
"""Get the library value for the given key.
Please consult the developer documentation for a list of valid keys.
.. versionadded:: 1.0
Scriptable: Yes
"""
desc = Metadata.session_key_map.get(key)
if desc is not None:
field_id = desc['path'][-1]
return getattr(ApplicationData.get_session_metadata_model(), field_id)
raise KeyError() | Get the library value for the given key.
Please consult the developer documentation for a list of valid keys.
.. versionadded:: 1.0
Scriptable: Yes | Below is the the instruction that describes the task:
### Input:
Get the library value for the given key.
Please consult the developer documentation for a list of valid keys.
.. versionadded:: 1.0
Scriptable: Yes
### Response:
def get_library_value(self, key: str) -> typing.Any:
"""Get the library value for the given key.
Please consult the developer documentation for a list of valid keys.
.. versionadded:: 1.0
Scriptable: Yes
"""
desc = Metadata.session_key_map.get(key)
if desc is not None:
field_id = desc['path'][-1]
return getattr(ApplicationData.get_session_metadata_model(), field_id)
raise KeyError() |
def accountable_date(self):
'''Accountable date of transaction, localized as America/Santiago
'''
fecha_transaccion = self.data['TBK_FECHA_CONTABLE']
m = int(fecha_transaccion[:2])
d = int(fecha_transaccion[2:])
santiago = pytz.timezone('America/Santiago')
today = santiago.localize(datetime.datetime.today())
year = today.year
if self.paid_at.month == 12 and m == 1:
year += 1
santiago_dt = santiago.localize(datetime.datetime(year, m, d))
return santiago_dt | Accountable date of transaction, localized as America/Santiago | Below is the the instruction that describes the task:
### Input:
Accountable date of transaction, localized as America/Santiago
### Response:
def accountable_date(self):
'''Accountable date of transaction, localized as America/Santiago
'''
fecha_transaccion = self.data['TBK_FECHA_CONTABLE']
m = int(fecha_transaccion[:2])
d = int(fecha_transaccion[2:])
santiago = pytz.timezone('America/Santiago')
today = santiago.localize(datetime.datetime.today())
year = today.year
if self.paid_at.month == 12 and m == 1:
year += 1
santiago_dt = santiago.localize(datetime.datetime(year, m, d))
return santiago_dt |
def features(sender=''):
'''Returns a list of signature features.'''
return [
# This one isn't from paper.
# Meant to match companies names, sender's names, address.
many_capitalized_words,
# This one is not from paper.
# Line is too long.
# This one is less aggressive than `Line is too short`
lambda line: 1 if len(line) > TOO_LONG_SIGNATURE_LINE else 0,
# Line contains email pattern.
binary_regex_search(RE_EMAIL),
# Line contains url.
binary_regex_search(RE_URL),
# Line contains phone number pattern.
binary_regex_search(RE_RELAX_PHONE),
# Line matches the regular expression "^[\s]*---*[\s]*$".
binary_regex_match(RE_SEPARATOR),
# Line has a sequence of 10 or more special characters.
binary_regex_search(RE_SPECIAL_CHARS),
# Line contains any typical signature words.
binary_regex_search(RE_SIGNATURE_WORDS),
# Line contains a pattern like Vitor R. Carvalho or William W. Cohen.
binary_regex_search(RE_NAME),
# Percentage of punctuation symbols in the line is larger than 50%
lambda line: 1 if punctuation_percent(line) > 50 else 0,
# Percentage of punctuation symbols in the line is larger than 90%
lambda line: 1 if punctuation_percent(line) > 90 else 0,
contains_sender_names(sender)
] | Returns a list of signature features. | Below is the the instruction that describes the task:
### Input:
Returns a list of signature features.
### Response:
def features(sender=''):
'''Returns a list of signature features.'''
return [
# This one isn't from paper.
# Meant to match companies names, sender's names, address.
many_capitalized_words,
# This one is not from paper.
# Line is too long.
# This one is less aggressive than `Line is too short`
lambda line: 1 if len(line) > TOO_LONG_SIGNATURE_LINE else 0,
# Line contains email pattern.
binary_regex_search(RE_EMAIL),
# Line contains url.
binary_regex_search(RE_URL),
# Line contains phone number pattern.
binary_regex_search(RE_RELAX_PHONE),
# Line matches the regular expression "^[\s]*---*[\s]*$".
binary_regex_match(RE_SEPARATOR),
# Line has a sequence of 10 or more special characters.
binary_regex_search(RE_SPECIAL_CHARS),
# Line contains any typical signature words.
binary_regex_search(RE_SIGNATURE_WORDS),
# Line contains a pattern like Vitor R. Carvalho or William W. Cohen.
binary_regex_search(RE_NAME),
# Percentage of punctuation symbols in the line is larger than 50%
lambda line: 1 if punctuation_percent(line) > 50 else 0,
# Percentage of punctuation symbols in the line is larger than 90%
lambda line: 1 if punctuation_percent(line) > 90 else 0,
contains_sender_names(sender)
] |
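A rough sketch of how the returned feature functions are typically applied to one candidate line; the sender string is invented, and the RE_* patterns plus helpers such as contains_sender_names are assumed to be available from the surrounding module:
# Hypothetical driver: score a single line with every feature function.
feature_functions = features(sender='John Doe <john.doe@example.com>')
line = 'Best regards, John'
feature_vector = [feature(line) for feature in feature_functions]
# feature_vector is a list of 0/1 flags (one per feature), ready for a classifier.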
def _define_case(self):
''' Identify case '''
lonBool = self._map_center(
'long', self.lonM) != self._map_center('long', self.lonm)
latBool = self._map_center(
'lat', self.latM) != self._map_center('lat', self.latm)
if not lonBool and not latBool:
print('No overlap - Processing should be quick')
return self._cas_1()
elif lonBool and not latBool:
            print('Longitude overlap - 2 images have to be processed \n \
Processing could take a few seconds')
return self._cas_2()
elif not lonBool and latBool:
            print('Latitude overlap - 2 images have to be processed \n\
Processing could take a few seconds')
return self._cas_3()
else:
            print('Latitude/Longitude overlaps - 4 images have to be processed \n\
Processing could take a few seconds')
return self._cas_4() | Identify case | Below is the the instruction that describes the task:
### Input:
Identify case
### Response:
def _define_case(self):
''' Identify case '''
lonBool = self._map_center(
'long', self.lonM) != self._map_center('long', self.lonm)
latBool = self._map_center(
'lat', self.latM) != self._map_center('lat', self.latm)
if not lonBool and not latBool:
print('No overlap - Processing should be quick')
return self._cas_1()
elif lonBool and not latBool:
            print('Longitude overlap - 2 images have to be processed \n \
Processing could take a few seconds')
return self._cas_2()
elif not lonBool and latBool:
            print('Latitude overlap - 2 images have to be processed \n\
Processing could take a few seconds')
return self._cas_3()
else:
            print('Latitude/Longitude overlaps - 4 images have to be processed \n\
Processing could take a few seconds')
return self._cas_4() |
def submit_influxdb_measurement(self):
"""Submit a measurement for a message to InfluxDB"""
measurement = influxdb.Measurement(*self.influxdb)
measurement.set_timestamp(time.time())
for key, value in self.measurement.counters.items():
measurement.set_field(key, value)
for key, value in self.measurement.tags.items():
measurement.set_tag(key, value)
for key, value in self.measurement.values.items():
measurement.set_field(key, value)
for key, values in self.measurement.durations.items():
if len(values) == 1:
measurement.set_field(key, values[0])
elif len(values) > 1:
measurement.set_field('{}-average'.format(key),
sum(values) / len(values))
measurement.set_field('{}-max'.format(key), max(values))
measurement.set_field('{}-min'.format(key), min(values))
measurement.set_field('{}-median'.format(key),
utils.percentile(values, 50))
measurement.set_field('{}-95th'.format(key),
utils.percentile(values, 95))
influxdb.add_measurement(measurement)
LOGGER.debug('InfluxDB Measurement: %r', measurement.marshall()) | Submit a measurement for a message to InfluxDB | Below is the the instruction that describes the task:
### Input:
Submit a measurement for a message to InfluxDB
### Response:
def submit_influxdb_measurement(self):
"""Submit a measurement for a message to InfluxDB"""
measurement = influxdb.Measurement(*self.influxdb)
measurement.set_timestamp(time.time())
for key, value in self.measurement.counters.items():
measurement.set_field(key, value)
for key, value in self.measurement.tags.items():
measurement.set_tag(key, value)
for key, value in self.measurement.values.items():
measurement.set_field(key, value)
for key, values in self.measurement.durations.items():
if len(values) == 1:
measurement.set_field(key, values[0])
elif len(values) > 1:
measurement.set_field('{}-average'.format(key),
sum(values) / len(values))
measurement.set_field('{}-max'.format(key), max(values))
measurement.set_field('{}-min'.format(key), min(values))
measurement.set_field('{}-median'.format(key),
utils.percentile(values, 50))
measurement.set_field('{}-95th'.format(key),
utils.percentile(values, 95))
influxdb.add_measurement(measurement)
LOGGER.debug('InfluxDB Measurement: %r', measurement.marshall()) |
def is_memoized(self, k):
'''
lmap.is_memoized(k) yields True if k is a key in the given lazy map lmap that is both lazy
and already memoized.
'''
v = ps.PMap.__getitem__(self, k)
if not isinstance(v, (types.FunctionType, partial)):
return False
else:
return id(v) in self._memoized | lmap.is_memoized(k) yields True if k is a key in the given lazy map lmap that is both lazy
and already memoized. | Below is the the instruction that describes the task:
### Input:
lmap.is_memoized(k) yields True if k is a key in the given lazy map lmap that is both lazy
and already memoized.
### Response:
def is_memoized(self, k):
'''
lmap.is_memoized(k) yields True if k is a key in the given lazy map lmap that is both lazy
and already memoized.
'''
v = ps.PMap.__getitem__(self, k)
if not isinstance(v, (types.FunctionType, partial)):
return False
else:
return id(v) in self._memoized |
def set_provenance_to_project_variables(provenances):
"""Helper method to update / create provenance in project variables.
:param provenances: Keys and values from provenances.
:type provenances: dict
"""
def write_project_variable(key, value):
"""Helper to write project variable for base_key and value.
The key will be:
- base_key__KEY: value for dictionary.
- base_key__INDEX: value for list, tuple, set.
- date will be converted to ISO.
- None will be converted to ''.
:param key: The key.
:type key: basestring
:param value: A list of dictionary.
:type value: dict, list, tuple, set
"""
if key in list(duplicated_global_variables.keys()):
return
if isinstance(value, (list, tuple, set)):
# Skip if the type is too complex (list of note, actions)
return
elif isinstance(value, dict):
for dict_key, dict_value in list(value.items()):
write_project_variable(
'%s__%s' % (key, dict_key), dict_value)
elif isinstance(value, (bool, str, Number)):
# Don't use get_name for field
if 'field' in key:
pretty_value = get_name(value)
QgsExpressionContextUtils.setProjectVariable(
QgsProject.instance(),
key, pretty_value)
else:
QgsExpressionContextUtils.setProjectVariable(
QgsProject.instance(), key, value)
elif isinstance(value, type(None)):
QgsExpressionContextUtils.setProjectVariable(
QgsProject.instance(), key, '')
elif isinstance(value, datetime):
QgsExpressionContextUtils.setProjectVariable(
QgsProject.instance(),
key, value.isoformat())
elif isinstance(value, QUrl):
QgsExpressionContextUtils.setProjectVariable(
QgsProject.instance(),
key, value.toString())
else:
LOGGER.warning('Not handled provenance')
LOGGER.warning('Key: %s, Type: %s, Value: %s' % (
key, type(value), value))
# Remove old provenance data first
remove_provenance_project_variables()
for key, value in list(provenances.items()):
if QgsExpressionContextUtils.globalScope().hasVariable(key):
continue
write_project_variable(key, value) | Helper method to update / create provenance in project variables.
:param provenances: Keys and values from provenances.
:type provenances: dict | Below is the the instruction that describes the task:
### Input:
Helper method to update / create provenance in project variables.
:param provenances: Keys and values from provenances.
:type provenances: dict
### Response:
def set_provenance_to_project_variables(provenances):
"""Helper method to update / create provenance in project variables.
:param provenances: Keys and values from provenances.
:type provenances: dict
"""
def write_project_variable(key, value):
"""Helper to write project variable for base_key and value.
The key will be:
- base_key__KEY: value for dictionary.
- base_key__INDEX: value for list, tuple, set.
- date will be converted to ISO.
- None will be converted to ''.
:param key: The key.
:type key: basestring
:param value: A list of dictionary.
:type value: dict, list, tuple, set
"""
if key in list(duplicated_global_variables.keys()):
return
if isinstance(value, (list, tuple, set)):
# Skip if the type is too complex (list of note, actions)
return
elif isinstance(value, dict):
for dict_key, dict_value in list(value.items()):
write_project_variable(
'%s__%s' % (key, dict_key), dict_value)
elif isinstance(value, (bool, str, Number)):
# Don't use get_name for field
if 'field' in key:
pretty_value = get_name(value)
QgsExpressionContextUtils.setProjectVariable(
QgsProject.instance(),
key, pretty_value)
else:
QgsExpressionContextUtils.setProjectVariable(
QgsProject.instance(), key, value)
elif isinstance(value, type(None)):
QgsExpressionContextUtils.setProjectVariable(
QgsProject.instance(), key, '')
elif isinstance(value, datetime):
QgsExpressionContextUtils.setProjectVariable(
QgsProject.instance(),
key, value.isoformat())
elif isinstance(value, QUrl):
QgsExpressionContextUtils.setProjectVariable(
QgsProject.instance(),
key, value.toString())
else:
LOGGER.warning('Not handled provenance')
LOGGER.warning('Key: %s, Type: %s, Value: %s' % (
key, type(value), value))
# Remove old provenance data first
remove_provenance_project_variables()
for key, value in list(provenances.items()):
if QgsExpressionContextUtils.globalScope().hasVariable(key):
continue
write_project_variable(key, value) |
def _formatNumbers(self, line):
"""
Format the numbers so that there are commas inserted.
For example: 1200300 becomes 1,200,300.
"""
# below thousands separator syntax only works for
# python 2.7, skip for 2.6
if sys.version_info < (2, 7):
return line
last_index = 0
try:
# find the index of the last } character
last_index = (line.rindex('}') + 1)
end = line[last_index:]
except ValueError:
return line
else:
# split the string on numbers to isolate them
            splitted = re.split(r"(\d+)", end)
for index, val in enumerate(splitted):
converted = 0
try:
converted = int(val)
# if it's not an int pass and don't change the string
except ValueError:
pass
else:
if converted > 1000:
splitted[index] = format(converted, ",d")
return line[:last_index] + ("").join(splitted) | Format the numbers so that there are commas inserted.
For example: 1200300 becomes 1,200,300. | Below is the the instruction that describes the task:
### Input:
Format the numbers so that there are commas inserted.
For example: 1200300 becomes 1,200,300.
### Response:
def _formatNumbers(self, line):
"""
Format the numbers so that there are commas inserted.
For example: 1200300 becomes 1,200,300.
"""
# below thousands separator syntax only works for
# python 2.7, skip for 2.6
if sys.version_info < (2, 7):
return line
last_index = 0
try:
# find the index of the last } character
last_index = (line.rindex('}') + 1)
end = line[last_index:]
except ValueError:
return line
else:
# split the string on numbers to isolate them
            splitted = re.split(r"(\d+)", end)
for index, val in enumerate(splitted):
converted = 0
try:
converted = int(val)
# if it's not an int pass and don't change the string
except ValueError:
pass
else:
if converted > 1000:
splitted[index] = format(converted, ",d")
return line[:last_index] + ("").join(splitted) |
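The grouping itself comes from format(value, ",d"); that building block can be checked in isolation:
# The thousands-separator syntax used above, on its own.
assert format(1200300, ",d") == "1,200,300"
assert format(999, ",d") == "999"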
def load_drp(self, name, entry_point='numina.pipeline.1'):
"""Load all available DRPs in 'entry_point'."""
for drpins in self.iload(entry_point):
if drpins.name == name:
return drpins
else:
raise KeyError('{}'.format(name)) | Load all available DRPs in 'entry_point'. | Below is the the instruction that describes the task:
### Input:
Load all available DRPs in 'entry_point'.
### Response:
def load_drp(self, name, entry_point='numina.pipeline.1'):
"""Load all available DRPs in 'entry_point'."""
for drpins in self.iload(entry_point):
if drpins.name == name:
return drpins
else:
raise KeyError('{}'.format(name)) |
def write_out(self, page, xml_subpages, output):
"""Banana banana
"""
# pylint: disable=missing-docstring
def subpages(_):
return xml_subpages
namespace = etree.FunctionNamespace('uri:hotdoc')
namespace['subpages'] = subpages
html_output = os.path.join(output, 'html')
rel_path = os.path.join(self.get_output_folder(page), page.link.ref)
cached_path = os.path.join(self.__cache_dir, rel_path)
full_path = os.path.join(html_output, rel_path)
if not os.path.exists(os.path.dirname(full_path)):
os.makedirs(os.path.dirname(full_path))
with open(cached_path, 'r', encoding='utf-8') as _:
doc_root = etree.HTML(_.read())
self.__validate_html(self.extension.project, page, doc_root)
self.writing_page_signal(self, page, full_path, doc_root)
with open(full_path, 'w', encoding='utf-8') as _:
transformed = str(self.__page_transform(doc_root))
_.write('<!DOCTYPE html>\n%s' % transformed) | Banana banana | Below is the the instruction that describes the task:
### Input:
Banana banana
### Response:
def write_out(self, page, xml_subpages, output):
"""Banana banana
"""
# pylint: disable=missing-docstring
def subpages(_):
return xml_subpages
namespace = etree.FunctionNamespace('uri:hotdoc')
namespace['subpages'] = subpages
html_output = os.path.join(output, 'html')
rel_path = os.path.join(self.get_output_folder(page), page.link.ref)
cached_path = os.path.join(self.__cache_dir, rel_path)
full_path = os.path.join(html_output, rel_path)
if not os.path.exists(os.path.dirname(full_path)):
os.makedirs(os.path.dirname(full_path))
with open(cached_path, 'r', encoding='utf-8') as _:
doc_root = etree.HTML(_.read())
self.__validate_html(self.extension.project, page, doc_root)
self.writing_page_signal(self, page, full_path, doc_root)
with open(full_path, 'w', encoding='utf-8') as _:
transformed = str(self.__page_transform(doc_root))
_.write('<!DOCTYPE html>\n%s' % transformed) |
def _redirect_edge(self, u_id, v_id, new_v_id):
"""Redirect the layer to a new node.
Change the edge originally from `u_id` to `v_id` into an edge from `u_id` to `new_v_id`
while keeping all other property of the edge the same.
"""
layer_id = None
for index, edge_tuple in enumerate(self.adj_list[u_id]):
if edge_tuple[0] == v_id:
layer_id = edge_tuple[1]
self.adj_list[u_id][index] = (new_v_id, layer_id)
self.layer_list[layer_id].output = self.node_list[new_v_id]
break
for index, edge_tuple in enumerate(self.reverse_adj_list[v_id]):
if edge_tuple[0] == u_id:
layer_id = edge_tuple[1]
self.reverse_adj_list[v_id].remove(edge_tuple)
break
self.reverse_adj_list[new_v_id].append((u_id, layer_id))
for index, value in enumerate(self.layer_id_to_output_node_ids[layer_id]):
if value == v_id:
self.layer_id_to_output_node_ids[layer_id][index] = new_v_id
break | Redirect the layer to a new node.
Change the edge originally from `u_id` to `v_id` into an edge from `u_id` to `new_v_id`
while keeping all other property of the edge the same. | Below is the the instruction that describes the task:
### Input:
Redirect the layer to a new node.
Change the edge originally from `u_id` to `v_id` into an edge from `u_id` to `new_v_id`
while keeping all other property of the edge the same.
### Response:
def _redirect_edge(self, u_id, v_id, new_v_id):
"""Redirect the layer to a new node.
Change the edge originally from `u_id` to `v_id` into an edge from `u_id` to `new_v_id`
while keeping all other property of the edge the same.
"""
layer_id = None
for index, edge_tuple in enumerate(self.adj_list[u_id]):
if edge_tuple[0] == v_id:
layer_id = edge_tuple[1]
self.adj_list[u_id][index] = (new_v_id, layer_id)
self.layer_list[layer_id].output = self.node_list[new_v_id]
break
for index, edge_tuple in enumerate(self.reverse_adj_list[v_id]):
if edge_tuple[0] == u_id:
layer_id = edge_tuple[1]
self.reverse_adj_list[v_id].remove(edge_tuple)
break
self.reverse_adj_list[new_v_id].append((u_id, layer_id))
for index, value in enumerate(self.layer_id_to_output_node_ids[layer_id]):
if value == v_id:
self.layer_id_to_output_node_ids[layer_id][index] = new_v_id
break |
def can_import(self, file_uris, current_doc=None):
"""
Check that the specified file looks like a PDF
"""
if len(file_uris) <= 0:
return False
for uri in file_uris:
uri = self.fs.safe(uri)
if not self.check_file_type(uri):
return False
return True | Check that the specified file looks like a PDF | Below is the the instruction that describes the task:
### Input:
Check that the specified file looks like a PDF
### Response:
def can_import(self, file_uris, current_doc=None):
"""
Check that the specified file looks like a PDF
"""
if len(file_uris) <= 0:
return False
for uri in file_uris:
uri = self.fs.safe(uri)
if not self.check_file_type(uri):
return False
return True |
def save_form(self, request, form, change):
"""
Don't show links in the sitemap.
"""
obj = form.save(commit=False)
if not obj.id and "in_sitemap" not in form.fields:
obj.in_sitemap = False
return super(LinkAdmin, self).save_form(request, form, change) | Don't show links in the sitemap. | Below is the the instruction that describes the task:
### Input:
Don't show links in the sitemap.
### Response:
def save_form(self, request, form, change):
"""
Don't show links in the sitemap.
"""
obj = form.save(commit=False)
if not obj.id and "in_sitemap" not in form.fields:
obj.in_sitemap = False
return super(LinkAdmin, self).save_form(request, form, change) |
def parse_cli_args():
"""parse args from the CLI and return a dict"""
parser = argparse.ArgumentParser(description='2048 in your terminal')
parser.add_argument('--mode', dest='mode', type=str,
default=None, help='colors mode (dark or light)')
parser.add_argument('--az', dest='azmode', action='store_true',
help='Use the letters a-z instead of numbers')
parser.add_argument('--resume', dest='resume', action='store_true',
help='restart the game from where you left')
parser.add_argument('-v', '--version', action='store_true')
parser.add_argument('-r', '--rules', action='store_true')
return vars(parser.parse_args()) | parse args from the CLI and return a dict | Below is the the instruction that describes the task:
### Input:
parse args from the CLI and return a dict
### Response:
def parse_cli_args():
"""parse args from the CLI and return a dict"""
parser = argparse.ArgumentParser(description='2048 in your terminal')
parser.add_argument('--mode', dest='mode', type=str,
default=None, help='colors mode (dark or light)')
parser.add_argument('--az', dest='azmode', action='store_true',
help='Use the letters a-z instead of numbers')
parser.add_argument('--resume', dest='resume', action='store_true',
help='restart the game from where you left')
parser.add_argument('-v', '--version', action='store_true')
parser.add_argument('-r', '--rules', action='store_true')
return vars(parser.parse_args()) |
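Since parse_cli_args returns a plain dict, callers branch on its keys; a sketch of that use (the printed strings and the default mode are placeholders, not part of the original program):
# Sketch: consuming the parsed options as a plain dict.
args = parse_cli_args()
if args['version']:
    print('term2048 (version placeholder)')
elif args['rules']:
    print('rules text placeholder')
else:
    mode = args['mode'] or 'dark'  # default colour mode is an assumption
    # ... start the game with mode, args['azmode'], args['resume']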
def key(self):
"""
Returns a tuple that identifies the task instance uniquely
"""
return self.dag_id, self.task_id, self.execution_date, self.try_number | Returns a tuple that identifies the task instance uniquely | Below is the the instruction that describes the task:
### Input:
Returns a tuple that identifies the task instance uniquely
### Response:
def key(self):
"""
Returns a tuple that identifies the task instance uniquely
"""
return self.dag_id, self.task_id, self.execution_date, self.try_number |
def help(rest):
"""Help (this command)"""
rs = rest.strip()
if rs:
# give help for matching commands
for handler in Handler._registry:
if handler.name == rs.lower():
yield '!%s: %s' % (handler.name, handler.doc)
break
else:
yield "command not found"
return
# give help for all commands
def mk_entries():
handlers = (
handler
for handler in Handler._registry
if type(handler) is pmxbot.core.CommandHandler
)
handlers = sorted(handlers, key=operator.attrgetter('name'))
for handler in handlers:
res = "!" + handler.name
if handler.aliases:
alias_names = (alias.name for alias in handler.aliases)
res += " (%s)" % ', '.join(alias_names)
yield res
o = io.StringIO(" ".join(mk_entries()))
more = o.read(160)
while more:
yield more
time.sleep(0.3)
more = o.read(160) | Help (this command) | Below is the the instruction that describes the task:
### Input:
Help (this command)
### Response:
def help(rest):
"""Help (this command)"""
rs = rest.strip()
if rs:
# give help for matching commands
for handler in Handler._registry:
if handler.name == rs.lower():
yield '!%s: %s' % (handler.name, handler.doc)
break
else:
yield "command not found"
return
# give help for all commands
def mk_entries():
handlers = (
handler
for handler in Handler._registry
if type(handler) is pmxbot.core.CommandHandler
)
handlers = sorted(handlers, key=operator.attrgetter('name'))
for handler in handlers:
res = "!" + handler.name
if handler.aliases:
alias_names = (alias.name for alias in handler.aliases)
res += " (%s)" % ', '.join(alias_names)
yield res
o = io.StringIO(" ".join(mk_entries()))
more = o.read(160)
while more:
yield more
time.sleep(0.3)
more = o.read(160) |
def normalize_value(val):
"""
Normalize strings with booleans into Python types.
"""
if val is not None:
if val.lower() == 'false':
val = False
elif val.lower() == 'true':
val = True
return val | Normalize strings with booleans into Python types. | Below is the the instruction that describes the task:
### Input:
Normalize strings with booleans into Python types.
### Response:
def normalize_value(val):
"""
Normalize strings with booleans into Python types.
"""
if val is not None:
if val.lower() == 'false':
val = False
elif val.lower() == 'true':
val = True
return val |
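Representative inputs and outputs for normalize_value (illustrative only):
# 'true'/'false' in any case become booleans; everything else passes through unchanged.
assert normalize_value('TRUE') is True
assert normalize_value('false') is False
assert normalize_value('42') == '42'
assert normalize_value(None) is None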
def the_gui(gui_queue):
"""
Starts and executes the GUI
Reads data from a Queue and displays the data to the window
Returns when the user exits / closes the window
(that means it does NOT return until the user exits the window)
:param gui_queue: Queue the GUI should read from
:return:
"""
layout = [ [sg.Text('Multithreaded Window Example')],
[sg.Text('', size=(15,1), key='_OUTPUT_')],
[sg.Output(size=(40,6))],
[sg.Button('Exit')],]
window = sg.Window('Multithreaded Window').Layout(layout)
# --------------------- EVENT LOOP ---------------------
while True:
event, values = window.Read(timeout=100) # wait for up to 100 ms for a GUI event
if event is None or event == 'Exit':
break
#--------------- Loop through all messages coming in from threads ---------------
while True: # loop executes until runs out of messages in Queue
try: # see if something has been posted to Queue
message = gui_queue.get_nowait()
except queue.Empty: # get_nowait() will get exception when Queue is empty
break # break from the loop if no more messages are queued up
# if message received from queue, display the message in the Window
if message:
window.Element('_OUTPUT_').Update(message)
window.Refresh() # do a refresh because could be showing multiple messages before next Read
# if user exits the window, then close the window and exit the GUI func
window.Close() | Starts and executes the GUI
Reads data from a Queue and displays the data to the window
Returns when the user exits / closes the window
(that means it does NOT return until the user exits the window)
:param gui_queue: Queue the GUI should read from
:return: | Below is the the instruction that describes the task:
### Input:
Starts and executes the GUI
Reads data from a Queue and displays the data to the window
Returns when the user exits / closes the window
(that means it does NOT return until the user exits the window)
:param gui_queue: Queue the GUI should read from
:return:
### Response:
def the_gui(gui_queue):
"""
Starts and executes the GUI
Reads data from a Queue and displays the data to the window
Returns when the user exits / closes the window
(that means it does NOT return until the user exits the window)
:param gui_queue: Queue the GUI should read from
:return:
"""
layout = [ [sg.Text('Multithreaded Window Example')],
[sg.Text('', size=(15,1), key='_OUTPUT_')],
[sg.Output(size=(40,6))],
[sg.Button('Exit')],]
window = sg.Window('Multithreaded Window').Layout(layout)
# --------------------- EVENT LOOP ---------------------
while True:
event, values = window.Read(timeout=100) # wait for up to 100 ms for a GUI event
if event is None or event == 'Exit':
break
#--------------- Loop through all messages coming in from threads ---------------
while True: # loop executes until runs out of messages in Queue
try: # see if something has been posted to Queue
message = gui_queue.get_nowait()
except queue.Empty: # get_nowait() will get exception when Queue is empty
break # break from the loop if no more messages are queued up
# if message received from queue, display the message in the Window
if message:
window.Element('_OUTPUT_').Update(message)
window.Refresh() # do a refresh because could be showing multiple messages before next Read
# if user exits the window, then close the window and exit the GUI func
window.Close() |
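the_gui only drains the queue; the producing side is left to the caller. A hypothetical wiring is sketched below — the worker name, message text and timing are invented, and PySimpleGUI is assumed to be importable as sg for the_gui itself:
# Illustrative producer thread feeding the queue that the_gui reads.
import queue
import threading
import time

def worker(gui_queue):
    for i in range(10):
        gui_queue.put('message number {}'.format(i))  # displayed by the GUI loop
        time.sleep(1)

gui_queue = queue.Queue()
threading.Thread(target=worker, args=(gui_queue,), daemon=True).start()
the_gui(gui_queue)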
def get_first_recipient_with_address(self):
""" Returns the first recipient found with a non blank address
:return: First Recipient
:rtype: Recipient
"""
recipients_with_address = [recipient for recipient in self._recipients
if recipient.address]
if recipients_with_address:
return recipients_with_address[0]
else:
return None | Returns the first recipient found with a non blank address
:return: First Recipient
:rtype: Recipient | Below is the the instruction that describes the task:
### Input:
Returns the first recipient found with a non blank address
:return: First Recipient
:rtype: Recipient
### Response:
def get_first_recipient_with_address(self):
""" Returns the first recipient found with a non blank address
:return: First Recipient
:rtype: Recipient
"""
recipients_with_address = [recipient for recipient in self._recipients
if recipient.address]
if recipients_with_address:
return recipients_with_address[0]
else:
return None |