code (string, lengths 75-104k) | docstring (string, lengths 1-46.9k) | text (string, lengths 164-112k)
---|---|---|
def no_results(channel):
"""Creates an embed UI for when there were no results
Args:
channel (discord.Channel): The Discord channel to bind the embed to
Returns:
ui (ui_embed.UI): The embed UI object
"""
gui = ui_embed.UI(
channel,
"No results",
":c",
modulename=modulename,
colour=0xFF8800
)
return gui | Creates an embed UI for when there were no results
Args:
channel (discord.Channel): The Discord channel to bind the embed to
Returns:
ui (ui_embed.UI): The embed UI object | Below is the instruction that describes the task:
### Input:
Creates an embed UI for when there were no results
Args:
channel (discord.Channel): The Discord channel to bind the embed to
Returns:
ui (ui_embed.UI): The embed UI object
### Response:
def no_results(channel):
"""Creates an embed UI for when there were no results
Args:
channel (discord.Channel): The Discord channel to bind the embed to
Returns:
ui (ui_embed.UI): The embed UI object
"""
gui = ui_embed.UI(
channel,
"No results",
":c",
modulename=modulename,
colour=0xFF8800
)
return gui |
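A brief usage sketch for the row above. The coroutine name and the helper for the non-empty case are hypothetical; only the call signature of no_results(channel) comes from the snippet itself, and the caller is assumed to be responsible for actually sending the returned embed UI.
# Hedged usage sketch for no_results(); the coroutine and format_results() are hypothetical.
async def reply_with_results(channel, results):
    if not results:
        # Build the "No results" embed UI bound to this channel and hand it back
        # to the caller, which decides how to send it.
        return no_results(channel)
    return format_results(channel, results)   # hypothetical helper for the non-empty case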
def remove_pending_work_units(self, work_spec_name, work_unit_names):
'''Remove some work units in the pending list.
If `work_unit_names` is :const:`None` (which must be passed
explicitly), all pending work units in `work_spec_name` are
removed; otherwise only the specific named work units will be.
Note that this function has the potential to confuse workers
if they are actually working on the work units in question. If
you have ensured that the workers are dead and you would be
otherwise waiting for the leases to expire before calling
:meth:`remove_available_work_units`, then this is a useful
shortcut.
:param str work_spec_name: name of the work spec
:param list work_unit_names: names of the work units, or
:const:`None` for all in `work_spec_name`
:return: number of work units removed
'''
return self._remove_some_work_units(
work_spec_name, work_unit_names, priority_min=time.time()) | Remove some work units in the pending list.
If `work_unit_names` is :const:`None` (which must be passed
explicitly), all pending work units in `work_spec_name` are
removed; otherwise only the specific named work units will be.
Note that this function has the potential to confuse workers
if they are actually working on the work units in question. If
you have ensured that the workers are dead and you would be
otherwise waiting for the leases to expire before calling
:meth:`remove_available_work_units`, then this is a useful
shortcut.
:param str work_spec_name: name of the work spec
:param list work_unit_names: names of the work units, or
:const:`None` for all in `work_spec_name`
:return: number of work units removed | Below is the instruction that describes the task:
### Input:
Remove some work units in the pending list.
If `work_unit_names` is :const:`None` (which must be passed
explicitly), all pending work units in `work_spec_name` are
removed; otherwise only the specific named work units will be.
Note that this function has the potential to confuse workers
if they are actually working on the work units in question. If
you have ensured that the workers are dead and you would be
otherwise waiting for the leases to expire before calling
:meth:`remove_available_work_units`, then this is a useful
shortcut.
:param str work_spec_name: name of the work spec
:param list work_unit_names: names of the work units, or
:const:`None` for all in `work_spec_name`
:return: number of work units removed
### Response:
def remove_pending_work_units(self, work_spec_name, work_unit_names):
'''Remove some work units in the pending list.
If `work_unit_names` is :const:`None` (which must be passed
explicitly), all pending work units in `work_spec_name` are
removed; otherwise only the specific named work units will be.
Note that this function has the potential to confuse workers
if they are actually working on the work units in question. If
you have ensured that the workers are dead and you would be
otherwise waiting for the leases to expire before calling
:meth:`remove_available_work_units`, then this is a useful
shortcut.
:param str work_spec_name: name of the work spec
:param list work_unit_names: names of the work units, or
:const:`None` for all in `work_spec_name`
:return: number of work units removed
'''
return self._remove_some_work_units(
work_spec_name, work_unit_names, priority_min=time.time()) |
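A short usage sketch for remove_pending_work_units(). The task_master variable is a hypothetical instance of the class the method belongs to; passing None explicitly, as the docstring requires, clears every pending unit of the spec.
# Hedged sketch: workers for this spec are known to be dead, so skip the lease wait.
removed = task_master.remove_pending_work_units('crawl-2019', None)
print('dropped %d pending work units' % removed)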
def get_all_publications(return_namedtuples=True):
"""
Get list publications from all available source.
Args:
return_namedtuples (bool, default True): Convert :class:`.Publication`
structures to namedtuples (used in AMQP
communication).
Returns:
list: List of :class:`.Publication` structures converted to namedtuple.
"""
sources = [
ben_cz.get_publications,
grada_cz.get_publications,
cpress_cz.get_publications,
zonerpress_cz.get_publications,
]
# get data from all scrappers
publications = []
for source in sources:
publications.extend(
filters.filter_publications(source())
)
# convert to namedtuples
if return_namedtuples:
publications = map(lambda x: x.to_namedtuple(), publications)
return publications | Get list publications from all available source.
Args:
return_namedtuples (bool, default True): Convert :class:`.Publication`
structures to namedtuples (used in AMQP
communication).
Returns:
list: List of :class:`.Publication` structures converted to namedtuple. | Below is the instruction that describes the task:
### Input:
Get list publications from all available source.
Args:
return_namedtuples (bool, default True): Convert :class:`.Publication`
structures to namedtuples (used in AMQP
communication).
Returns:
list: List of :class:`.Publication` structures converted to namedtuple.
### Response:
def get_all_publications(return_namedtuples=True):
"""
Get list publications from all available source.
Args:
return_namedtuples (bool, default True): Convert :class:`.Publication`
structures to namedtuples (used in AMQP
communication).
Returns:
list: List of :class:`.Publication` structures converted to namedtuple.
"""
sources = [
ben_cz.get_publications,
grada_cz.get_publications,
cpress_cz.get_publications,
zonerpress_cz.get_publications,
]
# get data from all scrappers
publications = []
for source in sources:
publications.extend(
filters.filter_publications(source())
)
# convert to namedtuples
if return_namedtuples:
publications = map(lambda x: x.to_namedtuple(), publications)
return publications |
def determineMaxWindowSize(dtype, limit=None):
"""
Determines the largest square window size that can be used, based on
the specified datatype and amount of currently available system memory.
If `limit` is specified, then this value will be returned in the event
that it is smaller than the maximum computed size.
"""
vmem = psutil.virtual_memory()
maxSize = math.floor(math.sqrt(vmem.available / np.dtype(dtype).itemsize))
if limit is None or limit >= maxSize:
return maxSize
else:
return limit | Determines the largest square window size that can be used, based on
the specified datatype and amount of currently available system memory.
If `limit` is specified, then this value will be returned in the event
that it is smaller than the maximum computed size. | Below is the instruction that describes the task:
### Input:
Determines the largest square window size that can be used, based on
the specified datatype and amount of currently available system memory.
If `limit` is specified, then this value will be returned in the event
that it is smaller than the maximum computed size.
### Response:
def determineMaxWindowSize(dtype, limit=None):
"""
Determines the largest square window size that can be used, based on
the specified datatype and amount of currently available system memory.
If `limit` is specified, then this value will be returned in the event
that it is smaller than the maximum computed size.
"""
vmem = psutil.virtual_memory()
maxSize = math.floor(math.sqrt(vmem.available / np.dtype(dtype).itemsize))
if limit is None or limit >= maxSize:
return maxSize
else:
return limit |
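A self-contained usage sketch for determineMaxWindowSize(); numpy is the only extra import, and psutil is already required by the function itself. The limit of 8192 is illustrative.
import numpy as np

# Largest square float64 window that fits in currently free RAM, capped at 8192.
window = determineMaxWindowSize(np.float64, limit=8192)
print('using a %d x %d processing window' % (window, window))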
def get_frames(tback, is_breakpoint):
"""Builds a list of ErrorFrame objects from a traceback"""
frames = []
while tback is not None:
if tback.tb_next is None and is_breakpoint:
break
filename = tback.tb_frame.f_code.co_filename
function = tback.tb_frame.f_code.co_name
context = tback.tb_frame.f_locals
lineno = tback.tb_lineno - 1
tback_id = id(tback)
pre_context_lineno, pre_context, context_line, post_context = get_lines_from_file(filename, lineno + 1, 7)
frames.append(ErrorFrame(tback, filename, function, lineno, context, tback_id, pre_context, context_line, post_context, pre_context_lineno))
tback = tback.tb_next
return frames | Builds a list of ErrorFrame objects from a traceback | Below is the instruction that describes the task:
### Input:
Builds a list of ErrorFrame objects from a traceback
### Response:
def get_frames(tback, is_breakpoint):
"""Builds a list of ErrorFrame objects from a traceback"""
frames = []
while tback is not None:
if tback.tb_next is None and is_breakpoint:
break
filename = tback.tb_frame.f_code.co_filename
function = tback.tb_frame.f_code.co_name
context = tback.tb_frame.f_locals
lineno = tback.tb_lineno - 1
tback_id = id(tback)
pre_context_lineno, pre_context, context_line, post_context = get_lines_from_file(filename, lineno + 1, 7)
frames.append(ErrorFrame(tback, filename, function, lineno, context, tback_id, pre_context, context_line, post_context, pre_context_lineno))
tback = tback.tb_next
return frames |
def birth_inds_given_contours(birth_logl_arr, logl_arr, **kwargs):
"""Maps the iso-likelihood contours on which points were born to the
index of the dead point on this contour.
MultiNest and PolyChord use different values to identify the inital live
points which were sampled from the whole prior (PolyChord uses -1e+30
and MultiNest -0.179769313486231571E+309). However in each case the first
dead point must have been sampled from the whole prior, so for either
package we can use
init_birth = birth_logl_arr[0]
If there are many points with the same logl_arr and dup_assert is False,
these points are randomly assigned an order (to ensure results are
consistent, random seeding is used).
Parameters
----------
logl_arr: 1d numpy array
logl values of each point.
birth_logl_arr: 1d numpy array
Birth contours - i.e. logl values of the iso-likelihood contour from
within each point was sampled (on which it was born).
dup_assert: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
dup_warn: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
Returns
-------
birth_inds: 1d numpy array of ints
Step at which each element of logl_arr was sampled. Points sampled from
the whole prior are assigned value -1.
"""
dup_assert = kwargs.pop('dup_assert', False)
dup_warn = kwargs.pop('dup_warn', False)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert logl_arr.ndim == 1, logl_arr.ndim
assert birth_logl_arr.ndim == 1, birth_logl_arr.ndim
# Check for duplicate logl values (if specified by dup_assert or dup_warn)
nestcheck.ns_run_utils.check_ns_run_logls(
{'logl': logl_arr}, dup_assert=dup_assert, dup_warn=dup_warn)
# Random seed so results are consistent if there are duplicate logls
state = np.random.get_state() # Save random state before seeding
np.random.seed(0)
# Calculate birth inds
init_birth = birth_logl_arr[0]
assert np.all(birth_logl_arr <= logl_arr), (
logl_arr[birth_logl_arr > logl_arr])
birth_inds = np.full(birth_logl_arr.shape, np.nan)
birth_inds[birth_logl_arr == init_birth] = -1
for i, birth_logl in enumerate(birth_logl_arr):
if not np.isnan(birth_inds[i]):
# birth ind has already been assigned
continue
dup_deaths = np.where(logl_arr == birth_logl)[0]
if dup_deaths.shape == (1,):
# death index is unique
birth_inds[i] = dup_deaths[0]
continue
# The remainder of this loop deals with the case that multiple points
# have the same logl value (=birth_logl). This can occur due to limited
# precision, or for likelihoods with contant regions. In this case we
# randomly assign the duplicates birth steps in a manner
# that provides a valid division into nested sampling runs
dup_births = np.where(birth_logl_arr == birth_logl)[0]
assert dup_deaths.shape[0] > 1, dup_deaths
if np.all(birth_logl_arr[dup_deaths] != birth_logl):
# If no points both are born and die on this contour, we can just
# randomly assign an order
np.random.shuffle(dup_deaths)
inds_to_use = dup_deaths
else:
# If some points are both born and die on the contour, we need to
# take care that the assigned birth inds do not result in some
# points dying before they are born
try:
inds_to_use = sample_less_than_condition(
dup_deaths, dup_births)
except ValueError:
raise ValueError((
'There is no way to allocate indexes dup_deaths={} such '
'that each is less than dup_births={}.').format(
dup_deaths, dup_births))
try:
# Add our selected inds_to_use values to the birth_inds array
# Note that dup_deaths (and hence inds to use) may have more
# members than dup_births, because one of the duplicates may be
# the final point in a thread. We therefore include only the first
# dup_births.shape[0] elements
birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]
except ValueError:
warnings.warn((
'for logl={}, the number of points born (indexes='
'{}) is bigger than the number of points dying '
'(indexes={}). This indicates a problem with your '
'nested sampling software - it may be caused by '
'a bug in PolyChord which was fixed in PolyChord '
'v1.14, so try upgrading. I will try to give an '
'approximate allocation of threads but this may '
'fail.').format(
birth_logl, dup_births, inds_to_use), UserWarning)
extra_inds = np.random.choice(
inds_to_use, size=dup_births.shape[0] - inds_to_use.shape[0])
inds_to_use = np.concatenate((inds_to_use, extra_inds))
np.random.shuffle(inds_to_use)
birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]
assert np.all(~np.isnan(birth_inds)), np.isnan(birth_inds).sum()
np.random.set_state(state) # Reset random state
return birth_inds.astype(int) | Maps the iso-likelihood contours on which points were born to the
index of the dead point on this contour.
MultiNest and PolyChord use different values to identify the inital live
points which were sampled from the whole prior (PolyChord uses -1e+30
and MultiNest -0.179769313486231571E+309). However in each case the first
dead point must have been sampled from the whole prior, so for either
package we can use
init_birth = birth_logl_arr[0]
If there are many points with the same logl_arr and dup_assert is False,
these points are randomly assigned an order (to ensure results are
consistent, random seeding is used).
Parameters
----------
logl_arr: 1d numpy array
logl values of each point.
birth_logl_arr: 1d numpy array
Birth contours - i.e. logl values of the iso-likelihood contour from
within each point was sampled (on which it was born).
dup_assert: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
dup_warn: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
Returns
-------
birth_inds: 1d numpy array of ints
Step at which each element of logl_arr was sampled. Points sampled from
the whole prior are assigned value -1. | Below is the instruction that describes the task:
### Input:
Maps the iso-likelihood contours on which points were born to the
index of the dead point on this contour.
MultiNest and PolyChord use different values to identify the inital live
points which were sampled from the whole prior (PolyChord uses -1e+30
and MultiNest -0.179769313486231571E+309). However in each case the first
dead point must have been sampled from the whole prior, so for either
package we can use
init_birth = birth_logl_arr[0]
If there are many points with the same logl_arr and dup_assert is False,
these points are randomly assigned an order (to ensure results are
consistent, random seeding is used).
Parameters
----------
logl_arr: 1d numpy array
logl values of each point.
birth_logl_arr: 1d numpy array
Birth contours - i.e. logl values of the iso-likelihood contour from
within each point was sampled (on which it was born).
dup_assert: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
dup_warn: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
Returns
-------
birth_inds: 1d numpy array of ints
Step at which each element of logl_arr was sampled. Points sampled from
the whole prior are assigned value -1.
### Response:
def birth_inds_given_contours(birth_logl_arr, logl_arr, **kwargs):
"""Maps the iso-likelihood contours on which points were born to the
index of the dead point on this contour.
MultiNest and PolyChord use different values to identify the inital live
points which were sampled from the whole prior (PolyChord uses -1e+30
and MultiNest -0.179769313486231571E+309). However in each case the first
dead point must have been sampled from the whole prior, so for either
package we can use
init_birth = birth_logl_arr[0]
If there are many points with the same logl_arr and dup_assert is False,
these points are randomly assigned an order (to ensure results are
consistent, random seeding is used).
Parameters
----------
logl_arr: 1d numpy array
logl values of each point.
birth_logl_arr: 1d numpy array
Birth contours - i.e. logl values of the iso-likelihood contour from
within each point was sampled (on which it was born).
dup_assert: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
dup_warn: bool, optional
See ns_run_utils.check_ns_run_logls docstring.
Returns
-------
birth_inds: 1d numpy array of ints
Step at which each element of logl_arr was sampled. Points sampled from
the whole prior are assigned value -1.
"""
dup_assert = kwargs.pop('dup_assert', False)
dup_warn = kwargs.pop('dup_warn', False)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert logl_arr.ndim == 1, logl_arr.ndim
assert birth_logl_arr.ndim == 1, birth_logl_arr.ndim
# Check for duplicate logl values (if specified by dup_assert or dup_warn)
nestcheck.ns_run_utils.check_ns_run_logls(
{'logl': logl_arr}, dup_assert=dup_assert, dup_warn=dup_warn)
# Random seed so results are consistent if there are duplicate logls
state = np.random.get_state() # Save random state before seeding
np.random.seed(0)
# Calculate birth inds
init_birth = birth_logl_arr[0]
assert np.all(birth_logl_arr <= logl_arr), (
logl_arr[birth_logl_arr > logl_arr])
birth_inds = np.full(birth_logl_arr.shape, np.nan)
birth_inds[birth_logl_arr == init_birth] = -1
for i, birth_logl in enumerate(birth_logl_arr):
if not np.isnan(birth_inds[i]):
# birth ind has already been assigned
continue
dup_deaths = np.where(logl_arr == birth_logl)[0]
if dup_deaths.shape == (1,):
# death index is unique
birth_inds[i] = dup_deaths[0]
continue
# The remainder of this loop deals with the case that multiple points
# have the same logl value (=birth_logl). This can occur due to limited
# precision, or for likelihoods with contant regions. In this case we
# randomly assign the duplicates birth steps in a manner
# that provides a valid division into nested sampling runs
dup_births = np.where(birth_logl_arr == birth_logl)[0]
assert dup_deaths.shape[0] > 1, dup_deaths
if np.all(birth_logl_arr[dup_deaths] != birth_logl):
# If no points both are born and die on this contour, we can just
# randomly assign an order
np.random.shuffle(dup_deaths)
inds_to_use = dup_deaths
else:
# If some points are both born and die on the contour, we need to
# take care that the assigned birth inds do not result in some
# points dying before they are born
try:
inds_to_use = sample_less_than_condition(
dup_deaths, dup_births)
except ValueError:
raise ValueError((
'There is no way to allocate indexes dup_deaths={} such '
'that each is less than dup_births={}.').format(
dup_deaths, dup_births))
try:
# Add our selected inds_to_use values to the birth_inds array
# Note that dup_deaths (and hence inds to use) may have more
# members than dup_births, because one of the duplicates may be
# the final point in a thread. We therefore include only the first
# dup_births.shape[0] elements
birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]
except ValueError:
warnings.warn((
'for logl={}, the number of points born (indexes='
'{}) is bigger than the number of points dying '
'(indexes={}). This indicates a problem with your '
'nested sampling software - it may be caused by '
'a bug in PolyChord which was fixed in PolyChord '
'v1.14, so try upgrading. I will try to give an '
'approximate allocation of threads but this may '
'fail.').format(
birth_logl, dup_births, inds_to_use), UserWarning)
extra_inds = np.random.choice(
inds_to_use, size=dup_births.shape[0] - inds_to_use.shape[0])
inds_to_use = np.concatenate((inds_to_use, extra_inds))
np.random.shuffle(inds_to_use)
birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]
assert np.all(~np.isnan(birth_inds)), np.isnan(birth_inds).sum()
np.random.set_state(state) # Reset random state
return birth_inds.astype(int) |
def get_field_for_object(field_type, field_id, form):
'''
This tag allows one to get a specific series or event form field
in registration views.
'''
field_name = field_type + '_' + str(field_id)
return form.__getitem__(field_name) | This tag allows one to get a specific series or event form field
in registration views. | Below is the instruction that describes the task:
### Input:
This tag allows one to get a specific series or event form field
in registration views.
### Response:
def get_field_for_object(field_type, field_id, form):
'''
This tag allows one to get a specific series or event form field
in registration views.
'''
field_name = field_type + '_' + str(field_id)
return form.__getitem__(field_name) |
def get_stoplist(language):
"""Returns an built-in stop-list for the language as a set of words."""
file_path = os.path.join("stoplists", "%s.txt" % language)
try:
stopwords = pkgutil.get_data("justext", file_path)
except IOError:
raise ValueError(
"Stoplist for language '%s' is missing. "
"Please use function 'get_stoplists' for complete list of stoplists "
"and feel free to contribute by your own stoplist." % language
)
return frozenset(w.decode("utf8").lower() for w in stopwords.splitlines()) | Returns an built-in stop-list for the language as a set of words. | Below is the instruction that describes the task:
### Input:
Returns an built-in stop-list for the language as a set of words.
### Response:
def get_stoplist(language):
"""Returns an built-in stop-list for the language as a set of words."""
file_path = os.path.join("stoplists", "%s.txt" % language)
try:
stopwords = pkgutil.get_data("justext", file_path)
except IOError:
raise ValueError(
"Stoplist for language '%s' is missing. "
"Please use function 'get_stoplists' for complete list of stoplists "
"and feel free to contribute by your own stoplist." % language
)
return frozenset(w.decode("utf8").lower() for w in stopwords.splitlines()) |
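A usage sketch for get_stoplist(). The exact stoplist file names shipped with justext are an assumption here ('English' follows the package's capitalised naming); a missing name raises the ValueError shown in the function.
stop_words = get_stoplist('English')        # assumed file name: stoplists/English.txt
print('the' in stop_words, len(stop_words))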
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
bottleneck_dir, image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor, module_name):
"""Retrieves bottleneck values for cached images.
If no distortions are being applied, this function can retrieve the cached
bottleneck values directly from disk for images. It picks a random set of
images from the specified category.
Args:
sess: Current TensorFlow Session.
image_lists: OrderedDict of training images for each label.
how_many: If positive, a random sample of this size will be chosen.
If negative, all bottlenecks will be retrieved.
category: Name string of which set to pull from - training, testing, or
validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
image_dir: Root folder string of the subfolders containing the training
images.
jpeg_data_tensor: The layer to feed jpeg image data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
module_name: The name of the image module being used.
Returns:
List of bottleneck arrays, their corresponding ground truths, and the
relevant filenames.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
filenames = []
if how_many >= 0:
# Retrieve a random sample of bottlenecks.
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name)
bottlenecks.append(bottleneck)
ground_truths.append(label_index)
filenames.append(image_name)
else:
# Retrieve all bottlenecks.
for label_index, label_name in enumerate(image_lists.keys()):
for image_index, image_name in enumerate(
image_lists[label_name][category]):
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name)
bottlenecks.append(bottleneck)
ground_truths.append(label_index)
filenames.append(image_name)
return bottlenecks, ground_truths, filenames | Retrieves bottleneck values for cached images.
If no distortions are being applied, this function can retrieve the cached
bottleneck values directly from disk for images. It picks a random set of
images from the specified category.
Args:
sess: Current TensorFlow Session.
image_lists: OrderedDict of training images for each label.
how_many: If positive, a random sample of this size will be chosen.
If negative, all bottlenecks will be retrieved.
category: Name string of which set to pull from - training, testing, or
validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
image_dir: Root folder string of the subfolders containing the training
images.
jpeg_data_tensor: The layer to feed jpeg image data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
module_name: The name of the image module being used.
Returns:
List of bottleneck arrays, their corresponding ground truths, and the
relevant filenames. | Below is the instruction that describes the task:
### Input:
Retrieves bottleneck values for cached images.
If no distortions are being applied, this function can retrieve the cached
bottleneck values directly from disk for images. It picks a random set of
images from the specified category.
Args:
sess: Current TensorFlow Session.
image_lists: OrderedDict of training images for each label.
how_many: If positive, a random sample of this size will be chosen.
If negative, all bottlenecks will be retrieved.
category: Name string of which set to pull from - training, testing, or
validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
image_dir: Root folder string of the subfolders containing the training
images.
jpeg_data_tensor: The layer to feed jpeg image data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
module_name: The name of the image module being used.
Returns:
List of bottleneck arrays, their corresponding ground truths, and the
relevant filenames.
### Response:
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
bottleneck_dir, image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor, module_name):
"""Retrieves bottleneck values for cached images.
If no distortions are being applied, this function can retrieve the cached
bottleneck values directly from disk for images. It picks a random set of
images from the specified category.
Args:
sess: Current TensorFlow Session.
image_lists: OrderedDict of training images for each label.
how_many: If positive, a random sample of this size will be chosen.
If negative, all bottlenecks will be retrieved.
category: Name string of which set to pull from - training, testing, or
validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
image_dir: Root folder string of the subfolders containing the training
images.
jpeg_data_tensor: The layer to feed jpeg image data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
module_name: The name of the image module being used.
Returns:
List of bottleneck arrays, their corresponding ground truths, and the
relevant filenames.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
filenames = []
if how_many >= 0:
# Retrieve a random sample of bottlenecks.
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name)
bottlenecks.append(bottleneck)
ground_truths.append(label_index)
filenames.append(image_name)
else:
# Retrieve all bottlenecks.
for label_index, label_name in enumerate(image_lists.keys()):
for image_index, image_name in enumerate(
image_lists[label_name][category]):
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name)
bottlenecks.append(bottleneck)
ground_truths.append(label_index)
filenames.append(image_name)
return bottlenecks, ground_truths, filenames |
def json_charts(self, req):
""" Return charting data.
"""
disk_used, disk_total, disk_detail = 0, 0, []
for disk_usage_path in self.cfg.disk_usage_path.split(os.pathsep):
disk_usage = self.guarded(psutil.disk_usage, os.path.expanduser(disk_usage_path.strip()))
if disk_usage:
disk_used += disk_usage.used
disk_total += disk_usage.total
disk_detail.append((disk_usage.used, disk_usage.total))
data = dict(
engine = self.json_engine(req),
uptime = time.time() - psutil.BOOT_TIME, # pylint: disable=no-member
fqdn = self.guarded(socket.getfqdn),
cpu_usage = self.guarded(psutil.cpu_percent, 0),
ram_usage = self.guarded(psutil.virtual_memory),
swap_usage = self.guarded(psutil.swap_memory),
disk_usage = (disk_used, disk_total, disk_detail) if disk_total else None,
disk_io = self.guarded(psutil.disk_io_counters),
net_io = self.guarded(psutil.net_io_counters),
)
return data | Return charting data. | Below is the instruction that describes the task:
### Input:
Return charting data.
### Response:
def json_charts(self, req):
""" Return charting data.
"""
disk_used, disk_total, disk_detail = 0, 0, []
for disk_usage_path in self.cfg.disk_usage_path.split(os.pathsep):
disk_usage = self.guarded(psutil.disk_usage, os.path.expanduser(disk_usage_path.strip()))
if disk_usage:
disk_used += disk_usage.used
disk_total += disk_usage.total
disk_detail.append((disk_usage.used, disk_usage.total))
data = dict(
engine = self.json_engine(req),
uptime = time.time() - psutil.BOOT_TIME, # pylint: disable=no-member
fqdn = self.guarded(socket.getfqdn),
cpu_usage = self.guarded(psutil.cpu_percent, 0),
ram_usage = self.guarded(psutil.virtual_memory),
swap_usage = self.guarded(psutil.swap_memory),
disk_usage = (disk_used, disk_total, disk_detail) if disk_total else None,
disk_io = self.guarded(psutil.disk_io_counters),
net_io = self.guarded(psutil.net_io_counters),
)
return data |
def change_breakpoint_state(self, bp_number, enabled, condition=None):
""" Change breakpoint status or `condition` expression.
:param bp_number: number of breakpoint to change
:return: None or an error message (string)
"""
if not (0 <= bp_number < len(IKBreakpoint.breakpoints_by_number)):
return "Found no breakpoint numbered: %s" % bp_number
bp = IKBreakpoint.breakpoints_by_number[bp_number]
if not bp:
return "Found no breakpoint numbered %s" % bp_number
_logger.b_debug(" change_breakpoint_state(bp_number=%s, enabled=%s, "
"condition=%s) found %s",
bp_number,
enabled,
repr(condition),
bp)
bp.enabled = enabled
bp.condition = condition # update condition for conditional breakpoints
IKBreakpoint.update_active_breakpoint_flag() # force flag refresh
if self.pending_stop or IKBreakpoint.any_active_breakpoint:
self.enable_tracing()
else:
self.disable_tracing()
return None | Change breakpoint status or `condition` expression.
:param bp_number: number of breakpoint to change
:return: None or an error message (string) | Below is the instruction that describes the task:
### Input:
Change breakpoint status or `condition` expression.
:param bp_number: number of breakpoint to change
:return: None or an error message (string)
### Response:
def change_breakpoint_state(self, bp_number, enabled, condition=None):
""" Change breakpoint status or `condition` expression.
:param bp_number: number of breakpoint to change
:return: None or an error message (string)
"""
if not (0 <= bp_number < len(IKBreakpoint.breakpoints_by_number)):
return "Found no breakpoint numbered: %s" % bp_number
bp = IKBreakpoint.breakpoints_by_number[bp_number]
if not bp:
return "Found no breakpoint numbered %s" % bp_number
_logger.b_debug(" change_breakpoint_state(bp_number=%s, enabled=%s, "
"condition=%s) found %s",
bp_number,
enabled,
repr(condition),
bp)
bp.enabled = enabled
bp.condition = condition # update condition for conditional breakpoints
IKBreakpoint.update_active_breakpoint_flag() # force flag refresh
if self.pending_stop or IKBreakpoint.any_active_breakpoint:
self.enable_tracing()
else:
self.disable_tracing()
return None |
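A usage sketch for change_breakpoint_state(). The debugger variable is a hypothetical instance of the class; the return value is None on success and an error string otherwise, which the snippet checks.
# Hedged sketch: turn breakpoint 3 into a conditional breakpoint.
error = debugger.change_breakpoint_state(3, True, condition='retries > 5')
if error is not None:
    print(error)    # e.g. "Found no breakpoint numbered: 3"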
def lstm_area_attention_base():
"""Hparams for LSTM with area attention."""
hparams = lstm_luong_attention()
hparams.batch_size = 16384
hparams.num_hidden_layers = 2
hparams.hidden_size = 1024
hparams.num_heads = 4
hparams.dropout = 0.2
hparams.learning_rate = 0.1
hparams.max_area_width = 2
hparams.area_key_mode = "mean"
hparams.area_value_mode = "sum"
return hparams | Hparams for LSTM with area attention. | Below is the instruction that describes the task:
### Input:
Hparams for LSTM with area attention.
### Response:
def lstm_area_attention_base():
"""Hparams for LSTM with area attention."""
hparams = lstm_luong_attention()
hparams.batch_size = 16384
hparams.num_hidden_layers = 2
hparams.hidden_size = 1024
hparams.num_heads = 4
hparams.dropout = 0.2
hparams.learning_rate = 0.1
hparams.max_area_width = 2
hparams.area_key_mode = "mean"
hparams.area_value_mode = "sum"
return hparams |
def connect(self):
"""Logs into the specified ftp server and returns connector."""
for tried_connection_count in range(CFG_FTP_CONNECTION_ATTEMPTS):
try:
self.ftp = FtpHandler(self.config.OXFORD.URL,
self.config.OXFORD.LOGIN,
self.config.OXFORD.PASSWORD)
self.logger.debug(("Successful connection to the "
"Oxford University Press server"))
return
except socket_timeout_exception as err:
self.logger.error(('Failed to connect %d of %d times. '
'Will sleep for %d seconds and try again.')
% (tried_connection_count+1,
CFG_FTP_CONNECTION_ATTEMPTS,
CFG_FTP_TIMEOUT_SLEEP_DURATION))
time.sleep(CFG_FTP_TIMEOUT_SLEEP_DURATION)
except Exception as err:
self.logger.error(('Failed to connect to the Oxford '
'University Press server. %s') % (err,))
break
raise LoginException(err) | Logs into the specified ftp server and returns connector. | Below is the instruction that describes the task:
### Input:
Logs into the specified ftp server and returns connector.
### Response:
def connect(self):
"""Logs into the specified ftp server and returns connector."""
for tried_connection_count in range(CFG_FTP_CONNECTION_ATTEMPTS):
try:
self.ftp = FtpHandler(self.config.OXFORD.URL,
self.config.OXFORD.LOGIN,
self.config.OXFORD.PASSWORD)
self.logger.debug(("Successful connection to the "
"Oxford University Press server"))
return
except socket_timeout_exception as err:
self.logger.error(('Failed to connect %d of %d times. '
'Will sleep for %d seconds and try again.')
% (tried_connection_count+1,
CFG_FTP_CONNECTION_ATTEMPTS,
CFG_FTP_TIMEOUT_SLEEP_DURATION))
time.sleep(CFG_FTP_TIMEOUT_SLEEP_DURATION)
except Exception as err:
self.logger.error(('Failed to connect to the Oxford '
'University Press server. %s') % (err,))
break
raise LoginException(err) |
def stop(self, ignore_state=False):
"""Stops the service."""
self.logger.debug("Stop service")
self._toggle_running(False, ignore_state) | Stops the service. | Below is the instruction that describes the task:
### Input:
Stops the service.
### Response:
def stop(self, ignore_state=False):
"""Stops the service."""
self.logger.debug("Stop service")
self._toggle_running(False, ignore_state) |
def update_or_create(cls, org, provider, exists):
"""
Update or create credentials state.
"""
instance, created = cls.objects.update_or_create(
org=org,
provider=provider,
defaults={'exists': exists},
)
return instance, created | Update or create credentials state. | Below is the instruction that describes the task:
### Input:
Update or create credentials state.
### Response:
def update_or_create(cls, org, provider, exists):
"""
Update or create credentials state.
"""
instance, created = cls.objects.update_or_create(
org=org,
provider=provider,
defaults={'exists': exists},
)
return instance, created |
def fetch(self, webfonts):
"""
Store every defined webfonts.
Webfont are stored with sort on their name.
Args:
webfonts (dict): Dictionnary of webfont settings from
``settings.ICOMOON_WEBFONTS``.
"""
sorted_keys = sorted(webfonts.keys())
for webfont_name in sorted_keys:
self.get(webfont_name, webfonts[webfont_name]) | Store every defined webfonts.
Webfont are stored with sort on their name.
Args:
webfonts (dict): Dictionnary of webfont settings from
``settings.ICOMOON_WEBFONTS``. | Below is the instruction that describes the task:
### Input:
Store every defined webfonts.
Webfont are stored with sort on their name.
Args:
webfonts (dict): Dictionnary of webfont settings from
``settings.ICOMOON_WEBFONTS``.
### Response:
def fetch(self, webfonts):
"""
Store every defined webfonts.
Webfont are stored with sort on their name.
Args:
webfonts (dict): Dictionnary of webfont settings from
``settings.ICOMOON_WEBFONTS``.
"""
sorted_keys = sorted(webfonts.keys())
for webfont_name in sorted_keys:
self.get(webfont_name, webfonts[webfont_name]) |
def cleanup_full_hashes(self, keep_expired_for=(60 * 60 * 12)):
"""Remove long expired full_hash entries."""
q = '''DELETE FROM full_hash WHERE expires_at < datetime(current_timestamp, '-{} SECONDS')
'''
log.info('Cleaning up full_hash entries expired more than {} seconds ago.'.format(keep_expired_for))
with self.get_cursor() as dbc:
dbc.execute(q.format(int(keep_expired_for))) | Remove long expired full_hash entries. | Below is the instruction that describes the task:
### Input:
Remove long expired full_hash entries.
### Response:
def cleanup_full_hashes(self, keep_expired_for=(60 * 60 * 12)):
"""Remove long expired full_hash entries."""
q = '''DELETE FROM full_hash WHERE expires_at < datetime(current_timestamp, '-{} SECONDS')
'''
log.info('Cleaning up full_hash entries expired more than {} seconds ago.'.format(keep_expired_for))
with self.get_cursor() as dbc:
dbc.execute(q.format(int(keep_expired_for))) |
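A usage sketch for cleanup_full_hashes(). The storage variable is a hypothetical instance of the SQLite-backed class the method belongs to; the argument overrides the default twelve-hour grace period.
# Hedged sketch: drop full_hash rows that expired more than one hour ago.
storage.cleanup_full_hashes(keep_expired_for=60 * 60)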
def TryCompile( self, text, extension):
"""Compiles the program given in text to an env.Object, using extension
as file extension (e.g. '.c'). Returns 1, if compilation was
successful, 0 otherwise. The target is saved in self.lastTarget (for
further processing).
"""
return self.TryBuild(self.env.Object, text, extension) | Compiles the program given in text to an env.Object, using extension
as file extension (e.g. '.c'). Returns 1, if compilation was
successful, 0 otherwise. The target is saved in self.lastTarget (for
further processing). | Below is the instruction that describes the task:
### Input:
Compiles the program given in text to an env.Object, using extension
as file extension (e.g. '.c'). Returns 1, if compilation was
successful, 0 otherwise. The target is saved in self.lastTarget (for
further processing).
### Response:
def TryCompile( self, text, extension):
"""Compiles the program given in text to an env.Object, using extension
as file extension (e.g. '.c'). Returns 1, if compilation was
successful, 0 otherwise. The target is saved in self.lastTarget (for
further processing).
"""
return self.TryBuild(self.env.Object, text, extension) |
def detect_images_and_tex(
file_list,
allowed_image_types=('eps', 'png', 'ps', 'jpg', 'pdf'),
timeout=20):
"""Detect from a list of files which are TeX or images.
:param: file_list (list): list of absolute file paths
:param: allowed_image_types (list): list of allows image formats
:param: timeout (int): the timeout value on shell commands.
:return: (image_list, tex_file) (([string, string, ...], string)):
list of images in the tarball and the name of the TeX file in the
tarball.
"""
tex_file_extension = 'tex'
image_list = []
might_be_tex = []
for extracted_file in file_list:
# Ignore directories and hidden (metadata) files
if os.path.isdir(extracted_file) \
or os.path.basename(extracted_file).startswith('.'):
continue
magic_str = magic.from_file(extracted_file, mime=True)
if magic_str == "application/x-tex":
might_be_tex.append(extracted_file)
elif magic_str.startswith('image/') \
or magic_str == "application/postscript":
image_list.append(extracted_file)
# If neither, maybe it is TeX or an image anyway, otherwise,
# we don't care.
else:
_, dotted_file_extension = os.path.splitext(extracted_file)
file_extension = dotted_file_extension[1:]
if file_extension == tex_file_extension:
might_be_tex.append(extracted_file)
elif file_extension in allowed_image_types:
image_list.append(extracted_file)
return image_list, might_be_tex | Detect from a list of files which are TeX or images.
:param: file_list (list): list of absolute file paths
:param: allowed_image_types (list): list of allows image formats
:param: timeout (int): the timeout value on shell commands.
:return: (image_list, tex_file) (([string, string, ...], string)):
list of images in the tarball and the name of the TeX file in the
tarball. | Below is the instruction that describes the task:
### Input:
Detect from a list of files which are TeX or images.
:param: file_list (list): list of absolute file paths
:param: allowed_image_types (list): list of allows image formats
:param: timeout (int): the timeout value on shell commands.
:return: (image_list, tex_file) (([string, string, ...], string)):
list of images in the tarball and the name of the TeX file in the
tarball.
### Response:
def detect_images_and_tex(
file_list,
allowed_image_types=('eps', 'png', 'ps', 'jpg', 'pdf'),
timeout=20):
"""Detect from a list of files which are TeX or images.
:param: file_list (list): list of absolute file paths
:param: allowed_image_types (list): list of allows image formats
:param: timeout (int): the timeout value on shell commands.
:return: (image_list, tex_file) (([string, string, ...], string)):
list of images in the tarball and the name of the TeX file in the
tarball.
"""
tex_file_extension = 'tex'
image_list = []
might_be_tex = []
for extracted_file in file_list:
# Ignore directories and hidden (metadata) files
if os.path.isdir(extracted_file) \
or os.path.basename(extracted_file).startswith('.'):
continue
magic_str = magic.from_file(extracted_file, mime=True)
if magic_str == "application/x-tex":
might_be_tex.append(extracted_file)
elif magic_str.startswith('image/') \
or magic_str == "application/postscript":
image_list.append(extracted_file)
# If neither, maybe it is TeX or an image anyway, otherwise,
# we don't care.
else:
_, dotted_file_extension = os.path.splitext(extracted_file)
file_extension = dotted_file_extension[1:]
if file_extension == tex_file_extension:
might_be_tex.append(extracted_file)
elif file_extension in allowed_image_types:
image_list.append(extracted_file)
return image_list, might_be_tex |
def get(self, timeout=None, raise_error=True):
"""
Args:
timeout (float): timeout for query element, unit seconds
Default 10s
raise_error (bool): whether to raise error if element not found
Returns:
Element: UI Element
Raises:
WDAElementNotFoundError if raise_error is True else None
"""
start_time = time.time()
if timeout is None:
timeout = self.timeout
while True:
elems = self.find_elements()
if len(elems) > 0:
return elems[0]
if start_time + timeout < time.time():
break
time.sleep(0.01)
# check alert again
if self.session.alert.exists and self.http.alert_callback:
self.http.alert_callback()
return self.get(timeout, raise_error)
if raise_error:
raise WDAElementNotFoundError("element not found") | Args:
timeout (float): timeout for query element, unit seconds
Default 10s
raise_error (bool): whether to raise error if element not found
Returns:
Element: UI Element
Raises:
WDAElementNotFoundError if raise_error is True else None | Below is the instruction that describes the task:
### Input:
Args:
timeout (float): timeout for query element, unit seconds
Default 10s
raise_error (bool): whether to raise error if element not found
Returns:
Element: UI Element
Raises:
WDAElementNotFoundError if raise_error is True else None
### Response:
def get(self, timeout=None, raise_error=True):
"""
Args:
timeout (float): timeout for query element, unit seconds
Default 10s
raise_error (bool): whether to raise error if element not found
Returns:
Element: UI Element
Raises:
WDAElementNotFoundError if raise_error is True else None
"""
start_time = time.time()
if timeout is None:
timeout = self.timeout
while True:
elems = self.find_elements()
if len(elems) > 0:
return elems[0]
if start_time + timeout < time.time():
break
time.sleep(0.01)
# check alert again
if self.session.alert.exists and self.http.alert_callback:
self.http.alert_callback()
return self.get(timeout, raise_error)
if raise_error:
raise WDAElementNotFoundError("element not found") |
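A hedged usage sketch for get(). The selector expression follows the facebook-wda style the method appears to come from, but the exact selector API is an assumption; the key point is that raise_error=False turns a miss into a None return instead of an exception.
# Hedged sketch: wait up to 5 seconds for a "Login" element, without raising on a miss.
el = session(label='Login').get(timeout=5.0, raise_error=False)
print('found' if el is not None else 'not found')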
def fill_opacity(self, opacity):
"""
:param opacity: 0.0 ~ 1.0
"""
opacity = pgmagick.DrawableFillOpacity(float(opacity))
self.drawer.append(opacity) | :param opacity: 0.0 ~ 1.0 | Below is the instruction that describes the task:
### Input:
:param opacity: 0.0 ~ 1.0
### Response:
def fill_opacity(self, opacity):
"""
:param opacity: 0.0 ~ 1.0
"""
opacity = pgmagick.DrawableFillOpacity(float(opacity))
self.drawer.append(opacity) |
def all_cities():
"""
Get a list of all Backpage city names.
Returns:
list of city names as Strings
"""
cities = []
fname = pkg_resources.resource_filename(__name__, 'resources/CityPops.csv')
with open(fname, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
cities.append(row[0])
cities.sort()
return cities | Get a list of all Backpage city names.
Returns:
list of city names as Strings | Below is the instruction that describes the task:
### Input:
Get a list of all Backpage city names.
Returns:
list of city names as Strings
### Response:
def all_cities():
"""
Get a list of all Backpage city names.
Returns:
list of city names as Strings
"""
cities = []
fname = pkg_resources.resource_filename(__name__, 'resources/CityPops.csv')
with open(fname, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
cities.append(row[0])
cities.sort()
return cities |
def get_root(root, phonetic, compound):
"""Get the root form without markers.
Parameters
----------
root: str
The word root form.
phonetic: boolean
If True, add phonetic information to the root forms.
compound: boolean
if True, add compound word markers to root forms.
"""
global compound_regex
if not phonetic:
root = trim_phonetics(root)
if not compound:
root = trim_compounds(root)
return root | Get the root form without markers.
Parameters
----------
root: str
The word root form.
phonetic: boolean
If True, add phonetic information to the root forms.
compound: boolean
if True, add compound word markers to root forms. | Below is the instruction that describes the task:
### Input:
Get the root form without markers.
Parameters
----------
root: str
The word root form.
phonetic: boolean
If True, add phonetic information to the root forms.
compound: boolean
if True, add compound word markers to root forms.
### Response:
def get_root(root, phonetic, compound):
"""Get the root form without markers.
Parameters
----------
root: str
The word root form.
phonetic: boolean
If True, add phonetic information to the root forms.
compound: boolean
if True, add compound word markers to root forms.
"""
global compound_regex
if not phonetic:
root = trim_phonetics(root)
if not compound:
root = trim_compounds(root)
return root |
def _coord2offset(self, coord):
"""Convert a normalized coordinate to an item offset."""
size = self.size
offset = 0
for dim, index in enumerate(coord):
size //= self._normshape[dim]
offset += size * index
return offset | Convert a normalized coordinate to an item offset. | Below is the instruction that describes the task:
### Input:
Convert a normalized coordinate to an item offset.
### Response:
def _coord2offset(self, coord):
"""Convert a normalized coordinate to an item offset."""
size = self.size
offset = 0
for dim, index in enumerate(coord):
size //= self._normshape[dim]
offset += size * index
return offset |
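A self-contained restatement of the arithmetic in _coord2offset(), useful for checking a concrete case by hand: for a normalised shape (2, 3, 4) the strides are 12, 4 and 1, so coordinate (1, 2, 3) maps to offset 1*12 + 2*4 + 3 = 23.
from functools import reduce

def row_major_offset(shape, coord):
    """Standalone restatement of the offset computation above."""
    size = reduce(lambda a, b: a * b, shape)   # total number of items
    offset = 0
    for dim, index in enumerate(coord):
        size //= shape[dim]                    # stride of this dimension
        offset += size * index
    return offset

print(row_major_offset((2, 3, 4), (1, 2, 3)))  # 23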
def create_rack(self):
"""Get an instance of rack services facade."""
return Rack(
self.networkapi_url,
self.user,
self.password,
self.user_ldap) | Get an instance of rack services facade. | Below is the instruction that describes the task:
### Input:
Get an instance of rack services facade.
### Response:
def create_rack(self):
"""Get an instance of rack services facade."""
return Rack(
self.networkapi_url,
self.user,
self.password,
self.user_ldap) |
def build(term_to_index_dict):
'''
Parameters
----------
term_to_index_dict: term -> idx dictionary
Returns
-------
IndexStore
'''
idxstore = IndexStore()
idxstore._val2i = term_to_index_dict
idxstore._next_i = len(term_to_index_dict)
idxstore._i2val = [None for _ in range(idxstore._next_i)]
for term, idx in idxstore._val2i.items():
idxstore._i2val[idx] = term
return idxstore | Parameters
----------
term_to_index_dict: term -> idx dictionary
Returns
-------
IndexStore | Below is the instruction that describes the task:
### Input:
Parameters
----------
term_to_index_dict: term -> idx dictionary
Returns
-------
IndexStore
### Response:
def build(term_to_index_dict):
'''
Parameters
----------
term_to_index_dict: term -> idx dictionary
Returns
-------
IndexStore
'''
idxstore = IndexStore()
idxstore._val2i = term_to_index_dict
idxstore._next_i = len(term_to_index_dict)
idxstore._i2val = [None for _ in range(idxstore._next_i)]
for term, idx in idxstore._val2i.items():
idxstore._i2val[idx] = term
return idxstore |
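A usage sketch for build(). The mapping is illustrative, and the assertion peeks at the private _i2val list only because that is exactly what the builder shown above populates.
term2idx = {'cat': 0, 'dog': 1, 'fish': 2}
idxstore = IndexStore.build(term2idx)
assert idxstore._i2val[1] == 'dog'   # reverse lookup reconstructed from the forward map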
def terminateMember(self, clusterId, memberId):
"""
Parameters:
- clusterId
- memberId
"""
self.send_terminateMember(clusterId, memberId)
return self.recv_terminateMember() | Parameters:
- clusterId
- memberId | Below is the instruction that describes the task:
### Input:
Parameters:
- clusterId
- memberId
### Response:
def terminateMember(self, clusterId, memberId):
"""
Parameters:
- clusterId
- memberId
"""
self.send_terminateMember(clusterId, memberId)
return self.recv_terminateMember() |
def add_interrupt(self, interrupt):
"""
Adds the interrupt to the internal interrupt storage ``self.interrupts`` and
registers the interrupt address in the internal constants.
"""
self.interrupts.append(interrupt)
self.constants[interrupt.name] = interrupt.address | Adds the interrupt to the internal interrupt storage ``self.interrupts`` and
registers the interrupt address in the internal constants. | Below is the instruction that describes the task:
### Input:
Adds the interrupt to the internal interrupt storage ``self.interrupts`` and
registers the interrupt address in the internal constants.
### Response:
def add_interrupt(self, interrupt):
"""
Adds the interrupt to the internal interrupt storage ``self.interrupts`` and
registers the interrupt address in the internal constants.
"""
self.interrupts.append(interrupt)
self.constants[interrupt.name] = interrupt.address |
def handle_exception(self, frame, exc_info):
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
type_, value, tb = exc_info
# Python 3 is broken see http://bugs.python.org/issue17413
_value = value
if not isinstance(_value, BaseException):
_value = type_(value)
fake_exc_info = type_, _value, tb
log.error('Exception during trace', exc_info=fake_exc_info)
self.obj_cache[id(exc_info)] = exc_info
self.extra_vars['__exception__'] = exc_info
exception = type_.__name__
exception_description = str(value)
init = 'Echo|%s' % dump({
'for': '__exception__',
'val': escape('%s: %s') % (exception, exception_description)
})
# User exception is 4 frames away from exception
frame = frame or sys._getframe().f_back.f_back.f_back.f_back
self.interaction(
frame, tb, exception, exception_description, init=init
) | This function is called if an exception occurs,
but only if we are to stop at or just below this level. | Below is the instruction that describes the task:
### Input:
This function is called if an exception occurs,
but only if we are to stop at or just below this level.
### Response:
def handle_exception(self, frame, exc_info):
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
type_, value, tb = exc_info
# Python 3 is broken see http://bugs.python.org/issue17413
_value = value
if not isinstance(_value, BaseException):
_value = type_(value)
fake_exc_info = type_, _value, tb
log.error('Exception during trace', exc_info=fake_exc_info)
self.obj_cache[id(exc_info)] = exc_info
self.extra_vars['__exception__'] = exc_info
exception = type_.__name__
exception_description = str(value)
init = 'Echo|%s' % dump({
'for': '__exception__',
'val': escape('%s: %s') % (exception, exception_description)
})
# User exception is 4 frames away from exception
frame = frame or sys._getframe().f_back.f_back.f_back.f_back
self.interaction(
frame, tb, exception, exception_description, init=init
) |
def stream(self, code):
"""Stream in RiveScript source code dynamically.
:param code: Either a string containing RiveScript code or an array of
lines of RiveScript code.
"""
self._say("Streaming code.")
if type(code) in [str, text_type]:
code = code.split("\n")
self._parse("stream()", code) | Stream in RiveScript source code dynamically.
:param code: Either a string containing RiveScript code or an array of
lines of RiveScript code. | Below is the instruction that describes the task:
### Input:
Stream in RiveScript source code dynamically.
:param code: Either a string containing RiveScript code or an array of
lines of RiveScript code.
### Response:
def stream(self, code):
"""Stream in RiveScript source code dynamically.
:param code: Either a string containing RiveScript code or an array of
lines of RiveScript code.
"""
self._say("Streaming code.")
if type(code) in [str, text_type]:
code = code.split("\n")
self._parse("stream()", code) |
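A usage sketch for stream(). The import path and the sort_replies()/reply() calls follow the public rivescript-python API but do not appear in the row itself, so treat them as assumptions; the two streamed lines are ordinary RiveScript trigger/reply syntax.
from rivescript import RiveScript   # assumed package import

bot = RiveScript()
bot.stream([
    '+ hello bot',
    '- Hello, human!',
])
bot.sort_replies()
print(bot.reply('localuser', 'hello bot'))   # "Hello, human!"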
def to_json(self, version=Version.latest):
"""Tries to convert an object into a JSON representation and return
the resulting string
An Object can define how it is serialized by overriding the as_version()
implementation. A caller may further define how the object is serialized
by passing in a custom encoder. The default encoder will ignore
properties of an object that are None at the time of serialization.
:param version: The version to which the object must be serialized to.
This will default to the latest version supported by the library.
:type version: str | unicode
"""
return json.dumps(self.as_version(version)) | Tries to convert an object into a JSON representation and return
the resulting string
An Object can define how it is serialized by overriding the as_version()
implementation. A caller may further define how the object is serialized
by passing in a custom encoder. The default encoder will ignore
properties of an object that are None at the time of serialization.
:param version: The version to which the object must be serialized to.
This will default to the latest version supported by the library.
:type version: str | unicode | Below is the instruction that describes the task:
### Input:
Tries to convert an object into a JSON representation and return
the resulting string
An Object can define how it is serialized by overriding the as_version()
implementation. A caller may further define how the object is serialized
by passing in a custom encoder. The default encoder will ignore
properties of an object that are None at the time of serialization.
:param version: The version to which the object must be serialized to.
This will default to the latest version supported by the library.
:type version: str | unicode
### Response:
def to_json(self, version=Version.latest):
"""Tries to convert an object into a JSON representation and return
the resulting string
An Object can define how it is serialized by overriding the as_version()
implementation. A caller may further define how the object is serialized
by passing in a custom encoder. The default encoder will ignore
properties of an object that are None at the time of serialization.
:param version: The version to which the object must be serialized to.
This will default to the latest version supported by the library.
:type version: str | unicode
"""
return json.dumps(self.as_version(version)) |
def xdr(self):
"""Generate base64 encoded XDR PublicKey object.
Return a base64 encoded PublicKey XDR object, for sending over the wire
when interacting with stellar.
:return: The base64 encoded PublicKey XDR structure.
"""
kp = Xdr.StellarXDRPacker()
kp.pack_PublicKey(self.account_xdr_object())
return base64.b64encode(kp.get_buffer()) | Generate base64 encoded XDR PublicKey object.
Return a base64 encoded PublicKey XDR object, for sending over the wire
when interacting with stellar.
:return: The base64 encoded PublicKey XDR structure. | Below is the instruction that describes the task:
### Input:
Generate base64 encoded XDR PublicKey object.
Return a base64 encoded PublicKey XDR object, for sending over the wire
when interacting with stellar.
:return: The base64 encoded PublicKey XDR structure.
### Response:
def xdr(self):
"""Generate base64 encoded XDR PublicKey object.
Return a base64 encoded PublicKey XDR object, for sending over the wire
when interacting with stellar.
:return: The base64 encoded PublicKey XDR structure.
"""
kp = Xdr.StellarXDRPacker()
kp.pack_PublicKey(self.account_xdr_object())
return base64.b64encode(kp.get_buffer()) |
def get_saver(scope, collections=(tf.GraphKeys.GLOBAL_VARIABLES,), # pylint: disable=redefined-outer-name
context=None, **kwargs):
"""Builds a `tf.train.Saver` for the scope or module, with normalized names.
The names of the variables are normalized to remove the scope prefix.
This allows the same variables to be restored into another similar scope or
module using a complementary `tf.train.Saver` object.
Args:
scope: Scope or module. Variables within will be saved or restored.
collections: Sequence of collections of variables to restrict
`tf.train.Saver` to. By default this is `tf.GraphKeys.GLOBAL_VARIABLES`
which includes moving averages variables as well as trainable variables.
context: Scope or module, identical to or parent of `scope`. If given, this
will be used as the stripped prefix.
**kwargs: Extra keyword arguments to pass to tf.train.Saver.
Returns:
A `tf.train.Saver` object for Variables in the scope or module.
"""
variable_map = {}
for collection in collections:
variable_map.update(get_normalized_variable_map(scope, collection, context))
return tf.train.Saver(var_list=variable_map, **kwargs) | Builds a `tf.train.Saver` for the scope or module, with normalized names.
The names of the variables are normalized to remove the scope prefix.
This allows the same variables to be restored into another similar scope or
module using a complementary `tf.train.Saver` object.
Args:
scope: Scope or module. Variables within will be saved or restored.
collections: Sequence of collections of variables to restrict
`tf.train.Saver` to. By default this is `tf.GraphKeys.GLOBAL_VARIABLES`
which includes moving averages variables as well as trainable variables.
context: Scope or module, identical to or parent of `scope`. If given, this
will be used as the stripped prefix.
**kwargs: Extra keyword arguments to pass to tf.train.Saver.
Returns:
A `tf.train.Saver` object for Variables in the scope or module. | Below is the instruction that describes the task:
### Input:
Builds a `tf.train.Saver` for the scope or module, with normalized names.
The names of the variables are normalized to remove the scope prefix.
This allows the same variables to be restored into another similar scope or
module using a complementary `tf.train.Saver` object.
Args:
scope: Scope or module. Variables within will be saved or restored.
collections: Sequence of collections of variables to restrict
`tf.train.Saver` to. By default this is `tf.GraphKeys.GLOBAL_VARIABLES`
which includes moving averages variables as well as trainable variables.
context: Scope or module, identical to or parent of `scope`. If given, this
will be used as the stripped prefix.
**kwargs: Extra keyword arguments to pass to tf.train.Saver.
Returns:
A `tf.train.Saver` object for Variables in the scope or module.
### Response:
def get_saver(scope, collections=(tf.GraphKeys.GLOBAL_VARIABLES,), # pylint: disable=redefined-outer-name
context=None, **kwargs):
"""Builds a `tf.train.Saver` for the scope or module, with normalized names.
The names of the variables are normalized to remove the scope prefix.
This allows the same variables to be restored into another similar scope or
module using a complementary `tf.train.Saver` object.
Args:
scope: Scope or module. Variables within will be saved or restored.
collections: Sequence of collections of variables to restrict
`tf.train.Saver` to. By default this is `tf.GraphKeys.GLOBAL_VARIABLES`
which includes moving averages variables as well as trainable variables.
context: Scope or module, identical to or parent of `scope`. If given, this
will be used as the stripped prefix.
**kwargs: Extra keyword arguments to pass to tf.train.Saver.
Returns:
A `tf.train.Saver` object for Variables in the scope or module.
"""
variable_map = {}
for collection in collections:
variable_map.update(get_normalized_variable_map(scope, collection, context))
return tf.train.Saver(var_list=variable_map, **kwargs) |
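A rough TF1-style sketch of the intended use: save one module's variables and restore them into an identically structured module under a different name. snt.Linear, the placeholder shape, and the checkpoint path are illustrative assumptions, not part of the original.
import tensorflow as tf
import sonnet as snt

inputs = tf.placeholder(tf.float32, [None, 4])   # assumed input shape
net_a = snt.Linear(output_size=10, name="net_a")
net_b = snt.Linear(output_size=10, name="net_b")
_ = net_a(inputs), net_b(inputs)                 # connect modules so variables exist

saver_a = get_saver(net_a)   # variable names stripped of the "net_a" prefix
saver_b = get_saver(net_b)   # complementary map with the "net_b" prefix stripped
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    path = saver_a.save(sess, "/tmp/net_a.ckpt")  # assumed checkpoint location
    saver_b.restore(sess, path)                   # net_b now holds net_a's weights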
def _check_custom_url_parameters(self):
"""Checks if custom url parameters are valid parameters.
Throws ValueError if the provided parameter is not a valid parameter.
"""
for param in self.custom_url_params:
if param not in CustomUrlParam:
raise ValueError('Parameter %s is not a valid custom url parameter. Please check and fix.' % param)
if self.service_type is ServiceType.FIS and CustomUrlParam.GEOMETRY in self.custom_url_params:
raise ValueError('{} should not be a custom url parameter of a FIS request'.format(CustomUrlParam.GEOMETRY)) | Checks if custom url parameters are valid parameters.
Throws ValueError if the provided parameter is not a valid parameter. | Below is the instruction that describes the task:
### Input:
Checks if custom url parameters are valid parameters.
Throws ValueError if the provided parameter is not a valid parameter.
### Response:
def _check_custom_url_parameters(self):
"""Checks if custom url parameters are valid parameters.
Throws ValueError if the provided parameter is not a valid parameter.
"""
for param in self.custom_url_params:
if param not in CustomUrlParam:
raise ValueError('Parameter %s is not a valid custom url parameter. Please check and fix.' % param)
if self.service_type is ServiceType.FIS and CustomUrlParam.GEOMETRY in self.custom_url_params:
raise ValueError('{} should not be a custom url parameter of a FIS request'.format(CustomUrlParam.GEOMETRY)) |
def read_features(self, tol=1e-3):
"""Reads the features from a file and stores them in the current
object.
Parameters
----------
tol: float
Tolerance level to detect duration of audio.
"""
try:
# Read JSON file
with open(self.file_struct.features_file) as f:
feats = json.load(f)
# Store duration
if self.dur is None:
self.dur = float(feats["globals"]["dur"])
# Check that we have the correct global parameters
assert(np.isclose(
self.dur, float(feats["globals"]["dur"]), rtol=tol))
assert(self.sr == int(feats["globals"]["sample_rate"]))
assert(self.hop_length == int(feats["globals"]["hop_length"]))
assert(os.path.basename(self.file_struct.audio_file) ==
os.path.basename(feats["globals"]["audio_file"]))
# Check for specific features params
feat_params_err = FeatureParamsError(
"Couldn't find features for %s id in file %s" %
(self.get_id(), self.file_struct.features_file))
if self.get_id() not in feats.keys():
raise feat_params_err
for param_name in self.get_param_names():
value = getattr(self, param_name)
if hasattr(value, '__call__'):
# Special case of functions
if value.__name__ != \
feats[self.get_id()]["params"][param_name]:
raise feat_params_err
else:
if str(value) != \
feats[self.get_id()]["params"][param_name]:
raise feat_params_err
# Store actual features
self._est_beats_times = np.array(feats["est_beats"])
self._est_beatsync_times = np.array(feats["est_beatsync_times"])
self._est_beats_frames = librosa.core.time_to_frames(
self._est_beats_times, sr=self.sr, hop_length=self.hop_length)
self._framesync_features = \
np.array(feats[self.get_id()]["framesync"])
self._est_beatsync_features = \
np.array(feats[self.get_id()]["est_beatsync"])
# Read annotated beats if available
if "ann_beats" in feats.keys():
self._ann_beats_times = np.array(feats["ann_beats"])
self._ann_beatsync_times = np.array(feats["ann_beatsync_times"])
self._ann_beats_frames = librosa.core.time_to_frames(
self._ann_beats_times, sr=self.sr,
hop_length=self.hop_length)
self._ann_beatsync_features = \
np.array(feats[self.get_id()]["ann_beatsync"])
except KeyError:
raise WrongFeaturesFormatError(
"The features file %s is not correctly formatted" %
self.file_struct.features_file)
except AssertionError:
raise FeaturesNotFound(
"The features for the given parameters were not found in "
"features file %s" % self.file_struct.features_file)
except IOError:
raise NoFeaturesFileError("Could not find features file %s",
self.file_struct.features_file) | Reads the features from a file and stores them in the current
object.
Parameters
----------
tol: float
Tolerance level to detect duration of audio. | Below is the instruction that describes the task:
### Input:
Reads the features from a file and stores them in the current
object.
Parameters
----------
tol: float
Tolerance level to detect duration of audio.
### Response:
def read_features(self, tol=1e-3):
"""Reads the features from a file and stores them in the current
object.
Parameters
----------
tol: float
Tolerance level to detect duration of audio.
"""
try:
# Read JSON file
with open(self.file_struct.features_file) as f:
feats = json.load(f)
# Store duration
if self.dur is None:
self.dur = float(feats["globals"]["dur"])
# Check that we have the correct global parameters
assert(np.isclose(
self.dur, float(feats["globals"]["dur"]), rtol=tol))
assert(self.sr == int(feats["globals"]["sample_rate"]))
assert(self.hop_length == int(feats["globals"]["hop_length"]))
assert(os.path.basename(self.file_struct.audio_file) ==
os.path.basename(feats["globals"]["audio_file"]))
# Check for specific features params
feat_params_err = FeatureParamsError(
"Couldn't find features for %s id in file %s" %
(self.get_id(), self.file_struct.features_file))
if self.get_id() not in feats.keys():
raise feat_params_err
for param_name in self.get_param_names():
value = getattr(self, param_name)
if hasattr(value, '__call__'):
# Special case of functions
if value.__name__ != \
feats[self.get_id()]["params"][param_name]:
raise feat_params_err
else:
if str(value) != \
feats[self.get_id()]["params"][param_name]:
raise feat_params_err
# Store actual features
self._est_beats_times = np.array(feats["est_beats"])
self._est_beatsync_times = np.array(feats["est_beatsync_times"])
self._est_beats_frames = librosa.core.time_to_frames(
self._est_beats_times, sr=self.sr, hop_length=self.hop_length)
self._framesync_features = \
np.array(feats[self.get_id()]["framesync"])
self._est_beatsync_features = \
np.array(feats[self.get_id()]["est_beatsync"])
# Read annotated beats if available
if "ann_beats" in feats.keys():
self._ann_beats_times = np.array(feats["ann_beats"])
self._ann_beatsync_times = np.array(feats["ann_beatsync_times"])
self._ann_beats_frames = librosa.core.time_to_frames(
self._ann_beats_times, sr=self.sr,
hop_length=self.hop_length)
self._ann_beatsync_features = \
np.array(feats[self.get_id()]["ann_beatsync"])
except KeyError:
raise WrongFeaturesFormatError(
"The features file %s is not correctly formatted" %
self.file_struct.features_file)
except AssertionError:
raise FeaturesNotFound(
"The features for the given parameters were not found in "
"features file %s" % self.file_struct.features_file)
except IOError:
raise NoFeaturesFileError("Could not find features file %s",
self.file_struct.features_file) |
def random(*args):
"""
Counts up sequentially from a number based on the current time
:rtype int:
"""
current_frame = inspect.currentframe().f_back
trace_string = ""
while current_frame.f_back:
trace_string = trace_string + current_frame.f_back.f_code.co_name
current_frame = current_frame.f_back
return counter.get_from_trace(trace_string) | Counts up sequentially from a number based on the current time
:rtype int: | Below is the instruction that describes the task:
### Input:
Counts up sequentially from a number based on the current time
:rtype int:
### Response:
def random(*args):
"""
Counts up sequentially from a number based on the current time
:rtype int:
"""
current_frame = inspect.currentframe().f_back
trace_string = ""
while current_frame.f_back:
trace_string = trace_string + current_frame.f_back.f_code.co_name
current_frame = current_frame.f_back
return counter.get_from_trace(trace_string) |
def create_transaction(self, to_account):
"""Create a transaction for this statement amount and account, into to_account
This will also set this StatementLine's ``transaction`` attribute to the newly
created transaction.
Args:
to_account (Account): The account the transaction is into / out of.
Returns:
Transaction: The newly created (and committed) transaction.
"""
from_account = self.statement_import.bank_account
transaction = Transaction.objects.create()
Leg.objects.create(
transaction=transaction, account=from_account, amount=+(self.amount * -1)
)
Leg.objects.create(transaction=transaction, account=to_account, amount=-(self.amount * -1))
transaction.date = self.date
transaction.save()
self.transaction = transaction
self.save()
return transaction | Create a transaction for this statement amount and account, into to_account
This will also set this StatementLine's ``transaction`` attribute to the newly
created transaction.
Args:
to_account (Account): The account the transaction is into / out of.
Returns:
Transaction: The newly created (and committed) transaction. | Below is the instruction that describes the task:
### Input:
Create a transaction for this statement amount and account, into to_account
This will also set this StatementLine's ``transaction`` attribute to the newly
created transaction.
Args:
to_account (Account): The account the transaction is into / out of.
Returns:
Transaction: The newly created (and committed) transaction.
### Response:
def create_transaction(self, to_account):
"""Create a transaction for this statement amount and account, into to_account
This will also set this StatementLine's ``transaction`` attribute to the newly
created transaction.
Args:
to_account (Account): The account the transaction is into / out of.
Returns:
Transaction: The newly created (and committed) transaction.
"""
from_account = self.statement_import.bank_account
transaction = Transaction.objects.create()
Leg.objects.create(
transaction=transaction, account=from_account, amount=+(self.amount * -1)
)
Leg.objects.create(transaction=transaction, account=to_account, amount=-(self.amount * -1))
transaction.date = self.date
transaction.save()
self.transaction = transaction
self.save()
return transaction |
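A hypothetical reconciliation loop built on create_transaction(); the "Expenses" account name and the filter on unreconciled lines are assumptions about the surrounding Django models.
expenses = Account.objects.get(name="Expenses")               # assumed account
for line in StatementLine.objects.filter(transaction=None):   # unreconciled lines
    tx = line.create_transaction(to_account=expenses)
    print(tx.pk, tx.date)   # each statement line now points at its transaction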
def run(self):
""" This defines the sequence of actions that are taken when the hierarchy is executed. A hierarchy state
executes all its child states recursively. Principally this code collects all input data for the next
child state, executes it, stores its output data and determines the next state
based on the outcome of the child state.
:return:
"""
try:
self._initialize_hierarchy()
while self.child_state is not self:
# print("hs1", self.name)
self.handling_execution_mode = True
execution_mode = singleton.state_machine_execution_engine.handle_execution_mode(self, self.child_state)
# in the case of starting the sm from a specific state not the transitions define the logic flow
# but the execution_engine.run_to_states; thus, do not alter the next state in this case
if not self._start_state_modified:
# check if e.g. the state machine was paused and the next state was modified (e.g. removed)
self.check_if_child_state_was_modified()
self.handling_execution_mode = False
if self.state_execution_status is not StateExecutionStatus.EXECUTE_CHILDREN:
self.state_execution_status = StateExecutionStatus.EXECUTE_CHILDREN
# print("hs2", self.name)
self.backward_execution = False
if self.preempted:
if self.last_transition and self.last_transition.from_outcome == -2:
logger.debug("Execute preemption handling for '{0}'".format(self.child_state))
else:
break
elif execution_mode == StateMachineExecutionStatus.BACKWARD:
break_loop = self._handle_backward_execution_before_child_execution()
if break_loop:
break
# This is only the case if this hierarchy-state is started in backward mode,
# but the user directly switches to the forward execution mode
if self.child_state is None:
break
# print("hs3", self.name)
self._execute_current_child()
if self.backward_execution:
# print("hs4", self.name)
break_loop = self._handle_backward_execution_after_child_execution()
if break_loop:
# print("hs4.1", self.name)
break
else:
# print("hs5", self.name)
break_loop = self._handle_forward_execution_after_child_execution()
if break_loop:
break
# print("hs6", self.name)
return self._finalize_hierarchy()
except Exception as e:
logger.error("{0} had an internal error: {1}\n{2}".format(self, str(e), str(traceback.format_exc())))
self.output_data["error"] = e
self.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE
self.child_state = None
self.last_child = None
return self.finalize(Outcome(-1, "aborted")) | This defines the sequence of actions that are taken when the hierarchy is executed. A hierarchy state
executes all its child states recursively. Principally this code collects all input data for the next
child state, executes it, stores its output data and determines the next state
based on the outcome of the child state.
:return: | Below is the instruction that describes the task:
### Input:
This defines the sequence of actions that are taken when the hierarchy is executed. A hierarchy state
executes all its child states recursively. Principally this code collects all input data for the next
child state, executes it, stores its output data and determines the next state
based on the outcome of the child state.
:return:
### Response:
def run(self):
""" This defines the sequence of actions that are taken when the hierarchy is executed. A hierarchy state
executes all its child states recursively. Principally this code collects all input data for the next
child state, executes it, stores its output data and determines the next state
based on the outcome of the child state.
:return:
"""
try:
self._initialize_hierarchy()
while self.child_state is not self:
# print("hs1", self.name)
self.handling_execution_mode = True
execution_mode = singleton.state_machine_execution_engine.handle_execution_mode(self, self.child_state)
# in the case of starting the sm from a specific state not the transitions define the logic flow
# but the execution_engine.run_to_states; thus, do not alter the next state in this case
if not self._start_state_modified:
# check if e.g. the state machine was paused and the next state was modified (e.g. removed)
self.check_if_child_state_was_modified()
self.handling_execution_mode = False
if self.state_execution_status is not StateExecutionStatus.EXECUTE_CHILDREN:
self.state_execution_status = StateExecutionStatus.EXECUTE_CHILDREN
# print("hs2", self.name)
self.backward_execution = False
if self.preempted:
if self.last_transition and self.last_transition.from_outcome == -2:
logger.debug("Execute preemption handling for '{0}'".format(self.child_state))
else:
break
elif execution_mode == StateMachineExecutionStatus.BACKWARD:
break_loop = self._handle_backward_execution_before_child_execution()
if break_loop:
break
# This is only the case if this hierarchy-state is started in backward mode,
# but the user directly switches to the forward execution mode
if self.child_state is None:
break
# print("hs3", self.name)
self._execute_current_child()
if self.backward_execution:
# print("hs4", self.name)
break_loop = self._handle_backward_execution_after_child_execution()
if break_loop:
# print("hs4.1", self.name)
break
else:
# print("hs5", self.name)
break_loop = self._handle_forward_execution_after_child_execution()
if break_loop:
break
# print("hs6", self.name)
return self._finalize_hierarchy()
except Exception as e:
logger.error("{0} had an internal error: {1}\n{2}".format(self, str(e), str(traceback.format_exc())))
self.output_data["error"] = e
self.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE
self.child_state = None
self.last_child = None
return self.finalize(Outcome(-1, "aborted")) |
def fetch_by_coord(self,coord):
"""get a single entry by the coordinate location [blockStart, innerStart]
.. warning:: creates a new instance of a BAMFile object when maybe the one we had would have worked
"""
#print coord
#print self.path
#b2 = BAMFile(self.path,blockStart=coord[0],innerStart=coord[1],index_obj=self.index,reference=self._reference)
b2 = BAMFile(self.path,BAMFile.Options(blockStart=coord[0],innerStart=coord[1],reference=self.reference))
#for bam in b2: print type(bam)
#print 'hi'
bam = b2.read_entry()
b2.close()
b2 = None
return bam | get a single entry by the coordinate location [blockStart, innerStart]
.. warning:: creates a new instance of a BAMFile object when maybe the one we had would have worked | Below is the instruction that describes the task:
### Input:
get a single entry by the coordinate location [blockStart, innerStart]
.. warning:: creates a new instance of a BAMFile object when maybe the one we had would have worked
### Response:
def fetch_by_coord(self,coord):
"""get a single entry by the coordinate location [blockStart, innerStart]
.. warning:: creates a new instance of a BAMFile object when maybe the one we had would have worked
"""
#print coord
#print self.path
#b2 = BAMFile(self.path,blockStart=coord[0],innerStart=coord[1],index_obj=self.index,reference=self._reference)
b2 = BAMFile(self.path,BAMFile.Options(blockStart=coord[0],innerStart=coord[1],reference=self.reference))
#for bam in b2: print type(bam)
#print 'hi'
bam = b2.read_entry()
b2.close()
b2 = None
return bam |
def orbit_posvel(Ms,eccs,semimajors,mreds,obspos=None):
"""returns positions in projected AU and velocities in km/s for given mean anomalies
Returns positions and velocities as SkyCoord objects. Uses
``orbitutils.kepler.Efn`` to calculate eccentric anomalies using interpolation.
Parameters
----------
Ms, eccs, semimajors, mreds : float or array-like
Mean anomalies, eccentricities, semimajor axes (AU), reduced masses (Msun).
obspos : ``None``, (x,y,z) tuple or ``SkyCoord`` object
Locations of observers for which to return coordinates.
If ``None`` then populate randomly on sphere. If (x,y,z) or
``SkyCoord`` object provided, then use those.
Returns
-------
pos,vel : ``SkyCoord``
Objects representing the positions and velocities, the coordinates
of which are ``Quantity`` objects that have units. Positions are in
projected AU and velocities in km/s.
"""
Es = Efn(Ms,eccs) #eccentric anomalies by interpolation
rs = semimajors*(1-eccs*np.cos(Es))
nus = 2 * np.arctan2(np.sqrt(1+eccs)*np.sin(Es/2),np.sqrt(1-eccs)*np.cos(Es/2))
xs = semimajors*(np.cos(Es) - eccs) #AU
ys = semimajors*np.sqrt(1-eccs**2)*np.sin(Es) #AU
Edots = np.sqrt(G*mreds*MSUN/(semimajors*AU)**3)/(1-eccs*np.cos(Es))
xdots = -semimajors*AU*np.sin(Es)*Edots/1e5 #km/s
ydots = semimajors*AU*np.sqrt(1-eccs**2)*np.cos(Es)*Edots/1e5 # km/s
n = np.size(xs)
orbpos = SkyCoord(xs,ys,0*u.AU,representation='cartesian',unit='AU')
orbvel = SkyCoord(xdots,ydots,0*u.km/u.s,representation='cartesian',unit='km/s')
if obspos is None:
obspos = random_spherepos(n) #observer position
if type(obspos) == type((1,2,3)):
obspos = SkyCoord(obspos[0],obspos[1],obspos[2],
representation='cartesian').represent_as('physicsspherical')
if not hasattr(obspos,'theta'): #if obspos not physics spherical, make it
obspos = obspos.represent_as('physicsspherical')
#random orientation of the sky 'x-y' coordinates
psi = rand.random(n)*2*np.pi
#transform positions and velocities into observer coordinates
x,y,z = orbitproject(orbpos.x,orbpos.y,obspos.theta,obspos.phi,psi)
vx,vy,vz = orbitproject(orbvel.x,orbvel.y,obspos.theta,obspos.phi,psi)
return (SkyCoord(x,y,z,representation='cartesian'),
SkyCoord(vx,vy,vz,representation='cartesian')) | returns positions in projected AU and velocities in km/s for given mean anomalies
Returns positions and velocities as SkyCoord objects. Uses
``orbitutils.kepler.Efn`` to calculate eccentric anomalies using interpolation.
Parameters
----------
Ms, eccs, semimajors, mreds : float or array-like
Mean anomalies, eccentricities, semimajor axes (AU), reduced masses (Msun).
obspos : ``None``, (x,y,z) tuple or ``SkyCoord`` object
Locations of observers for which to return coordinates.
If ``None`` then populate randomly on sphere. If (x,y,z) or
``SkyCoord`` object provided, then use those.
Returns
-------
pos,vel : ``SkyCoord``
Objects representing the positions and velocities, the coordinates
of which are ``Quantity`` objects that have units. Positions are in
projected AU and velocities in km/s. | Below is the instruction that describes the task:
### Input:
returns positions in projected AU and velocities in km/s for given mean anomalies
Returns positions and velocities as SkyCoord objects. Uses
``orbitutils.kepler.Efn`` to calculate eccentric anomalies using interpolation.
Parameters
----------
Ms, eccs, semimajors, mreds : float or array-like
Mean anomalies, eccentricities, semimajor axes (AU), reduced masses (Msun).
obspos : ``None``, (x,y,z) tuple or ``SkyCoord`` object
Locations of observers for which to return coordinates.
If ``None`` then populate randomly on sphere. If (x,y,z) or
``SkyCoord`` object provided, then use those.
Returns
-------
pos,vel : ``SkyCoord``
Objects representing the positions and velocities, the coordinates
of which are ``Quantity`` objects that have units. Positions are in
projected AU and velocities in km/s.
### Response:
def orbit_posvel(Ms,eccs,semimajors,mreds,obspos=None):
"""returns positions in projected AU and velocities in km/s for given mean anomalies
Returns positions and velocities as SkyCoord objects. Uses
``orbitutils.kepler.Efn`` to calculate eccentric anomalies using interpolation.
Parameters
----------
Ms, eccs, semimajors, mreds : float or array-like
Mean anomalies, eccentricities, semimajor axes (AU), reduced masses (Msun).
obspos : ``None``, (x,y,z) tuple or ``SkyCoord`` object
Locations of observers for which to return coordinates.
If ``None`` then populate randomly on sphere. If (x,y,z) or
``SkyCoord`` object provided, then use those.
Returns
-------
pos,vel : ``SkyCoord``
Objects representing the positions and velocities, the coordinates
of which are ``Quantity`` objects that have units. Positions are in
projected AU and velocities in km/s.
"""
Es = Efn(Ms,eccs) #eccentric anomalies by interpolation
rs = semimajors*(1-eccs*np.cos(Es))
nus = 2 * np.arctan2(np.sqrt(1+eccs)*np.sin(Es/2),np.sqrt(1-eccs)*np.cos(Es/2))
xs = semimajors*(np.cos(Es) - eccs) #AU
ys = semimajors*np.sqrt(1-eccs**2)*np.sin(Es) #AU
Edots = np.sqrt(G*mreds*MSUN/(semimajors*AU)**3)/(1-eccs*np.cos(Es))
xdots = -semimajors*AU*np.sin(Es)*Edots/1e5 #km/s
ydots = semimajors*AU*np.sqrt(1-eccs**2)*np.cos(Es)*Edots/1e5 # km/s
n = np.size(xs)
orbpos = SkyCoord(xs,ys,0*u.AU,representation='cartesian',unit='AU')
orbvel = SkyCoord(xdots,ydots,0*u.km/u.s,representation='cartesian',unit='km/s')
if obspos is None:
obspos = random_spherepos(n) #observer position
if type(obspos) == type((1,2,3)):
obspos = SkyCoord(obspos[0],obspos[1],obspos[2],
representation='cartesian').represent_as('physicsspherical')
if not hasattr(obspos,'theta'): #if obspos not physics spherical, make it
obspos = obspos.represent_as('physicsspherical')
#random orientation of the sky 'x-y' coordinates
psi = rand.random(n)*2*np.pi
#transform positions and velocities into observer coordinates
x,y,z = orbitproject(orbpos.x,orbpos.y,obspos.theta,obspos.phi,psi)
vx,vy,vz = orbitproject(orbvel.x,orbvel.y,obspos.theta,obspos.phi,psi)
return (SkyCoord(x,y,z,representation='cartesian'),
SkyCoord(vx,vy,vz,representation='cartesian')) |
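A small illustrative call, reusing the module's numpy import: a batch of circular 1 AU orbits of a 1 Msun reduced-mass system, with observer positions drawn randomly on the sphere by default. The batch size and values are examples only.
import numpy as np

N = 1000                                   # illustrative batch size
Ms = np.random.uniform(0, 2 * np.pi, N)    # mean anomalies
eccs = np.zeros(N)                         # circular orbits
semimajors = np.ones(N)                    # 1 AU
mreds = np.ones(N)                         # 1 Msun reduced mass
pos, vel = orbit_posvel(Ms, eccs, semimajors, mreds)
print(pos[0], vel[0])                      # projected AU and km/s, as SkyCoords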
def serialize(self, elt, sw, pyobj, name=None, orig=None, **kw):
'''
Parameters:
elt -- the current DOMWrapper element
sw -- soapWriter object
pyobj -- python object to serialize
'''
raise EvaluateException("Unimplemented evaluation", sw.Backtrace(elt)) | Parameters:
elt -- the current DOMWrapper element
sw -- soapWriter object
pyobj -- python object to serialize | Below is the instruction that describes the task:
### Input:
Parameters:
elt -- the current DOMWrapper element
sw -- soapWriter object
pyobj -- python object to serialize
### Response:
def serialize(self, elt, sw, pyobj, name=None, orig=None, **kw):
'''
Parameters:
elt -- the current DOMWrapper element
sw -- soapWriter object
pyobj -- python object to serialize
'''
raise EvaluateException("Unimplemented evaluation", sw.Backtrace(elt)) |
def collect_tokens(cls, parseresult, mode):
"""
Collect the tokens from a (potentially) nested parse result.
"""
inner = '(%s)' if mode=='parens' else '[%s]'
if parseresult is None: return []
tokens = []
for token in parseresult.asList():
# If value is a tuple, the token will be a list
if isinstance(token, list):
token = cls.recurse_token(token, inner)
tokens[-1] = tokens[-1] + token
else:
if token.strip() == ',': continue
tokens.append(cls._strip_commas(token))
return tokens | Collect the tokens from a (potentially) nested parse result. | Below is the instruction that describes the task:
### Input:
Collect the tokens from a (potentially) nested parse result.
### Response:
def collect_tokens(cls, parseresult, mode):
"""
Collect the tokens from a (potentially) nested parse result.
"""
inner = '(%s)' if mode=='parens' else '[%s]'
if parseresult is None: return []
tokens = []
for token in parseresult.asList():
# If value is a tuple, the token will be a list
if isinstance(token, list):
token = cls.recurse_token(token, inner)
tokens[-1] = tokens[-1] + token
else:
if token.strip() == ',': continue
tokens.append(cls._strip_commas(token))
return tokens |
def merge( self, other_cluster ):
"""
Combine two clusters into a single cluster.
Args:
other_cluster (Cluster): The second cluster to combine.
Returns:
(Cluster): The combination of both clusters.
"""
new_cluster = Cluster( self.sites | other_cluster.sites )
new_cluster.neighbours = ( self.neighbours | other_cluster.neighbours ).difference( new_cluster.sites )
return new_cluster | Combine two clusters into a single cluster.
Args:
other_cluster (Cluster): The second cluster to combine.
Returns:
(Cluster): The combination of both clusters. | Below is the instruction that describes the task:
### Input:
Combine two clusters into a single cluster.
Args:
other_cluster (Cluster): The second cluster to combine.
Returns:
(Cluster): The combination of both clusters.
### Response:
def merge( self, other_cluster ):
"""
Combine two clusters into a single cluster.
Args:
other_cluster (Cluster): The second cluster to combine.
Returns:
(Cluster): The combination of both clusters.
"""
new_cluster = Cluster( self.sites | other_cluster.sites )
new_cluster.neighbours = ( self.neighbours | other_cluster.neighbours ).difference( new_cluster.sites )
return new_cluster |
def uncancel_invoice(self, invoice_id):
"""
Uncancels an invoice
:param invoice_id: the invoice id
"""
return self._create_put_request(
resource=INVOICES,
billomat_id=invoice_id,
command=UNCANCEL,
) | Uncancels an invoice
:param invoice_id: the invoice id | Below is the instruction that describes the task:
### Input:
Uncancels an invoice
:param invoice_id: the invoice id
### Response:
def uncancel_invoice(self, invoice_id):
"""
Uncancels an invoice
:param invoice_id: the invoice id
"""
return self._create_put_request(
resource=INVOICES,
billomat_id=invoice_id,
command=UNCANCEL,
) |
def summarize(manager: Manager):
"""Summarize the contents of the database."""
click.echo('Networks: {}'.format(manager.count_networks()))
click.echo('Edges: {}'.format(manager.count_edges()))
click.echo('Nodes: {}'.format(manager.count_nodes()))
click.echo('Namespaces: {}'.format(manager.count_namespaces()))
click.echo('Namespaces entries: {}'.format(manager.count_namespace_entries()))
click.echo('Annotations: {}'.format(manager.count_annotations()))
click.echo('Annotation entries: {}'.format(manager.count_annotation_entries())) | Summarize the contents of the database. | Below is the instruction that describes the task:
### Input:
Summarize the contents of the database.
### Response:
def summarize(manager: Manager):
"""Summarize the contents of the database."""
click.echo('Networks: {}'.format(manager.count_networks()))
click.echo('Edges: {}'.format(manager.count_edges()))
click.echo('Nodes: {}'.format(manager.count_nodes()))
click.echo('Namespaces: {}'.format(manager.count_namespaces()))
click.echo('Namespaces entries: {}'.format(manager.count_namespace_entries()))
click.echo('Annotations: {}'.format(manager.count_annotations()))
click.echo('Annotation entries: {}'.format(manager.count_annotation_entries())) |
def client_receives_binary(self, name=None, timeout=None, label=None):
"""Receive raw binary message.
If client `name` is not given, uses the latest client. Optional message
`label` is shown on logs.
Examples:
| ${binary} = | Client receives binary |
| ${binary} = | Client receives binary | Client1 | timeout=5 |
"""
client, name = self._clients.get_with_name(name)
msg = client.receive(timeout=timeout)
self._register_receive(client, label, name)
return msg | Receive raw binary message.
If client `name` is not given, uses the latest client. Optional message
`label` is shown on logs.
Examples:
| ${binary} = | Client receives binary |
| ${binary} = | Client receives binary | Client1 | timeout=5 | | Below is the instruction that describes the task:
### Input:
Receive raw binary message.
If client `name` is not given, uses the latest client. Optional message
`label` is shown on logs.
Examples:
| ${binary} = | Client receives binary |
| ${binary} = | Client receives binary | Client1 | timeout=5 |
### Response:
def client_receives_binary(self, name=None, timeout=None, label=None):
"""Receive raw binary message.
If client `name` is not given, uses the latest client. Optional message
`label` is shown on logs.
Examples:
| ${binary} = | Client receives binary |
| ${binary} = | Client receives binary | Client1 | timeout=5 |
"""
client, name = self._clients.get_with_name(name)
msg = client.receive(timeout=timeout)
self._register_receive(client, label, name)
return msg |
def fso_exists(self, path):
'overlays os.path.exists()'
try:
return self._exists(self.deref(path))
except os.error:
return False | overlays os.path.exists() | Below is the instruction that describes the task:
### Input:
overlays os.path.exists()
### Response:
def fso_exists(self, path):
'overlays os.path.exists()'
try:
return self._exists(self.deref(path))
except os.error:
return False |
def get_external_commands_from_arbiters(self):
"""Get external commands from our arbiters
As of now, only the arbiters are requested to provide their external commands that
the receiver will push to all the known schedulers to make them being executed.
:return: None
"""
for arbiter_link_uuid in self.arbiters:
link = self.arbiters[arbiter_link_uuid]
if not link.active:
logger.debug("The arbiter '%s' is not active, it is not possible to get "
"its external commands!", link.name)
continue
try:
logger.debug("Getting external commands from: %s", link.name)
external_commands = link.get_external_commands()
if external_commands:
logger.debug("Got %d commands from: %s", len(external_commands), link.name)
else:
# Simple protection against None value
external_commands = []
for external_command in external_commands:
self.add(external_command)
except LinkError:
logger.warning("Arbiter connection failed, I could not get external commands!")
except Exception as exp: # pylint: disable=broad-except
logger.error("Arbiter connection failed, I could not get external commands!")
logger.exception("Exception: %s", exp) | Get external commands from our arbiters
As of now, only the arbiters are requested to provide their external commands that
the receiver will push to all the known schedulers to make them being executed.
:return: None | Below is the instruction that describes the task:
### Input:
Get external commands from our arbiters
As of now, only the arbiters are requested to provide their external commands that
the receiver will push to all the known schedulers to make them being executed.
:return: None
### Response:
def get_external_commands_from_arbiters(self):
"""Get external commands from our arbiters
As of now, only the arbiters are requested to provide their external commands that
the receiver will push to all the known schedulers to make them being executed.
:return: None
"""
for arbiter_link_uuid in self.arbiters:
link = self.arbiters[arbiter_link_uuid]
if not link.active:
logger.debug("The arbiter '%s' is not active, it is not possible to get "
"its external commands!", link.name)
continue
try:
logger.debug("Getting external commands from: %s", link.name)
external_commands = link.get_external_commands()
if external_commands:
logger.debug("Got %d commands from: %s", len(external_commands), link.name)
else:
# Simple protection against None value
external_commands = []
for external_command in external_commands:
self.add(external_command)
except LinkError:
logger.warning("Arbiter connection failed, I could not get external commands!")
except Exception as exp: # pylint: disable=broad-except
logger.error("Arbiter connection failed, I could not get external commands!")
logger.exception("Exception: %s", exp) |
def configure(working_dir, config_file=None, force=False, interactive=False):
"""
Configure blockstack: find and store configuration parameters to the config file.
Optionally prompt for missing data interactively (with interactive=True). Or, raise an exception
if there are any fields missing.
Optionally force a re-prompting for all configuration details (with force=True)
Return {'blockstack': {...}, 'bitcoind': {...}, 'blockstack-api': {...}}
"""
if config_file is None:
# get input for everything
config_file = virtualchain.get_config_filename(get_default_virtualchain_impl(), working_dir)
if not os.path.exists( config_file ):
# definitely ask for everything
force = True
log.debug("Load config from '%s'" % config_file)
# get blockstack opts
blockstack_opts = {}
blockstack_opts_defaults = default_blockstack_opts(working_dir, config_file=config_file)
blockstack_params = blockstack_opts_defaults.keys()
if not force or not interactive:
# default blockstack options
blockstack_opts = default_blockstack_opts(working_dir, config_file=config_file )
blockstack_msg = "Please enter blockstack configuration hints."
blockstack_opts, missing_blockstack_opts, num_blockstack_opts_prompted = find_missing( blockstack_msg, \
blockstack_params, \
blockstack_opts, \
blockstack_opts_defaults, \
prompt_missing=interactive )
blockstack_api_opts = {}
blockstack_api_defaults = default_blockstack_api_opts(working_dir, config_file=config_file)
blockstack_api_params = blockstack_api_defaults.keys()
if not force or not interactive:
# default blockstack API options
blockstack_api_opts = default_blockstack_api_opts(working_dir, config_file=config_file)
blockstack_api_msg = "Please enter blockstack RESTful API configuration hints."
blockstack_api_opts, missing_blockstack_api_opts, num_blockstack_api_opts_prompted = find_missing( blockstack_api_msg, \
blockstack_api_params, \
blockstack_api_opts, \
blockstack_api_defaults, \
prompt_missing=interactive )
bitcoind_message = "Blockstack does not have enough information to connect\n"
bitcoind_message += "to bitcoind. Please supply the following parameters, or\n"
bitcoind_message += "press [ENTER] to select the default value."
bitcoind_opts = {}
bitcoind_opts_defaults = default_bitcoind_opts( config_file=config_file )
bitcoind_params = bitcoind_opts_defaults.keys()
if not force or not interactive:
# get default set of bitcoind opts
bitcoind_opts = default_bitcoind_opts( config_file=config_file )
# get any missing bitcoind fields
bitcoind_opts, missing_bitcoin_opts, num_bitcoind_prompted = find_missing( bitcoind_message, \
bitcoind_params, \
bitcoind_opts, \
bitcoind_opts_defaults, \
prompt_missing=interactive )
if not interactive and (len(missing_bitcoin_opts) > 0 or len(missing_blockstack_opts) > 0 or len(missing_blockstack_api_opts) > 0):
# cannot continue
raise Exception("Missing configuration fields: %s" % (",".join( missing_blockstack_opts + missing_bitcoin_opts + missing_blockstack_api_opts )) )
ret = {
'blockstack': blockstack_opts,
'bitcoind': bitcoind_opts,
'blockstack-api': blockstack_api_opts
}
# if we prompted, then save
if num_bitcoind_prompted > 0 or num_blockstack_opts_prompted > 0 or num_blockstack_api_opts_prompted > 0 or \
(not os.path.exists(config_file) and not interactive):
print >> sys.stderr, "Saving configuration to %s" % config_file
# always set version when writing
config_opts = copy.deepcopy(ret)
if not config_opts['blockstack'].has_key('server_version'):
config_opts['blockstack']['server_version'] = VERSION
if not config_opts['blockstack-api'].has_key('server_version'):
config_opts['blockstack-api']['server_version'] = VERSION
# if the config file doesn't exist, then set the version
# in ret as well, since it's what's written
if not os.path.exists(config_file):
ret['blockstack']['server_version'] = VERSION
ret['blockstack-api']['server_version'] = VERSION
write_config_file( config_opts, config_file )
# prefix our bitcoind options, so they work with virtualchain
ret['bitcoind'] = opt_restore("bitcoind_", ret['bitcoind'])
return ret | Configure blockstack: find and store configuration parameters to the config file.
Optionally prompt for missing data interactively (with interactive=True). Or, raise an exception
if there are any fields missing.
Optionally force a re-prompting for all configuration details (with force=True)
Return {'blockstack': {...}, 'bitcoind': {...}, 'blockstack-api': {...}} | Below is the instruction that describes the task:
### Input:
Configure blockstack: find and store configuration parameters to the config file.
Optionally prompt for missing data interactively (with interactive=True). Or, raise an exception
if there are any fields missing.
Optionally force a re-prompting for all configuration details (with force=True)
Return {'blockstack': {...}, 'bitcoind': {...}, 'blockstack-api': {...}}
### Response:
def configure(working_dir, config_file=None, force=False, interactive=False):
"""
Configure blockstack: find and store configuration parameters to the config file.
Optionally prompt for missing data interactively (with interactive=True). Or, raise an exception
if there are any fields missing.
Optionally force a re-prompting for all configuration details (with force=True)
Return {'blockstack': {...}, 'bitcoind': {...}, 'blockstack-api': {...}}
"""
if config_file is None:
# get input for everything
config_file = virtualchain.get_config_filename(get_default_virtualchain_impl(), working_dir)
if not os.path.exists( config_file ):
# definitely ask for everything
force = True
log.debug("Load config from '%s'" % config_file)
# get blockstack opts
blockstack_opts = {}
blockstack_opts_defaults = default_blockstack_opts(working_dir, config_file=config_file)
blockstack_params = blockstack_opts_defaults.keys()
if not force or not interactive:
# default blockstack options
blockstack_opts = default_blockstack_opts(working_dir, config_file=config_file )
blockstack_msg = "Please enter blockstack configuration hints."
blockstack_opts, missing_blockstack_opts, num_blockstack_opts_prompted = find_missing( blockstack_msg, \
blockstack_params, \
blockstack_opts, \
blockstack_opts_defaults, \
prompt_missing=interactive )
blockstack_api_opts = {}
blockstack_api_defaults = default_blockstack_api_opts(working_dir, config_file=config_file)
blockstack_api_params = blockstack_api_defaults.keys()
if not force or not interactive:
# default blockstack API options
blockstack_api_opts = default_blockstack_api_opts(working_dir, config_file=config_file)
blockstack_api_msg = "Please enter blockstack RESTful API configuration hints."
blockstack_api_opts, missing_blockstack_api_opts, num_blockstack_api_opts_prompted = find_missing( blockstack_api_msg, \
blockstack_api_params, \
blockstack_api_opts, \
blockstack_api_defaults, \
prompt_missing=interactive )
bitcoind_message = "Blockstack does not have enough information to connect\n"
bitcoind_message += "to bitcoind. Please supply the following parameters, or\n"
bitcoind_message += "press [ENTER] to select the default value."
bitcoind_opts = {}
bitcoind_opts_defaults = default_bitcoind_opts( config_file=config_file )
bitcoind_params = bitcoind_opts_defaults.keys()
if not force or not interactive:
# get default set of bitcoind opts
bitcoind_opts = default_bitcoind_opts( config_file=config_file )
# get any missing bitcoind fields
bitcoind_opts, missing_bitcoin_opts, num_bitcoind_prompted = find_missing( bitcoind_message, \
bitcoind_params, \
bitcoind_opts, \
bitcoind_opts_defaults, \
prompt_missing=interactive )
if not interactive and (len(missing_bitcoin_opts) > 0 or len(missing_blockstack_opts) > 0 or len(missing_blockstack_api_opts) > 0):
# cannot continue
raise Exception("Missing configuration fields: %s" % (",".join( missing_blockstack_opts + missing_bitcoin_opts + missing_blockstack_api_opts )) )
ret = {
'blockstack': blockstack_opts,
'bitcoind': bitcoind_opts,
'blockstack-api': blockstack_api_opts
}
# if we prompted, then save
if num_bitcoind_prompted > 0 or num_blockstack_opts_prompted > 0 or num_blockstack_api_opts_prompted > 0 or \
(not os.path.exists(config_file) and not interactive):
print >> sys.stderr, "Saving configuration to %s" % config_file
# always set version when writing
config_opts = copy.deepcopy(ret)
if not config_opts['blockstack'].has_key('server_version'):
config_opts['blockstack']['server_version'] = VERSION
if not config_opts['blockstack-api'].has_key('server_version'):
config_opts['blockstack-api']['server_version'] = VERSION
# if the config file doesn't exist, then set the version
# in ret as well, since it's what's written
if not os.path.exists(config_file):
ret['blockstack']['server_version'] = VERSION
ret['blockstack-api']['server_version'] = VERSION
write_config_file( config_opts, config_file )
# prefix our bitcoind options, so they work with virtualchain
ret['bitcoind'] = opt_restore("bitcoind_", ret['bitcoind'])
return ret |
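A brief call sketch; the working directory is an example path, and the returned top-level sections match the docstring above.
opts = configure("/var/blockstack", interactive=True)   # example working dir
print(sorted(opts.keys()))   # ['bitcoind', 'blockstack', 'blockstack-api']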
def mod_root(a, p):
""" Return a root of `a' modulo p """
if a == 0:
return 0
if not mod_issquare(a, p):
raise ValueError
n = 2
while mod_issquare(n, p):
n += 1
q = p - 1
r = 0
while not q.getbit(r):
r += 1
q = q >> r
y = pow(n, q, p)
h = q >> 1
b = pow(a, h, p)
x = (a * b) % p
b = (b * x) % p
while b != 1:
h = (b * b) % p
m = 1
while h != 1:
h = (h * h) % p
m += 1
h = gmpy.mpz(0)
h = h.setbit(r - m - 1)
t = pow(y, h, p)
y = (t * t) % p
r = m
x = (x * t) % p
b = (b * y) % p
return x | Return a root of `a' modulo p | Below is the instruction that describes the task:
### Input:
Return a root of `a' modulo p
### Response:
def mod_root(a, p):
""" Return a root of `a' modulo p """
if a == 0:
return 0
if not mod_issquare(a, p):
raise ValueError
n = 2
while mod_issquare(n, p):
n += 1
q = p - 1
r = 0
while not q.getbit(r):
r += 1
q = q >> r
y = pow(n, q, p)
h = q >> 1
b = pow(a, h, p)
x = (a * b) % p
b = (b * x) % p
while b != 1:
h = (b * b) % p
m = 1
while h != 1:
h = (h * h) % p
m += 1
h = gmpy.mpz(0)
h = h.setbit(r - m - 1)
t = pow(y, h, p)
y = (t * t) % p
r = m
x = (x * t) % p
b = (b * y) % p
return x |
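This is essentially the Tonelli-Shanks square-root algorithm; a tiny hand-checked example of the contract (plain Python, no gmpy needed for the check):
# 10 is a quadratic residue mod 13: 6**2 = 36 = 10 (mod 13),
# so mod_root(10, 13) should return 6 or its negation 13 - 6 = 7.
p, a = 13, 10
for root in (6, 7):
    assert pow(root, 2, p) == a % p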
def GetMethod(self, name, descriptor):
"""
.. deprecated:: 3.1.0
Use :meth:`get_method` instead.
"""
warnings.warn("deprecated, use get_method instead. This function might be removed in a later release!", DeprecationWarning)
return self.get_method(name, descriptor) | .. deprecated:: 3.1.0
Use :meth:`get_method` instead. | Below is the instruction that describes the task:
### Input:
.. deprecated:: 3.1.0
Use :meth:`get_method` instead.
### Response:
def GetMethod(self, name, descriptor):
"""
.. deprecated:: 3.1.0
Use :meth:`get_method` instead.
"""
warnings.warn("deprecated, use get_method instead. This function might be removed in a later release!", DeprecationWarning)
return self.get_method(name, descriptor) |
def add_route(route, endpoint=None, **kw):
"""Add a new JSON API route
"""
# ensure correct amount of slashes
def apiurl(route):
return '/'.join(s.strip('/') for s in ["", BASE_URL, route])
return add_senaite_route(apiurl(route), endpoint, **kw) | Add a new JSON API route | Below is the instruction that describes the task:
### Input:
Add a new JSON API route
### Response:
def add_route(route, endpoint=None, **kw):
"""Add a new JSON API route
"""
# ensure correct amount of slashes
def apiurl(route):
return '/'.join(s.strip('/') for s in ["", BASE_URL, route])
return add_senaite_route(apiurl(route), endpoint, **kw) |
async def retry_async(self, func, partition_id, retry_message,
final_failure_message, max_retries, host_id):
"""
Throws if it runs out of retries. If it returns, action succeeded.
"""
created_okay = False
retry_count = 0
while not created_okay and retry_count <= max_retries:
try:
await func(partition_id)
created_okay = True
except Exception as err: # pylint: disable=broad-except
_logger.error("%r %r %r %r", retry_message, host_id, partition_id, err)
retry_count += 1
if not created_okay:
raise Exception(host_id, final_failure_message) | Throws if it runs out of retries. If it returns, action succeeded. | Below is the instruction that describes the task:
### Input:
Throws if it runs out of retries. If it returns, action succeeded.
### Response:
async def retry_async(self, func, partition_id, retry_message,
final_failure_message, max_retries, host_id):
"""
Throws if it runs out of retries. If it returns, action succeeded.
"""
created_okay = False
retry_count = 0
while not created_okay and retry_count <= max_retries:
try:
await func(partition_id)
created_okay = True
except Exception as err: # pylint: disable=broad-except
_logger.error("%r %r %r %r", retry_message, host_id, partition_id, err)
retry_count += 1
if not created_okay:
raise Exception(host_id, final_failure_message) |
def stack_call(self, *args):
"""Stacks a redis command inside the object.
The syntax is the same as the call() method of a Client class.
Args:
*args: full redis command as variable length argument list.
Examples:
>>> pipeline = Pipeline()
>>> pipeline.stack_call("HSET", "key", "field", "value")
>>> pipeline.stack_call("PING")
>>> pipeline.stack_call("INCR", "key2")
"""
self.pipelined_args.append(args)
self.number_of_stacked_calls = self.number_of_stacked_calls + 1 | Stacks a redis command inside the object.
The syntax is the same as the call() method of a Client class.
Args:
*args: full redis command as variable length argument list.
Examples:
>>> pipeline = Pipeline()
>>> pipeline.stack_call("HSET", "key", "field", "value")
>>> pipeline.stack_call("PING")
>>> pipeline.stack_call("INCR", "key2") | Below is the the instruction that describes the task:
### Input:
Stacks a redis command inside the object.
The syntax is the same as the call() method of a Client class.
Args:
*args: full redis command as variable length argument list.
Examples:
>>> pipeline = Pipeline()
>>> pipeline.stack_call("HSET", "key", "field", "value")
>>> pipeline.stack_call("PING")
>>> pipeline.stack_call("INCR", "key2")
### Response:
def stack_call(self, *args):
"""Stacks a redis command inside the object.
The syntax is the same as the call() method of a Client class.
Args:
*args: full redis command as variable length argument list.
Examples:
>>> pipeline = Pipeline()
>>> pipeline.stack_call("HSET", "key", "field", "value")
>>> pipeline.stack_call("PING")
>>> pipeline.stack_call("INCR", "key2")
"""
self.pipelined_args.append(args)
self.number_of_stacked_calls = self.number_of_stacked_calls + 1 |
def get_keys(self, transport, bucket, timeout=None):
"""
get_keys(bucket, timeout=None)
Lists all keys in a bucket.
.. warning:: Do not use this in production, as it requires
traversing through all keys stored in a cluster.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket whose keys are fetched
:type bucket: RiakBucket
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: list
"""
if not riak.disable_list_exceptions:
raise ListError()
_validate_timeout(timeout)
return transport.get_keys(bucket, timeout=timeout) | get_keys(bucket, timeout=None)
Lists all keys in a bucket.
.. warning:: Do not use this in production, as it requires
traversing through all keys stored in a cluster.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket whose keys are fetched
:type bucket: RiakBucket
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: list | Below is the instruction that describes the task:
### Input:
get_keys(bucket, timeout=None)
Lists all keys in a bucket.
.. warning:: Do not use this in production, as it requires
traversing through all keys stored in a cluster.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket whose keys are fetched
:type bucket: RiakBucket
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: list
### Response:
def get_keys(self, transport, bucket, timeout=None):
"""
get_keys(bucket, timeout=None)
Lists all keys in a bucket.
.. warning:: Do not use this in production, as it requires
traversing through all keys stored in a cluster.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket whose keys are fetched
:type bucket: RiakBucket
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: list
"""
if not riak.disable_list_exceptions:
raise ListError()
_validate_timeout(timeout)
return transport.get_keys(bucket, timeout=timeout) |
def obfn_gvar(self):
"""This method is inserted into the inner cbpdn object,
replacing its own obfn_gvar method, thereby providing a hook for
applying the additional steps necessary for the AMS method.
"""
# Get inner cbpdn object gvar
gv = self.inner_obfn_gvar().copy()
# Set slice corresponding to the coefficient map of the final
# filter (the impulse inserted for the AMS method) to zero so
# that it does not affect the results (e.g. l1 norm) computed
# from this variable by the inner cbpdn object
gv[..., -self.cri.Cd:] = 0
return gv | This method is inserted into the inner cbpdn object,
replacing its own obfn_gvar method, thereby providing a hook for
applying the additional steps necessary for the AMS method. | Below is the instruction that describes the task:
### Input:
This method is inserted into the inner cbpdn object,
replacing its own obfn_gvar method, thereby providing a hook for
applying the additional steps necessary for the AMS method.
### Response:
def obfn_gvar(self):
"""This method is inserted into the inner cbpdn object,
replacing its own obfn_gvar method, thereby providing a hook for
applying the additional steps necessary for the AMS method.
"""
# Get inner cbpdn object gvar
gv = self.inner_obfn_gvar().copy()
# Set slice corresponding to the coefficient map of the final
# filter (the impulse inserted for the AMS method) to zero so
# that it does not affect the results (e.g. l1 norm) computed
# from this variable by the inner cbpdn object
gv[..., -self.cri.Cd:] = 0
return gv |
def is_same_file(path1, path2):
"""Return True if path1 is the same file as path2.
The reason for this dance is that samefile throws if either file doesn't
exist.
Args:
path1: str or path-like.
path2: str or path-like.
Returns:
bool. True if the same file, False if not.
"""
return (
path1 and path2
and os.path.isfile(path1) and os.path.isfile(path2)
and os.path.samefile(path1, path2)) | Return True if path1 is the same file as path2.
The reason for this dance is that samefile throws if either file doesn't
exist.
Args:
path1: str or path-like.
path2: str or path-like.
Returns:
bool. True if the same file, False if not. | Below is the instruction that describes the task:
### Input:
Return True if path1 is the same file as path2.
The reason for this dance is that samefile throws if either file doesn't
exist.
Args:
path1: str or path-like.
path2: str or path-like.
Returns:
bool. True if the same file, False if not.
### Response:
def is_same_file(path1, path2):
"""Return True if path1 is the same file as path2.
The reason for this dance is that samefile throws if either file doesn't
exist.
Args:
path1: str or path-like.
path2: str or path-like.
Returns:
bool. True if the same file, False if not.
"""
return (
path1 and path2
and os.path.isfile(path1) and os.path.isfile(path2)
and os.path.samefile(path1, path2)) |
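A quick self-contained check of the behaviour documented above, assuming is_same_file is in scope; the temporary files and the alias spelling are invented for illustration:
import os
import tempfile
with tempfile.TemporaryDirectory() as d:
    real = os.path.join(d, "a.txt")
    with open(real, "w") as fh:
        fh.write("data")
    alias = os.path.join(d, ".", "a.txt")     # same file reached through a different path
    missing = os.path.join(d, "missing.txt")  # never created
    print(is_same_file(real, alias))          # True
    print(is_same_file(real, missing))        # False, and no exception is raised
    print(bool(is_same_file(None, real)))     # False: falsy paths short-circuit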
def getRoom(self, _id):
""" Retrieve a room from it's id """
if SockJSRoomHandler._room.has_key(self._gcls() + _id):
return SockJSRoomHandler._room[self._gcls() + _id]
return None | Retrieve a room from its id | Below is the instruction that describes the task:
### Input:
Retrieve a room from its id
### Response:
def getRoom(self, _id):
""" Retrieve a room from it's id """
if SockJSRoomHandler._room.has_key(self._gcls() + _id):
return SockJSRoomHandler._room[self._gcls() + _id]
return None |
def modified_lines(filename, extra_data, commit=None):
"""Returns the lines that have been modifed for this file.
Args:
filename: the file to check.
extra_data: is the extra_data returned by modified_files. Additionally, a
value of None means that the file was not modified.
commit: the complete sha1 (40 chars) of the commit. Note that specifying
this value will only work (100%) when commit == last_commit (with
respect to the currently checked out revision), otherwise, we could miss
some lines.
Returns: a list of lines that were modified, or None in case all lines are
new.
"""
if extra_data is None:
return []
if extra_data != 'M':
return None
command = ['hg', 'diff', '-U', '0']
if commit:
command.append('--change=%s' % commit)
command.append(filename)
# Split as bytes, as the output may have some non unicode characters.
diff_lines = subprocess.check_output(command).split(
os.linesep.encode('utf-8'))
diff_line_numbers = utils.filter_lines(
diff_lines,
br'@@ -\d+,\d+ \+(?P<start_line>\d+),(?P<lines>\d+) @@',
groups=('start_line', 'lines'))
modified_line_numbers = []
for start_line, lines in diff_line_numbers:
start_line = int(start_line)
lines = int(lines)
modified_line_numbers.extend(range(start_line, start_line + lines))
return modified_line_numbers | Returns the lines that have been modified for this file.
Args:
filename: the file to check.
extra_data: is the extra_data returned by modified_files. Additionally, a
value of None means that the file was not modified.
commit: the complete sha1 (40 chars) of the commit. Note that specifying
this value will only work (100%) when commit == last_commit (with
respect to the currently checked out revision), otherwise, we could miss
some lines.
Returns: a list of lines that were modified, or None in case all lines are
new. | Below is the instruction that describes the task:
### Input:
Returns the lines that have been modified for this file.
Args:
filename: the file to check.
extra_data: is the extra_data returned by modified_files. Additionally, a
value of None means that the file was not modified.
commit: the complete sha1 (40 chars) of the commit. Note that specifying
this value will only work (100%) when commit == last_commit (with
respect to the currently checked out revision), otherwise, we could miss
some lines.
Returns: a list of lines that were modified, or None in case all lines are
new.
### Response:
def modified_lines(filename, extra_data, commit=None):
"""Returns the lines that have been modifed for this file.
Args:
filename: the file to check.
extra_data: is the extra_data returned by modified_files. Additionally, a
value of None means that the file was not modified.
commit: the complete sha1 (40 chars) of the commit. Note that specifying
this value will only work (100%) when commit == last_commit (with
respect to the currently checked out revision), otherwise, we could miss
some lines.
Returns: a list of lines that were modified, or None in case all lines are
new.
"""
if extra_data is None:
return []
if extra_data != 'M':
return None
command = ['hg', 'diff', '-U', '0']
if commit:
command.append('--change=%s' % commit)
command.append(filename)
# Split as bytes, as the output may have some non unicode characters.
diff_lines = subprocess.check_output(command).split(
os.linesep.encode('utf-8'))
diff_line_numbers = utils.filter_lines(
diff_lines,
br'@@ -\d+,\d+ \+(?P<start_line>\d+),(?P<lines>\d+) @@',
groups=('start_line', 'lines'))
modified_line_numbers = []
for start_line, lines in diff_line_numbers:
start_line = int(start_line)
lines = int(lines)
modified_line_numbers.extend(range(start_line, start_line + lines))
return modified_line_numbers |
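The heart of the function above is the unified-diff hunk header emitted by hg diff -U 0. A standalone sketch of that parsing step, using re directly instead of the module's utils.filter_lines helper and an invented two-hunk diff:
import re
sample_diff = b"@@ -10,2 +10,3 @@\n+new\n@@ -40,1 +44,2 @@\n+more\n"
hunk_re = re.compile(br'@@ -\d+,\d+ \+(?P<start_line>\d+),(?P<lines>\d+) @@')
modified = []
for line in sample_diff.splitlines():
    match = hunk_re.search(line)
    if match:
        start, count = int(match.group('start_line')), int(match.group('lines'))
        modified.extend(range(start, start + count))  # lines touched in the new file
print(modified)  # [10, 11, 12, 44, 45]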
def compare_pointer(self, data):
'''
Compares the string data
@return: True if the data is different
'''
if self.old_pointed != data:
self.old_pointed = data
return True
return False | Compares the string data
@return: True if the data is different | Below is the instruction that describes the task:
### Input:
Compares the string data
@return: True if the data is different
### Response:
def compare_pointer(self, data):
'''
Compares the string data
@return: True if the data is different
'''
if self.old_pointed != data:
self.old_pointed = data
return True
return False |
def isexec(path):
'''
Check if given path points to an executable file.
:param path: file path
:type path: str
:return: True if executable, False otherwise
:rtype: bool
'''
return os.path.isfile(path) and os.access(path, os.X_OK) | Check if given path points to an executable file.
:param path: file path
:type path: str
:return: True if executable, False otherwise
:rtype: bool | Below is the instruction that describes the task:
### Input:
Check if given path points to an executable file.
:param path: file path
:type path: str
:return: True if executable, False otherwise
:rtype: bool
### Response:
def isexec(path):
'''
Check if given path points to an executable file.
:param path: file path
:type path: str
:return: True if executable, False otherwise
:rtype: bool
'''
return os.path.isfile(path) and os.access(path, os.X_OK) |
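For example, on a Unix-like system (the exact results depend on the local filesystem, so treat them as illustrative):
import sys
import tempfile
print(isexec(sys.executable))      # True: the interpreter binary exists and is executable
print(isexec('/no/such/binary'))   # False: the path does not exist
with tempfile.NamedTemporaryFile() as tmp:
    print(isexec(tmp.name))        # False: a plain file without the execute bit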
def isometric_remesh(script, SamplingRate=10):
"""Isometric parameterization: remeshing
"""
filter_xml = ''.join([
' <filter name="Iso Parametrization Remeshing">\n',
' <Param name="SamplingRate"',
'value="%d"' % SamplingRate,
'description="Sampling Rate"',
'type="RichInt"',
'tooltip="This specify the sampling rate for remeshing."',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
return None | Isometric parameterization: remeshing | Below is the instruction that describes the task:
### Input:
Isometric parameterization: remeshing
### Response:
def isometric_remesh(script, SamplingRate=10):
"""Isometric parameterization: remeshing
"""
filter_xml = ''.join([
' <filter name="Iso Parametrization Remeshing">\n',
' <Param name="SamplingRate"',
'value="%d"' % SamplingRate,
'description="Sampling Rate"',
'type="RichInt"',
'tooltip="This specify the sampling rate for remeshing."',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
return None |
def get_version(release_level=True):
"""
Return the formatted version information
"""
vers = ["%(major)i.%(minor)i.%(micro)i" % __version_info__]
if release_level and __version_info__['releaselevel'] != 'final':
vers.append('%(releaselevel)s%(serial)i' % __version_info__)
return ''.join(vers) | Return the formatted version information | Below is the instruction that describes the task:
### Input:
Return the formatted version information
### Response:
def get_version(release_level=True):
"""
Return the formatted version information
"""
vers = ["%(major)i.%(minor)i.%(micro)i" % __version_info__]
if release_level and __version_info__['releaselevel'] != 'final':
vers.append('%(releaselevel)s%(serial)i' % __version_info__)
return ''.join(vers) |
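A sketch of the module-level __version_info__ mapping this formatter expects; the concrete numbers here are invented:
__version_info__ = {'major': 1, 'minor': 4, 'micro': 2,
                    'releaselevel': 'beta', 'serial': 3}
print(get_version())                      # '1.4.2beta3'
print(get_version(release_level=False))   # '1.4.2'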
def asAccessibleTo(self, query):
"""
@param query: An Axiom query describing the Items to retrieve, which this
role can access.
@type query: an L{iaxiom.IQuery} provider.
@return: an iterable which yields the shared proxies that are available
to the given role, from the given query.
"""
# XXX TODO #2371: this method really *should* be returning an L{IQuery}
# provider as well, but that is kind of tricky to do. Currently, doing
# queries leaks authority, because the resulting objects have stores
# and "real" items as part of their interface; having this be a "real"
# query provider would obviate the need to escape the L{SharedProxy}
# security constraints in order to do any querying.
allRoles = list(self.allRoles())
count = 0
unlimited = query.cloneQuery(limit=None)
for result in unlimited:
allShares = list(query.store.query(
Share,
AND(Share.sharedItem == result,
Share.sharedTo.oneOf(allRoles))))
interfaces = []
for share in allShares:
interfaces += share.sharedInterfaces
if allShares:
count += 1
yield SharedProxy(result, interfaces, allShares[0].shareID)
if count == query.limit:
return | @param query: An Axiom query describing the Items to retrieve, which this
role can access.
@type query: an L{iaxiom.IQuery} provider.
@return: an iterable which yields the shared proxies that are available
to the given role, from the given query. | Below is the instruction that describes the task:
### Input:
@param query: An Axiom query describing the Items to retrieve, which this
role can access.
@type query: an L{iaxiom.IQuery} provider.
@return: an iterable which yields the shared proxies that are available
to the given role, from the given query.
### Response:
def asAccessibleTo(self, query):
"""
@param query: An Axiom query describing the Items to retrieve, which this
role can access.
@type query: an L{iaxiom.IQuery} provider.
@return: an iterable which yields the shared proxies that are available
to the given role, from the given query.
"""
# XXX TODO #2371: this method really *should* be returning an L{IQuery}
# provider as well, but that is kind of tricky to do. Currently, doing
# queries leaks authority, because the resulting objects have stores
# and "real" items as part of their interface; having this be a "real"
# query provider would obviate the need to escape the L{SharedProxy}
# security constraints in order to do any querying.
allRoles = list(self.allRoles())
count = 0
unlimited = query.cloneQuery(limit=None)
for result in unlimited:
allShares = list(query.store.query(
Share,
AND(Share.sharedItem == result,
Share.sharedTo.oneOf(allRoles))))
interfaces = []
for share in allShares:
interfaces += share.sharedInterfaces
if allShares:
count += 1
yield SharedProxy(result, interfaces, allShares[0].shareID)
if count == query.limit:
return |
def _write_image_description(self):
"""Write metadata to ImageDescription tag."""
if (not self._datashape or self._datashape[0] == 1 or
self._descriptionoffset <= 0):
return
colormapped = self._colormap is not None
if self._imagej:
isrgb = self._shape[-1] in (3, 4)
description = imagej_description(
self._datashape, isrgb, colormapped, **self._metadata)
else:
description = json_description(self._datashape, **self._metadata)
# rewrite description and its length to file
description = description.encode('utf-8')
description = description[:self._descriptionlen-1]
pos = self._fh.tell()
self._fh.seek(self._descriptionoffset)
self._fh.write(description)
self._fh.seek(self._descriptionlenoffset)
self._fh.write(struct.pack(self._byteorder+self._offsetformat,
len(description)+1))
self._fh.seek(pos)
self._descriptionoffset = 0
self._descriptionlenoffset = 0
self._descriptionlen = 0 | Write metadata to ImageDescription tag. | Below is the instruction that describes the task:
### Input:
Write metadata to ImageDescription tag.
### Response:
def _write_image_description(self):
"""Write metadata to ImageDescription tag."""
if (not self._datashape or self._datashape[0] == 1 or
self._descriptionoffset <= 0):
return
colormapped = self._colormap is not None
if self._imagej:
isrgb = self._shape[-1] in (3, 4)
description = imagej_description(
self._datashape, isrgb, colormapped, **self._metadata)
else:
description = json_description(self._datashape, **self._metadata)
# rewrite description and its length to file
description = description.encode('utf-8')
description = description[:self._descriptionlen-1]
pos = self._fh.tell()
self._fh.seek(self._descriptionoffset)
self._fh.write(description)
self._fh.seek(self._descriptionlenoffset)
self._fh.write(struct.pack(self._byteorder+self._offsetformat,
len(description)+1))
self._fh.seek(pos)
self._descriptionoffset = 0
self._descriptionlenoffset = 0
self._descriptionlen = 0 |
def shuffle_srv(records):
"""Randomly reorder SRV records using their weights.
:Parameters:
- `records`: SRV records to shuffle.
:Types:
- `records`: sequence of :dns:`dns.rdtypes.IN.SRV`
:return: reordered records.
:returntype: `list` of :dns:`dns.rdtypes.IN.SRV`"""
if not records:
return []
ret = []
while len(records) > 1:
weight_sum = 0
for rrecord in records:
weight_sum += rrecord.weight + 0.1
thres = random.random() * weight_sum
weight_sum = 0
for rrecord in records:
weight_sum += rrecord.weight + 0.1
if thres < weight_sum:
records.remove(rrecord)
ret.append(rrecord)
break
ret.append(records[0])
return ret | Randomly reorder SRV records using their weights.
:Parameters:
- `records`: SRV records to shuffle.
:Types:
- `records`: sequence of :dns:`dns.rdtypes.IN.SRV`
:return: reordered records.
:returntype: `list` of :dns:`dns.rdtypes.IN.SRV` | Below is the instruction that describes the task:
### Input:
Randomly reorder SRV records using their weights.
:Parameters:
- `records`: SRV records to shuffle.
:Types:
- `records`: sequence of :dns:`dns.rdtypes.IN.SRV`
:return: reordered records.
:returntype: `list` of :dns:`dns.rdtypes.IN.SRV`
### Response:
def shuffle_srv(records):
"""Randomly reorder SRV records using their weights.
:Parameters:
- `records`: SRV records to shuffle.
:Types:
- `records`: sequence of :dns:`dns.rdtypes.IN.SRV`
:return: reordered records.
:returntype: `list` of :dns:`dns.rdtypes.IN.SRV`"""
if not records:
return []
ret = []
while len(records) > 1:
weight_sum = 0
for rrecord in records:
weight_sum += rrecord.weight + 0.1
thres = random.random() * weight_sum
weight_sum = 0
for rrecord in records:
weight_sum += rrecord.weight + 0.1
if thres < weight_sum:
records.remove(rrecord)
ret.append(rrecord)
break
ret.append(records[0])
return ret |
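A small sketch of the weighted reordering, using a namedtuple stand-in because real dns.rdtypes.IN.SRV objects are awkward to construct by hand; the names and weights are invented:
import collections
import random
FakeSrv = collections.namedtuple('FakeSrv', 'target weight')
records = [FakeSrv('a.example.', 50), FakeSrv('b.example.', 30), FakeSrv('c.example.', 0)]
random.seed(0)                          # only to make the sketch repeatable
shuffled = shuffle_srv(list(records))   # pass a copy: the helper mutates its argument
print([r.target for r in shuffled])     # heavier weights are more likely to come first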
def build_interpolators(self):
"""Compute 1-D interpolation functions for all the transforms so they're continuous.."""
self.phi_continuous = []
for xi, phii in zip(self.ace.x, self.ace.x_transforms):
self.phi_continuous.append(interp1d(xi, phii))
self.inverse_theta_continuous = interp1d(self.ace.y_transform, self.ace.y) | Compute 1-D interpolation functions for all the transforms so they're continuous.. | Below is the instruction that describes the task:
### Input:
Compute 1-D interpolation functions for all the transforms so they're continuous..
### Response:
def build_interpolators(self):
"""Compute 1-D interpolation functions for all the transforms so they're continuous.."""
self.phi_continuous = []
for xi, phii in zip(self.ace.x, self.ace.x_transforms):
self.phi_continuous.append(interp1d(xi, phii))
self.inverse_theta_continuous = interp1d(self.ace.y_transform, self.ace.y) |
def build_directory(
sass_path, css_path, output_style='nested',
_root_sass=None, _root_css=None, strip_extension=False,
):
"""Compiles all Sass/SCSS files in ``path`` to CSS.
:param sass_path: the path of the directory which contains source files
to compile
:type sass_path: :class:`str`, :class:`basestring`
:param css_path: the path of the directory compiled CSS files will go
:type css_path: :class:`str`, :class:`basestring`
:param output_style: an optional coding style of the compiled result.
choose one of: ``'nested'`` (default), ``'expanded'``,
``'compact'``, ``'compressed'``
:type output_style: :class:`str`
:returns: a dictionary of source filenames to compiled CSS filenames
:rtype: :class:`collections.abc.Mapping`
.. versionadded:: 0.6.0
The ``output_style`` parameter.
"""
if _root_sass is None or _root_css is None:
_root_sass = sass_path
_root_css = css_path
result = {}
if not os.path.isdir(css_path):
os.mkdir(css_path)
for name in os.listdir(sass_path):
sass_fullname = os.path.join(sass_path, name)
if SUFFIX_PATTERN.search(name) and os.path.isfile(sass_fullname):
if name[0] == '_':
# Do not compile if it's partial
continue
if strip_extension:
name, _ = os.path.splitext(name)
css_fullname = os.path.join(css_path, name) + '.css'
css = compile(
filename=sass_fullname,
output_style=output_style,
include_paths=[_root_sass],
)
with io.open(
css_fullname, 'w', encoding='utf-8', newline='',
) as css_file:
css_file.write(css)
result[os.path.relpath(sass_fullname, _root_sass)] = \
os.path.relpath(css_fullname, _root_css)
elif os.path.isdir(sass_fullname):
css_fullname = os.path.join(css_path, name)
subresult = build_directory(
sass_fullname, css_fullname,
output_style=output_style,
_root_sass=_root_sass,
_root_css=_root_css,
strip_extension=strip_extension,
)
result.update(subresult)
return result | Compiles all Sass/SCSS files in ``path`` to CSS.
:param sass_path: the path of the directory which contains source files
to compile
:type sass_path: :class:`str`, :class:`basestring`
:param css_path: the path of the directory compiled CSS files will go
:type css_path: :class:`str`, :class:`basestring`
:param output_style: an optional coding style of the compiled result.
choose one of: ``'nested'`` (default), ``'expanded'``,
``'compact'``, ``'compressed'``
:type output_style: :class:`str`
:returns: a dictionary of source filenames to compiled CSS filenames
:rtype: :class:`collections.abc.Mapping`
.. versionadded:: 0.6.0
The ``output_style`` parameter. | Below is the instruction that describes the task:
### Input:
Compiles all Sass/SCSS files in ``path`` to CSS.
:param sass_path: the path of the directory which contains source files
to compile
:type sass_path: :class:`str`, :class:`basestring`
:param css_path: the path of the directory compiled CSS files will go
:type css_path: :class:`str`, :class:`basestring`
:param output_style: an optional coding style of the compiled result.
choose one of: ``'nested'`` (default), ``'expanded'``,
``'compact'``, ``'compressed'``
:type output_style: :class:`str`
:returns: a dictionary of source filenames to compiled CSS filenames
:rtype: :class:`collections.abc.Mapping`
.. versionadded:: 0.6.0
The ``output_style`` parameter.
### Response:
def build_directory(
sass_path, css_path, output_style='nested',
_root_sass=None, _root_css=None, strip_extension=False,
):
"""Compiles all Sass/SCSS files in ``path`` to CSS.
:param sass_path: the path of the directory which contains source files
to compile
:type sass_path: :class:`str`, :class:`basestring`
:param css_path: the path of the directory compiled CSS files will go
:type css_path: :class:`str`, :class:`basestring`
:param output_style: an optional coding style of the compiled result.
choose one of: ``'nested'`` (default), ``'expanded'``,
``'compact'``, ``'compressed'``
:type output_style: :class:`str`
:returns: a dictionary of source filenames to compiled CSS filenames
:rtype: :class:`collections.abc.Mapping`
.. versionadded:: 0.6.0
The ``output_style`` parameter.
"""
if _root_sass is None or _root_css is None:
_root_sass = sass_path
_root_css = css_path
result = {}
if not os.path.isdir(css_path):
os.mkdir(css_path)
for name in os.listdir(sass_path):
sass_fullname = os.path.join(sass_path, name)
if SUFFIX_PATTERN.search(name) and os.path.isfile(sass_fullname):
if name[0] == '_':
# Do not compile if it's partial
continue
if strip_extension:
name, _ = os.path.splitext(name)
css_fullname = os.path.join(css_path, name) + '.css'
css = compile(
filename=sass_fullname,
output_style=output_style,
include_paths=[_root_sass],
)
with io.open(
css_fullname, 'w', encoding='utf-8', newline='',
) as css_file:
css_file.write(css)
result[os.path.relpath(sass_fullname, _root_sass)] = \
os.path.relpath(css_fullname, _root_css)
elif os.path.isdir(sass_fullname):
css_fullname = os.path.join(css_path, name)
subresult = build_directory(
sass_fullname, css_fullname,
output_style=output_style,
_root_sass=_root_sass,
_root_css=_root_css,
strip_extension=strip_extension,
)
result.update(subresult)
return result |
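A hedged usage sketch; the directory names are invented, libsass must be installed, and compile/SUFFIX_PATTERN come from the same module:
# Compile every Sass/SCSS file under ./scss into ./static/css, mirroring the
# directory tree and skipping partials whose names start with an underscore.
mapping = build_directory('scss', 'static/css',
                          output_style='compressed',
                          strip_extension=True)  # write style.css, not style.scss.css
for src, dst in mapping.items():
    print(src, '->', dst)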
def acs2d(input, exec_path='', time_stamps=False, verbose=False, quiet=False,
exe_args=None):
r"""
Run the acs2d.e executable as from the shell.
Output is automatically named based on input suffix:
+--------------------+----------------+------------------------------+
| INPUT | OUTPUT | EXPECTED DATA |
+====================+================+==============================+
| ``*_raw.fits`` | ``*_flt.fits`` | SBC image. |
+--------------------+----------------+------------------------------+
| ``*_blv_tmp.fits`` | ``*_flt.fits`` | ACSCCD output. |
+--------------------+----------------+------------------------------+
| ``*_blc_tmp.fits`` | ``*_flc.fits`` | ACSCCD output with PCTECORR. |
+--------------------+----------------+------------------------------+
| ``*_crj_tmp.fits`` | ``*_crj.fits`` | ACSREJ output. |
+--------------------+----------------+------------------------------+
| ``*_crc_tmp.fits`` | ``*_crc.fits`` | ACSREJ output with PCTECORR. |
+--------------------+----------------+------------------------------+
Parameters
----------
input : str or list of str
Input filenames in one of these formats:
* a single filename ('j1234567q_blv_tmp.fits')
* a Python list of filenames
* a partial filename with wildcards ('\*blv_tmp.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
exec_path : str, optional
The complete path to ACS2D executable.
If not given, run ACS2D given by 'acs2d.e'.
time_stamps : bool, optional
Set to True to turn on the printing of time stamps.
verbose : bool, optional
Set to True for verbose output.
quiet : bool, optional
Set to True for quiet output.
exe_args : list, optional
Arbitrary arguments passed to underlying executable call.
Note: Implementation uses subprocess.call and whitespace is not
permitted. E.g. use exe_args=['--nThreads', '1']
"""
from stsci.tools import parseinput # Optional package dependency
if exec_path:
if not os.path.exists(exec_path):
raise OSError('Executable not found: ' + exec_path)
call_list = [exec_path]
else:
call_list = ['acs2d.e']
# Parse input to get list of filenames to process.
# acs2d.e only takes 'file1,file2,...'
infiles, dummy_out = parseinput.parseinput(input)
call_list.append(','.join(infiles))
if time_stamps:
call_list.append('-t')
if verbose:
call_list.append('-v')
if quiet:
call_list.append('-q')
if exe_args:
call_list.extend(exe_args)
subprocess.check_call(call_list) | r"""
Run the acs2d.e executable as from the shell.
Output is automatically named based on input suffix:
+--------------------+----------------+------------------------------+
| INPUT | OUTPUT | EXPECTED DATA |
+====================+================+==============================+
| ``*_raw.fits`` | ``*_flt.fits`` | SBC image. |
+--------------------+----------------+------------------------------+
| ``*_blv_tmp.fits`` | ``*_flt.fits`` | ACSCCD output. |
+--------------------+----------------+------------------------------+
| ``*_blc_tmp.fits`` | ``*_flc.fits`` | ACSCCD output with PCTECORR. |
+--------------------+----------------+------------------------------+
| ``*_crj_tmp.fits`` | ``*_crj.fits`` | ACSREJ output. |
+--------------------+----------------+------------------------------+
| ``*_crc_tmp.fits`` | ``*_crc.fits`` | ACSREJ output with PCTECORR. |
+--------------------+----------------+------------------------------+
Parameters
----------
input : str or list of str
Input filenames in one of these formats:
* a single filename ('j1234567q_blv_tmp.fits')
* a Python list of filenames
* a partial filename with wildcards ('\*blv_tmp.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
exec_path : str, optional
The complete path to ACS2D executable.
If not given, run ACS2D given by 'acs2d.e'.
time_stamps : bool, optional
Set to True to turn on the printing of time stamps.
verbose : bool, optional
Set to True for verbose output.
quiet : bool, optional
Set to True for quiet output.
exe_args : list, optional
Arbitrary arguments passed to underlying executable call.
Note: Implementation uses subprocess.call and whitespace is not
permitted. E.g. use exe_args=['--nThreads', '1'] | Below is the instruction that describes the task:
### Input:
r"""
Run the acs2d.e executable as from the shell.
Output is automatically named based on input suffix:
+--------------------+----------------+------------------------------+
| INPUT | OUTPUT | EXPECTED DATA |
+====================+================+==============================+
| ``*_raw.fits`` | ``*_flt.fits`` | SBC image. |
+--------------------+----------------+------------------------------+
| ``*_blv_tmp.fits`` | ``*_flt.fits`` | ACSCCD output. |
+--------------------+----------------+------------------------------+
| ``*_blc_tmp.fits`` | ``*_flc.fits`` | ACSCCD output with PCTECORR. |
+--------------------+----------------+------------------------------+
| ``*_crj_tmp.fits`` | ``*_crj.fits`` | ACSREJ output. |
+--------------------+----------------+------------------------------+
| ``*_crc_tmp.fits`` | ``*_crc.fits`` | ACSREJ output with PCTECORR. |
+--------------------+----------------+------------------------------+
Parameters
----------
input : str or list of str
Input filenames in one of these formats:
* a single filename ('j1234567q_blv_tmp.fits')
* a Python list of filenames
* a partial filename with wildcards ('\*blv_tmp.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
exec_path : str, optional
The complete path to ACS2D executable.
If not given, run ACS2D given by 'acs2d.e'.
time_stamps : bool, optional
Set to True to turn on the printing of time stamps.
verbose : bool, optional
Set to True for verbose output.
quiet : bool, optional
Set to True for quiet output.
exe_args : list, optional
Arbitrary arguments passed to underlying executable call.
Note: Implementation uses subprocess.call and whitespace is not
permitted. E.g. use exe_args=['--nThreads', '1']
### Response:
def acs2d(input, exec_path='', time_stamps=False, verbose=False, quiet=False,
exe_args=None):
r"""
Run the acs2d.e executable as from the shell.
Output is automatically named based on input suffix:
+--------------------+----------------+------------------------------+
| INPUT | OUTPUT | EXPECTED DATA |
+====================+================+==============================+
| ``*_raw.fits`` | ``*_flt.fits`` | SBC image. |
+--------------------+----------------+------------------------------+
| ``*_blv_tmp.fits`` | ``*_flt.fits`` | ACSCCD output. |
+--------------------+----------------+------------------------------+
| ``*_blc_tmp.fits`` | ``*_flc.fits`` | ACSCCD output with PCTECORR. |
+--------------------+----------------+------------------------------+
| ``*_crj_tmp.fits`` | ``*_crj.fits`` | ACSREJ output. |
+--------------------+----------------+------------------------------+
| ``*_crc_tmp.fits`` | ``*_crc.fits`` | ACSREJ output with PCTECORR. |
+--------------------+----------------+------------------------------+
Parameters
----------
input : str or list of str
Input filenames in one of these formats:
* a single filename ('j1234567q_blv_tmp.fits')
* a Python list of filenames
* a partial filename with wildcards ('\*blv_tmp.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
exec_path : str, optional
The complete path to ACS2D executable.
If not given, run ACS2D given by 'acs2d.e'.
time_stamps : bool, optional
Set to True to turn on the printing of time stamps.
verbose : bool, optional
Set to True for verbose output.
quiet : bool, optional
Set to True for quiet output.
exe_args : list, optional
Arbitrary arguments passed to underlying executable call.
Note: Implementation uses subprocess.call and whitespace is not
permitted. E.g. use exe_args=['--nThreads', '1']
"""
from stsci.tools import parseinput # Optional package dependency
if exec_path:
if not os.path.exists(exec_path):
raise OSError('Executable not found: ' + exec_path)
call_list = [exec_path]
else:
call_list = ['acs2d.e']
# Parse input to get list of filenames to process.
# acs2d.e only takes 'file1,file2,...'
infiles, dummy_out = parseinput.parseinput(input)
call_list.append(','.join(infiles))
if time_stamps:
call_list.append('-t')
if verbose:
call_list.append('-v')
if quiet:
call_list.append('-q')
if exe_args:
call_list.extend(exe_args)
subprocess.check_call(call_list) |
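A call sketch based on the docstring above; the filename is a placeholder and acs2d.e must be on the PATH for the subprocess call to succeed:
# Calibrate one bias-corrected exposure; the *_flt.fits output name is derived
# automatically from the input suffix, as in the table above.
acs2d('j1234567q_blv_tmp.fits', time_stamps=True, verbose=True)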
def _get_possible_query_bridging_contigs(self, nucmer_hits, log_fh=None, log_outprefix=None):
'''Input is dict qry_name -> list of nucmer hits to that qry. Returns dict qry_name -> tuple(start hit, end hit)'''
bridges = {}
writing_log_file = None not in [log_fh, log_outprefix]
for qry_name, hits_to_qry in nucmer_hits.items():
if len(hits_to_qry) < 2:
continue
if writing_log_file:
print(log_outprefix, '\t', qry_name, ': checking nucmer matches', sep='', file=log_fh)
longest_start_hit = self._get_longest_hit_at_qry_start(hits_to_qry)
longest_end_hit = self._get_longest_hit_at_qry_end(hits_to_qry)
if (
None in (longest_start_hit, longest_end_hit)
or longest_start_hit.ref_name == longest_end_hit.ref_name
or self._hits_have_same_reference(longest_start_hit, longest_end_hit)
):
if writing_log_file:
print(log_outprefix, '\t', qry_name, ': no potential pairs of hits to merge contigs', sep='', file=log_fh)
continue
if writing_log_file:
print(log_outprefix, '\t', qry_name, ': potential pair of hits to merge contigs...', sep='', file=log_fh)
print(log_outprefix, '\t', qry_name, ': ', longest_start_hit, sep='', file=log_fh)
print(log_outprefix, '\t', qry_name, ': ', longest_end_hit, sep='', file=log_fh)
shortest_hit_length = self._min_qry_hit_length([longest_start_hit, longest_end_hit])
has_longer_hit = self._has_qry_hit_longer_than(
hits_to_qry,
shortest_hit_length,
hits_to_exclude={longest_start_hit, longest_end_hit}
)
if has_longer_hit and writing_log_file:
print(log_outprefix, '\t', qry_name, ': rejected - there is a longer hit to elsewhere', sep='', file=log_fh)
orientation_ok = self._orientation_ok_to_bridge_contigs(longest_start_hit, longest_end_hit)
if writing_log_file and not orientation_ok:
print(log_outprefix, '\t', qry_name, ': rejected - orientation/distance from ends not correct to make a merge', sep='', file=log_fh)
if orientation_ok and not has_longer_hit:
if writing_log_file:
print(log_outprefix, '\t', qry_name, ': might be used - no longer hits elsewhere and orientation/distance to ends OK', sep='', file=log_fh)
bridges[qry_name] = (longest_start_hit, longest_end_hit)
return bridges | Input is dict qry_name -> list of nucmer hits to that qry. Returns dict qry_name -> tuple(start hit, end hit) | Below is the instruction that describes the task:
### Input:
Input is dict qry_name -> list of nucmer hits to that qry. Returns dict qry_name -> tuple(start hit, end hit)
### Response:
def _get_possible_query_bridging_contigs(self, nucmer_hits, log_fh=None, log_outprefix=None):
'''Input is dict qry_name -> list of nucmer hits to that qry. Returns dict qry_name -> tuple(start hit, end hit)'''
bridges = {}
writing_log_file = None not in [log_fh, log_outprefix]
for qry_name, hits_to_qry in nucmer_hits.items():
if len(hits_to_qry) < 2:
continue
if writing_log_file:
print(log_outprefix, '\t', qry_name, ': checking nucmer matches', sep='', file=log_fh)
longest_start_hit = self._get_longest_hit_at_qry_start(hits_to_qry)
longest_end_hit = self._get_longest_hit_at_qry_end(hits_to_qry)
if (
None in (longest_start_hit, longest_end_hit)
or longest_start_hit.ref_name == longest_end_hit.ref_name
or self._hits_have_same_reference(longest_start_hit, longest_end_hit)
):
if writing_log_file:
print(log_outprefix, '\t', qry_name, ': no potential pairs of hits to merge contigs', sep='', file=log_fh)
continue
if writing_log_file:
print(log_outprefix, '\t', qry_name, ': potential pair of hits to merge contigs...', sep='', file=log_fh)
print(log_outprefix, '\t', qry_name, ': ', longest_start_hit, sep='', file=log_fh)
print(log_outprefix, '\t', qry_name, ': ', longest_end_hit, sep='', file=log_fh)
shortest_hit_length = self._min_qry_hit_length([longest_start_hit, longest_end_hit])
has_longer_hit = self._has_qry_hit_longer_than(
hits_to_qry,
shortest_hit_length,
hits_to_exclude={longest_start_hit, longest_end_hit}
)
if has_longer_hit and writing_log_file:
print(log_outprefix, '\t', qry_name, ': rejected - there is a longer hit to elsewhere', sep='', file=log_fh)
orientation_ok = self._orientation_ok_to_bridge_contigs(longest_start_hit, longest_end_hit)
if writing_log_file and not orientation_ok:
print(log_outprefix, '\t', qry_name, ': rejected - orientation/distance from ends not correct to make a merge', sep='', file=log_fh)
if orientation_ok and not has_longer_hit:
if writing_log_file:
print(log_outprefix, '\t', qry_name, ': might be used - no longer hits elsewhere and orientation/distance to ends OK', sep='', file=log_fh)
bridges[qry_name] = (longest_start_hit, longest_end_hit)
return bridges |
def pattern(self, value):
"""
Setter for **self.__pattern** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) in (unicode, QString), \
"'{0}' attribute: '{1}' type is not 'unicode' or 'QString'!".format("pattern", value)
self.__pattern = value | Setter for **self.__pattern** attribute.
:param value: Attribute value.
:type value: unicode | Below is the instruction that describes the task:
### Input:
Setter for **self.__pattern** attribute.
:param value: Attribute value.
:type value: unicode
### Response:
def pattern(self, value):
"""
Setter for **self.__pattern** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) in (unicode, QString), \
"'{0}' attribute: '{1}' type is not 'unicode' or 'QString'!".format("pattern", value)
self.__pattern = value |
def qteEmulateKeypresses(self, keysequence):
"""
Emulate the Qt key presses that define ``keysequence``.
The method will put the keys into a queue and process them one
by one once the event loop is idle, ie. the event loop
executes all signals and macros associated with the emulated
key press first before the next one is emulated.
|Args|
* ``keysequence`` (**QtmacsKeysequence**): the key sequence to
emulate.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Convert the key sequence into a QtmacsKeysequence object, or
# raise an QtmacsOtherError if the conversion is impossible.
keysequence = QtmacsKeysequence(keysequence)
key_list = keysequence.toQKeyEventList()
# Do nothing if the key list is empty.
if len(key_list) > 0:
# Add the keys to the queue which the event timer will
# process.
for event in key_list:
self._qteKeyEmulationQueue.append(event) | Emulate the Qt key presses that define ``keysequence``.
The method will put the keys into a queue and process them one
by one once the event loop is idle, ie. the event loop
executes all signals and macros associated with the emulated
key press first before the next one is emulated.
|Args|
* ``keysequence`` (**QtmacsKeysequence**): the key sequence to
emulate.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type. | Below is the instruction that describes the task:
### Input:
Emulate the Qt key presses that define ``keysequence``.
The method will put the keys into a queue and process them one
by one once the event loop is idle, ie. the event loop
executes all signals and macros associated with the emulated
key press first before the next one is emulated.
|Args|
* ``keysequence`` (**QtmacsKeysequence**): the key sequence to
emulate.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
### Response:
def qteEmulateKeypresses(self, keysequence):
"""
Emulate the Qt key presses that define ``keysequence``.
The method will put the keys into a queue and process them one
by one once the event loop is idle, ie. the event loop
executes all signals and macros associated with the emulated
key press first before the next one is emulated.
|Args|
* ``keysequence`` (**QtmacsKeysequence**): the key sequence to
emulate.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Convert the key sequence into a QtmacsKeysequence object, or
# raise an QtmacsOtherError if the conversion is impossible.
keysequence = QtmacsKeysequence(keysequence)
key_list = keysequence.toQKeyEventList()
# Do nothing if the key list is empty.
if len(key_list) > 0:
# Add the keys to the queue which the event timer will
# process.
for event in key_list:
self._qteKeyEmulationQueue.append(event) |
def regex_validation_sealer(fields, defaults, RegexType=type(re.compile(""))):
"""
Example sealer that just does regex-based validation.
"""
required = set(fields) - set(defaults)
if required:
raise TypeError(
"regex_validation_sealer doesn't support required arguments. Fields that need fixing: %s" % required)
klass = None
kwarg_validators = dict(
(key, val if isinstance(val, RegexType) else re.compile(val)) for key, val in defaults.items()
)
arg_validators = list(
kwarg_validators[key] for key in fields
)
def __init__(self, *args, **kwargs):
for pos, (value, validator) in enumerate(zip(args, arg_validators)):
if not validator.match(value):
raise ValidationError("Positional argument %s failed validation. %r doesn't match regex %r" % (
pos, value, validator.pattern
))
for key, value in kwargs.items():
if key in kwarg_validators:
validator = kwarg_validators[key]
if not validator.match(value):
raise ValidationError("Keyword argument %r failed validation. %r doesn't match regex %r" % (
key, value, validator.pattern
))
super(klass, self).__init__(*args, **kwargs)
klass = type("RegexValidateBase", (__base__,), dict(
__init__=__init__,
))
return klass | Example sealer that just does regex-based validation. | Below is the instruction that describes the task:
### Input:
Example sealer that just does regex-based validation.
### Response:
def regex_validation_sealer(fields, defaults, RegexType=type(re.compile(""))):
"""
Example sealer that just does regex-based validation.
"""
required = set(fields) - set(defaults)
if required:
raise TypeError(
"regex_validation_sealer doesn't support required arguments. Fields that need fixing: %s" % required)
klass = None
kwarg_validators = dict(
(key, val if isinstance(val, RegexType) else re.compile(val)) for key, val in defaults.items()
)
arg_validators = list(
kwarg_validators[key] for key in fields
)
def __init__(self, *args, **kwargs):
for pos, (value, validator) in enumerate(zip(args, arg_validators)):
if not validator.match(value):
raise ValidationError("Positional argument %s failed validation. %r doesn't match regex %r" % (
pos, value, validator.pattern
))
for key, value in kwargs.items():
if key in kwarg_validators:
validator = kwarg_validators[key]
if not validator.match(value):
raise ValidationError("Keyword argument %r failed validation. %r doesn't match regex %r" % (
key, value, validator.pattern
))
super(klass, self).__init__(*args, **kwargs)
klass = type("RegexValidateBase", (__base__,), dict(
__init__=__init__,
))
return klass |
def add_status_parser(subparsers, parent_parser):
"""Adds argument parser for the status command
Args:
subparsers: Add parsers to this subparser object
parent_parser: The parent argparse.ArgumentParser object
"""
parser = subparsers.add_parser(
'status',
help='Displays information about validator status',
description="Provides a subcommand to show a validator\'s status")
grand_parsers = parser.add_subparsers(title='subcommands',
dest='subcommand')
grand_parsers.required = True
add_status_show_parser(grand_parsers, parent_parser) | Adds argument parser for the status command
Args:
subparsers: Add parsers to this subparser object
parent_parser: The parent argparse.ArgumentParser object | Below is the instruction that describes the task:
### Input:
Adds argument parser for the status command
Args:
subparsers: Add parsers to this subparser object
parent_parser: The parent argparse.ArgumentParser object
### Response:
def add_status_parser(subparsers, parent_parser):
"""Adds argument parser for the status command
Args:
subparsers: Add parsers to this subparser object
parent_parser: The parent argparse.ArgumentParser object
"""
parser = subparsers.add_parser(
'status',
help='Displays information about validator status',
description="Provides a subcommand to show a validator\'s status")
grand_parsers = parser.add_subparsers(title='subcommands',
dest='subcommand')
grand_parsers.required = True
add_status_show_parser(grand_parsers, parent_parser) |
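A minimal wiring sketch, assuming add_status_show_parser from the same module is importable; the program name is invented:
import argparse
parent_parser = argparse.ArgumentParser(add_help=False)
main_parser = argparse.ArgumentParser(prog='sawtooth')
subparsers = main_parser.add_subparsers(title='subcommands', dest='command')
add_status_parser(subparsers, parent_parser)
print('status' in subparsers.choices)  # True: the subcommand is now registered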
def create_notifications(users, notification_model, notification_type, related_object):
"""
create notifications in a background job to avoid slowing down users
"""
# shortcuts for readability
Notification = notification_model
# text
additional = related_object.__dict__ if related_object else ''
notification_text = TEXTS[notification_type] % additional
# loop users, notification settings check is done in Notification model
for user in users:
n = Notification(
to_user=user,
type=notification_type,
text=notification_text
)
# attach related object if present
if related_object:
n.related_object = related_object
# create notification and send according to user settings
n.save() | create notifications in a background job to avoid slowing down users | Below is the instruction that describes the task:
### Input:
create notifications in a background job to avoid slowing down users
### Response:
def create_notifications(users, notification_model, notification_type, related_object):
"""
create notifications in a background job to avoid slowing down users
"""
# shortcuts for readability
Notification = notification_model
# text
additional = related_object.__dict__ if related_object else ''
notification_text = TEXTS[notification_type] % additional
# loop users, notification settings check is done in Notification model
for user in users:
n = Notification(
to_user=user,
type=notification_type,
text=notification_text
)
# attach related object if present
if related_object:
n.related_object = related_object
# create notification and send according to user settings
n.save() |
def add_install_button(self, grid_lang, row, column):
"""
Add button that opens the window for installing more assistants
"""
btn = self.button_with_label('<b>Install more...</b>')
if row == 0 and column == 0:
grid_lang.add(btn)
else:
grid_lang.attach(btn, column, row, 1, 1)
btn.connect("clicked", self.parent.install_btn_clicked)
return btn | Add button that opens the window for installing more assistants | Below is the instruction that describes the task:
### Input:
Add button that opens the window for installing more assistants
### Response:
def add_install_button(self, grid_lang, row, column):
"""
Add button that opens the window for installing more assistants
"""
btn = self.button_with_label('<b>Install more...</b>')
if row == 0 and column == 0:
grid_lang.add(btn)
else:
grid_lang.attach(btn, column, row, 1, 1)
btn.connect("clicked", self.parent.install_btn_clicked)
return btn |
def teardown(self):
'''Teardown trust domain by removing trusted devices.'''
for device in self.devices:
self._remove_trustee(device)
self._populate_domain()
self.domain = {} | Teardown trust domain by removing trusted devices. | Below is the instruction that describes the task:
### Input:
Teardown trust domain by removing trusted devices.
### Response:
def teardown(self):
'''Teardown trust domain by removing trusted devices.'''
for device in self.devices:
self._remove_trustee(device)
self._populate_domain()
self.domain = {} |
def get_embeddings_index(embedding_type='glove.42B.300d', embedding_dims=None, embedding_path=None, cache=True):
"""Retrieves embeddings index from embedding name or path. Will automatically download and cache as needed.
Args:
embedding_type: The embedding type to load.
embedding_path: Path to a local embedding to use instead of the embedding type. Ignores `embedding_type` if specified.
Returns:
The embeddings indexed by word.
"""
if embedding_path is not None:
embedding_type = embedding_path # identify embedding by path
embeddings_index = _EMBEDDINGS_CACHE.get(embedding_type)
if embeddings_index is not None:
return embeddings_index
if embedding_path is None:
embedding_type_obj = get_embedding_type(embedding_type)
# some very rough wrangling of zip files with the keras util `get_file`
# a special problem: when multiple files are in one zip file
extract = embedding_type_obj.get('extract', True)
file_path = get_file(
embedding_type_obj['file'], origin=embedding_type_obj['url'], extract=extract, cache_subdir='embeddings', file_hash=embedding_type_obj.get('file_hash',))
if 'file_in_zip' in embedding_type_obj:
zip_folder = file_path.split('.zip')[0]
with ZipFile(file_path, 'r') as zf:
zf.extractall(zip_folder)
file_path = os.path.join(
zip_folder, embedding_type_obj['file_in_zip'])
else:
if extract:
if file_path.endswith('.zip'):
file_path = file_path.split('.zip')[0]
# if file_path.endswith('.gz'):
# file_path = file_path.split('.gz')[0]
else:
file_path = embedding_path
embeddings_index = _build_embeddings_index(file_path, embedding_dims)
if cache:
_EMBEDDINGS_CACHE[embedding_type] = embeddings_index
return embeddings_index | Retrieves embeddings index from embedding name or path. Will automatically download and cache as needed.
Args:
embedding_type: The embedding type to load.
embedding_path: Path to a local embedding to use instead of the embedding type. Ignores `embedding_type` if specified.
Returns:
The embeddings indexed by word. | Below is the instruction that describes the task:
### Input:
Retrieves embeddings index from embedding name or path. Will automatically download and cache as needed.
Args:
embedding_type: The embedding type to load.
embedding_path: Path to a local embedding to use instead of the embedding type. Ignores `embedding_type` if specified.
Returns:
The embeddings indexed by word.
### Response:
def get_embeddings_index(embedding_type='glove.42B.300d', embedding_dims=None, embedding_path=None, cache=True):
"""Retrieves embeddings index from embedding name or path. Will automatically download and cache as needed.
Args:
embedding_type: The embedding type to load.
embedding_path: Path to a local embedding to use instead of the embedding type. Ignores `embedding_type` if specified.
Returns:
The embeddings indexed by word.
"""
if embedding_path is not None:
embedding_type = embedding_path # identify embedding by path
embeddings_index = _EMBEDDINGS_CACHE.get(embedding_type)
if embeddings_index is not None:
return embeddings_index
if embedding_path is None:
embedding_type_obj = get_embedding_type(embedding_type)
# some very rough wrangling of zip files with the keras util `get_file`
# a special problem: when multiple files are in one zip file
extract = embedding_type_obj.get('extract', True)
file_path = get_file(
embedding_type_obj['file'], origin=embedding_type_obj['url'], extract=extract, cache_subdir='embeddings', file_hash=embedding_type_obj.get('file_hash',))
if 'file_in_zip' in embedding_type_obj:
zip_folder = file_path.split('.zip')[0]
with ZipFile(file_path, 'r') as zf:
zf.extractall(zip_folder)
file_path = os.path.join(
zip_folder, embedding_type_obj['file_in_zip'])
else:
if extract:
if file_path.endswith('.zip'):
file_path = file_path.split('.zip')[0]
# if file_path.endswith('.gz'):
# file_path = file_path.split('.gz')[0]
else:
file_path = embedding_path
embeddings_index = _build_embeddings_index(file_path, embedding_dims)
if cache:
_EMBEDDINGS_CACHE[embedding_type] = embeddings_index
return embeddings_index |
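A hedged usage sketch, assuming the returned index behaves like a dict of word to vector. The default glove.42B.300d download is several gigabytes, so a local GloVe-style text file is often the more practical route; the path and dimensionality below are placeholders:
index = get_embeddings_index(embedding_path='my_vectors.txt', embedding_dims=100)
vector = index.get('the')          # None if the word is missing from the vocabulary
print(len(index), None if vector is None else len(vector))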
def _add_file(self, path, **params):
"""
Attempt to add a file to the system monitoring mechanism.
"""
log = self._getparam('log', self._discard, **params)
fd = None
try:
fd = os.open(path, os.O_RDONLY)
except Exception as e:
if not self.paths[path]:
log.error("Open failed on watched path %r -- %s", path, e, exc_info=log.isEnabledFor(logging.DEBUG))
raise e
elif path in self.paths_pending:
log.debug("path %r is still pending -- %s", path, e)
else:
self.paths_pending[path] = True
log.debug("Added %r to pending list after open failure -- %s", path, e)
return False
if self._mode == WF_KQUEUE:
log.debug("path %s opened as fd %d", path, fd)
try:
ev = select.kevent(fd,
filter=select.KQ_FILTER_VNODE,
flags=select.KQ_EV_ADD | select.KQ_EV_CLEAR,
fflags=select.KQ_NOTE_WRITE | select.KQ_NOTE_ATTRIB | select.KQ_NOTE_LINK |
select.KQ_NOTE_DELETE | select.KQ_NOTE_RENAME)
self._kq.control([ev], 0, 0)
except Exception as e:
log.error("kevent failed on watched path %r -- %s", path, e)
try: os.close(fd)
except: pass
raise e
elif self._mode == WF_INOTIFYX:
# inotify doesn't need the target paths open, so now it is known to be
# accessible, close the actual fd and use the watch-descriptor as the fd.
#
# However, due to an apparent simfs bug where inotify does not fire either
# IN_DELETE_SELF or IN_MOVE_SELF, we need to record the inode so that we
# can detect deletes and renames internally. simfs is used in containers.
#
try:
s = os.fstat(fd)
self._inx_inode[path] = s.st_ino
except Exception as e:
log.error("fstat(%d) failed on open path %r -- %s", fd, path, e)
try: os.close(fd)
except: pass
raise e
try: os.close(fd)
except: pass
try:
fd = pynotifyx.add_watch(self._inx_fd, path, self._inx_mask)
log.debug("path %s watched with wd %d", path, fd)
except Exception as e:
log.error("inotify failed on watched path %r -- %s", path, e)
raise e
elif self._mode == WF_POLLING:
log.debug("path %s opened as fd %d", path, fd)
fstate = self._poll_get_stat(fd, path)
if fstate:
self._poll_stat[fd] = fstate
self.paths_open[path] = fd
self.fds_open[fd] = path
return True | Attempt to add a file to the system monitoring mechanism. | Below is the instruction that describes the task:
### Input:
Attempt to add a file to the system monitoring mechanism.
### Response:
def _add_file(self, path, **params):
"""
Attempt to add a file to the system monitoring mechanism.
"""
log = self._getparam('log', self._discard, **params)
fd = None
try:
fd = os.open(path, os.O_RDONLY)
except Exception as e:
if not self.paths[path]:
log.error("Open failed on watched path %r -- %s", path, e, exc_info=log.isEnabledFor(logging.DEBUG))
raise e
elif path in self.paths_pending:
log.debug("path %r is still pending -- %s", path, e)
else:
self.paths_pending[path] = True
log.debug("Added %r to pending list after open failure -- %s", path, e)
return False
if self._mode == WF_KQUEUE:
log.debug("path %s opened as fd %d", path, fd)
try:
ev = select.kevent(fd,
filter=select.KQ_FILTER_VNODE,
flags=select.KQ_EV_ADD | select.KQ_EV_CLEAR,
fflags=select.KQ_NOTE_WRITE | select.KQ_NOTE_ATTRIB | select.KQ_NOTE_LINK |
select.KQ_NOTE_DELETE | select.KQ_NOTE_RENAME)
self._kq.control([ev], 0, 0)
except Exception as e:
log.error("kevent failed on watched path %r -- %s", path, e)
try: os.close(fd)
except: pass
raise e
elif self._mode == WF_INOTIFYX:
# inotify doesn't need the target paths open, so now it is known to be
# accessible, close the actual fd and use the watch-descriptor as the fd.
#
# However, due to an apparent simfs bug where inotify does not fire either
# IN_DELETE_SELF or IN_MOVE_SELF, we need to record the inode so that we
# can detect deletes and renames internally. simfs is used in containers.
#
try:
s = os.fstat(fd)
self._inx_inode[path] = s.st_ino
except Exception as e:
log.error("fstat(%d) failed on open path %r -- %s", fd, path, e)
try: os.close(fd)
except: pass
raise e
try: os.close(fd)
except: pass
try:
fd = pynotifyx.add_watch(self._inx_fd, path, self._inx_mask)
log.debug("path %s watched with wd %d", path, fd)
except Exception as e:
log.error("inotify failed on watched path %r -- %s", path, e)
raise e
elif self._mode == WF_POLLING:
log.debug("path %s opened as fd %d", path, fd)
fstate = self._poll_get_stat(fd, path)
if fstate:
self._poll_stat[fd] = fstate
self.paths_open[path] = fd
self.fds_open[fd] = path
return True |
def set_relay_off(self):
"""Turn the relay off."""
if self.get_relay_state():
try:
request = requests.get(
'{}/relay'.format(self.resource), params={'state': '0'},
timeout=self.timeout)
if request.status_code == 200:
self.data['relay'] = False
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError() | Turn the relay off. | Below is the instruction that describes the task:
### Input:
Turn the relay off.
### Response:
def set_relay_off(self):
"""Turn the relay off."""
if self.get_relay_state():
try:
request = requests.get(
'{}/relay'.format(self.resource), params={'state': '0'},
timeout=self.timeout)
if request.status_code == 200:
self.data['relay'] = False
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError() |
def lignesFichier(nf):
""" L'ensemble de lignes du fichier qui ne sont ni vides ni commentées.
* Les fichiers de Collatinus ont adopté le point d'exclamation
* en début de ligne pour introduire un commentaire.
* Ces lignes doivent être ignorées par le programme.
:param nf: Nom du fichier
:type nf: str
:yield: Ligne de fichier si ce n'est pas un commentaire
:ytype: str
"""
with open(nf) as file:
for line in file.readlines():
line = line.strip()
if line and not line.startswith("!") and not line.startswith("! --- "):
if "!" in line:
line, _ = tuple(line.split("!")) # Remove the comments
yield clean_double_diacritic(line) | The set of lines in the file that are neither empty nor commented out.
* Collatinus files adopted the exclamation mark
* at the start of a line to introduce a comment.
* These lines must be ignored by the program.
:param nf: File name
:type nf: str
:yield: File line if it is not a comment
:ytype: str | Below is the instruction that describes the task:
### Input:
The set of lines in the file that are neither empty nor commented out.
* Collatinus files adopted the exclamation mark
* at the start of a line to introduce a comment.
* These lines must be ignored by the program.
:param nf: File name
:type nf: str
:yield: File line if it is not a comment
:ytype: str
### Response:
def lignesFichier(nf):
""" L'ensemble de lignes du fichier qui ne sont ni vides ni commentées.
* Les fichiers de Collatinus ont adopté le point d'exclamation
* en début de ligne pour introduire un commentaire.
* Ces lignes doivent être ignorées par le programme.
:param nf: Nom du fichier
:type nf: str
:yield: Ligne de fichier si ce n'est pas un commentaire
:ytype: str
"""
with open(nf) as file:
for line in file.readlines():
line = line.strip()
if line and not line.startswith("!") and not line.startswith("! --- "):
if "!" in line:
line, _ = tuple(line.split("!")) # Remove the comments
yield clean_double_diacritic(line) |
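A self-contained sketch of the filtering behaviour. clean_double_diacritic lives elsewhere in the module, so it is stubbed out as the identity function here, and the sample file content is invented:
import os
import tempfile
clean_double_diacritic = lambda s: s  # stub for the real helper
sample = "! whole-line comment\n\namo|amas ! trailing comment\nlego|legis\n"
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write(sample)
    path = tmp.name
print(list(lignesFichier(path)))  # ['amo|amas ', 'lego|legis']
os.unlink(path)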
def project_sequence(s, permutation=None):
"""
Projects a point or sequence of points using `project_point` to lists xs, ys
for plotting with Matplotlib.
Parameters
----------
s, Sequence-like
The sequence of points (3-tuples) to be projected.
Returns
-------
xs, ys: The sequence of projected points in coordinates as two lists
"""
xs, ys = unzip([project_point(p, permutation=permutation) for p in s])
return xs, ys | Projects a point or sequence of points using `project_point` to lists xs, ys
for plotting with Matplotlib.
Parameters
----------
s, Sequence-like
The sequence of points (3-tuples) to be projected.
Returns
-------
xs, ys: The sequence of projected points in coordinates as two lists | Below is the the instruction that describes the task:
### Input:
Projects a point or sequence of points using `project_point` to lists xs, ys
for plotting with Matplotlib.
Parameters
----------
s, Sequence-like
The sequence of points (3-tuples) to be projected.
Returns
-------
xs, ys: The sequence of projected points in coordinates as two lists
### Response:
def project_sequence(s, permutation=None):
"""
Projects a point or sequence of points using `project_point` to lists xs, ys
for plotting with Matplotlib.
Parameters
----------
s, Sequence-like
The sequence of points (3-tuples) to be projected.
Returns
-------
xs, ys: The sequence of projected points in coordinates as two lists
"""
xs, ys = unzip([project_point(p, permutation=permutation) for p in s])
return xs, ys |
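project_point and unzip come from the surrounding ternary-plotting helpers and are not shown here; as a rough illustration only, the sketch below substitutes one common 3-to-2 simplex projection (an assumed convention, not necessarily the one project_point uses) together with the same split-into-two-lists pattern.

import math

def project_point_sketch(p):
    # Assumed ternary projection; the real project_point may use another convention.
    a, b, c = p
    return (b + c / 2.0, c * math.sqrt(3) / 2.0)

points = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
xs, ys = zip(*[project_point_sketch(p) for p in points])
print(list(xs), list(ys))  # two coordinate lists, ready for matplotlib's plot(xs, ys)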
def _openResources(self):
""" Uses open the underlying file
"""
with Image.open(self._fileName) as image:
self._array = np.asarray(image)
self._bands = image.getbands()
# Fill attributes. For now assume that the info item are not overridden by
# the Image items.
self._attributes = dict(image.info)
self._attributes['Format'] = image.format
self._attributes['Mode'] = image.mode
self._attributes['Size'] = image.size
self._attributes['Width'] = image.width
self._attributes['Height'] = image.height | Uses open the underlying file | Below is the the instruction that describes the task:
### Input:
Uses open the underlying file
### Response:
def _openResources(self):
""" Uses open the underlying file
"""
with Image.open(self._fileName) as image:
self._array = np.asarray(image)
self._bands = image.getbands()
# Fill attributes. For now assume that the info item are not overridden by
# the Image items.
self._attributes = dict(image.info)
self._attributes['Format'] = image.format
self._attributes['Mode'] = image.mode
self._attributes['Size'] = image.size
self._attributes['Width'] = image.width
self._attributes['Height'] = image.height |
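The same attribute harvesting works outside the class as well; a stand-alone sketch, assuming Pillow and NumPy are installed and 'example.png' is a placeholder path to any readable image:

import numpy as np
from PIL import Image

with Image.open('example.png') as image:
    array = np.asarray(image)       # pixel data as an ndarray
    attributes = dict(image.info)   # format-specific metadata first
    attributes.update({
        'Format': image.format,
        'Mode': image.mode,
        'Size': image.size,         # (width, height) tuple
        'Width': image.width,
        'Height': image.height,
    })
print(array.shape, attributes['Mode'])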
def unpack(self):
"""Decompose a GVariant into a native Python object."""
LEAF_ACCESSORS = {
'b': self.get_boolean,
'y': self.get_byte,
'n': self.get_int16,
'q': self.get_uint16,
'i': self.get_int32,
'u': self.get_uint32,
'x': self.get_int64,
't': self.get_uint64,
'h': self.get_handle,
'd': self.get_double,
's': self.get_string,
'o': self.get_string, # object path
'g': self.get_string, # signature
}
# simple values
la = LEAF_ACCESSORS.get(self.get_type_string())
if la:
return la()
# tuple
if self.get_type_string().startswith('('):
res = [self.get_child_value(i).unpack()
for i in range(self.n_children())]
return tuple(res)
# dictionary
if self.get_type_string().startswith('a{'):
res = {}
for i in range(self.n_children()):
v = self.get_child_value(i)
res[v.get_child_value(0).unpack()] = v.get_child_value(1).unpack()
return res
# array
if self.get_type_string().startswith('a'):
return [self.get_child_value(i).unpack()
for i in range(self.n_children())]
# variant (just unbox transparently)
if self.get_type_string().startswith('v'):
return self.get_variant().unpack()
# maybe
if self.get_type_string().startswith('m'):
m = self.get_maybe()
return m.unpack() if m else None
raise NotImplementedError('unsupported GVariant type ' + self.get_type_string()) | Decompose a GVariant into a native Python object. | Below is the the instruction that describes the task:
### Input:
Decompose a GVariant into a native Python object.
### Response:
def unpack(self):
"""Decompose a GVariant into a native Python object."""
LEAF_ACCESSORS = {
'b': self.get_boolean,
'y': self.get_byte,
'n': self.get_int16,
'q': self.get_uint16,
'i': self.get_int32,
'u': self.get_uint32,
'x': self.get_int64,
't': self.get_uint64,
'h': self.get_handle,
'd': self.get_double,
's': self.get_string,
'o': self.get_string, # object path
'g': self.get_string, # signature
}
# simple values
la = LEAF_ACCESSORS.get(self.get_type_string())
if la:
return la()
# tuple
if self.get_type_string().startswith('('):
res = [self.get_child_value(i).unpack()
for i in range(self.n_children())]
return tuple(res)
# dictionary
if self.get_type_string().startswith('a{'):
res = {}
for i in range(self.n_children()):
v = self.get_child_value(i)
res[v.get_child_value(0).unpack()] = v.get_child_value(1).unpack()
return res
# array
if self.get_type_string().startswith('a'):
return [self.get_child_value(i).unpack()
for i in range(self.n_children())]
# variant (just unbox transparently)
if self.get_type_string().startswith('v'):
return self.get_variant().unpack()
# maybe
if self.get_type_string().startswith('m'):
m = self.get_maybe()
return m.unpack() if m else None
raise NotImplementedError('unsupported GVariant type ' + self.get_type_string()) |
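A quick round trip through PyGObject shows the behaviour the method above re-implements; this assumes the gi bindings are installed.

from gi.repository import GLib

# 'a{sv}' is a dict of string -> variant; unpack() unboxes the nested variants.
v = GLib.Variant('a{sv}', {'name': GLib.Variant('s', 'dev0'),
                           'count': GLib.Variant('i', 3)})
print(v.unpack())  # {'name': 'dev0', 'count': 3}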
def extract_rzip (archive, compression, cmd, verbosity, interactive, outdir):
"""Extract an RZIP archive."""
cmdlist = [cmd, '-d', '-k']
if verbosity > 1:
cmdlist.append('-v')
outfile = util.get_single_outfile(outdir, archive)
cmdlist.extend(["-o", outfile, archive])
return cmdlist | Extract an RZIP archive. | Below is the the instruction that describes the task:
### Input:
Extract an RZIP archive.
### Response:
def extract_rzip (archive, compression, cmd, verbosity, interactive, outdir):
"""Extract an RZIP archive."""
cmdlist = [cmd, '-d', '-k']
if verbosity > 1:
cmdlist.append('-v')
outfile = util.get_single_outfile(outdir, archive)
cmdlist.extend(["-o", outfile, archive])
return cmdlist |
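The helper only assembles a command list; with verbosity above 1 the result looks roughly like the following (the exact output path depends on util.get_single_outfile, so it is shown only as an assumption, and the file names are placeholders).

# Hypothetical call for illustration.
cmdlist = extract_rzip('data.rz', None, 'rzip', verbosity=2,
                       interactive=False, outdir='/tmp/out')
# Expected shape: ['rzip', '-d', '-k', '-v', '-o', <outfile under /tmp/out>, 'data.rz']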
def setChatPhoto(self, chat_id, photo):
""" See: https://core.telegram.org/bots/api#setchatphoto """
p = _strip(locals(), more=['photo'])
return self._api_request_with_file('setChatPhoto', _rectify(p), 'photo', photo) | See: https://core.telegram.org/bots/api#setchatphoto | Below is the the instruction that describes the task:
### Input:
See: https://core.telegram.org/bots/api#setchatphoto
### Response:
def setChatPhoto(self, chat_id, photo):
""" See: https://core.telegram.org/bots/api#setchatphoto """
p = _strip(locals(), more=['photo'])
return self._api_request_with_file('setChatPhoto', _rectify(p), 'photo', photo) |
return self._api_request_with_file('setChatPhoto', _rectify(p), 'photo', photo) |
def onlasso(self, verts):
"""
Main function to control the action of the lasso, allows user to draw on data image and adjust thematic map
:param verts: the vertices selected by the lasso
:return: nothin, but update the selection array so lassoed region now has the selected theme, redraws canvas
"""
p = path.Path(verts)
ind = p.contains_points(self.pix, radius=1)
self.history.append(self.selection_array.copy())
self.selection_array = self.updateArray(self.selection_array,
ind,
self.solar_class_var.get())
self.mask.set_data(self.selection_array)
self.fig.canvas.draw_idle() | Main function to control the action of the lasso, allows user to draw on data image and adjust thematic map
:param verts: the vertices selected by the lasso
    :return: nothing, but update the selection array so lassoed region now has the selected theme, redraws canvas | Below is the the instruction that describes the task:
### Input:
Main function to control the action of the lasso, allows user to draw on data image and adjust thematic map
:param verts: the vertices selected by the lasso
    :return: nothing, but update the selection array so lassoed region now has the selected theme, redraws canvas
### Response:
def onlasso(self, verts):
"""
Main function to control the action of the lasso, allows user to draw on data image and adjust thematic map
:param verts: the vertices selected by the lasso
        :return: nothing, but update the selection array so lassoed region now has the selected theme, redraws canvas
"""
p = path.Path(verts)
ind = p.contains_points(self.pix, radius=1)
self.history.append(self.selection_array.copy())
self.selection_array = self.updateArray(self.selection_array,
ind,
self.solar_class_var.get())
self.mask.set_data(self.selection_array)
self.fig.canvas.draw_idle() |
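The point-in-polygon test that drives the update is matplotlib's Path.contains_points (the original also passes radius=1 to be a little lenient near the boundary); a tiny self-contained check of that step:

import numpy as np
from matplotlib import path

verts = [(0, 0), (4, 0), (4, 4), (0, 4)]   # polygon traced by the lasso
pix = np.array([(1, 1), (6, 6), (2, 3)])   # candidate pixel coordinates
inside = path.Path(verts).contains_points(pix)
print(inside)  # array([ True, False,  True])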
def next_packet(self):
"""
Process next packet if present
"""
try:
start_byte_index = self.buffer.index(velbus.START_BYTE)
except ValueError:
self.buffer = bytes([])
return
if start_byte_index >= 0:
self.buffer = self.buffer[start_byte_index:]
if self.valid_header_waiting() and self.valid_body_waiting():
next_packet = self.extract_packet()
self.buffer = self.buffer[len(next_packet):]
message = self.parse(next_packet)
if isinstance(message, velbus.Message):
self.controller.new_message(message) | Process next packet if present | Below is the the instruction that describes the task:
### Input:
Process next packet if present
### Response:
def next_packet(self):
"""
Process next packet if present
"""
try:
start_byte_index = self.buffer.index(velbus.START_BYTE)
except ValueError:
self.buffer = bytes([])
return
if start_byte_index >= 0:
self.buffer = self.buffer[start_byte_index:]
if self.valid_header_waiting() and self.valid_body_waiting():
next_packet = self.extract_packet()
self.buffer = self.buffer[len(next_packet):]
message = self.parse(next_packet)
if isinstance(message, velbus.Message):
self.controller.new_message(message) |
def _gl_look_at(self, pos, target, up):
"""
The standard lookAt method
:param pos: current position
:param target: target position to look at
:param up: direction up
"""
z = vector.normalise(pos - target)
x = vector.normalise(vector3.cross(vector.normalise(up), z))
y = vector3.cross(z, x)
translate = matrix44.create_identity()
translate[3][0] = -pos.x
translate[3][1] = -pos.y
translate[3][2] = -pos.z
rotate = matrix44.create_identity()
rotate[0][0] = x[0] # -- X
rotate[1][0] = x[1]
rotate[2][0] = x[2]
rotate[0][1] = y[0] # -- Y
rotate[1][1] = y[1]
rotate[2][1] = y[2]
rotate[0][2] = z[0] # -- Z
rotate[1][2] = z[1]
rotate[2][2] = z[2]
return matrix44.multiply(translate, rotate) | The standard lookAt method
:param pos: current position
:param target: target position to look at
:param up: direction up | Below is the the instruction that describes the task:
### Input:
The standard lookAt method
:param pos: current position
:param target: target position to look at
:param up: direction up
### Response:
def _gl_look_at(self, pos, target, up):
"""
The standard lookAt method
:param pos: current position
:param target: target position to look at
:param up: direction up
"""
z = vector.normalise(pos - target)
x = vector.normalise(vector3.cross(vector.normalise(up), z))
y = vector3.cross(z, x)
translate = matrix44.create_identity()
translate[3][0] = -pos.x
translate[3][1] = -pos.y
translate[3][2] = -pos.z
rotate = matrix44.create_identity()
rotate[0][0] = x[0] # -- X
rotate[1][0] = x[1]
rotate[2][0] = x[2]
rotate[0][1] = y[0] # -- Y
rotate[1][1] = y[1]
rotate[2][1] = y[2]
rotate[0][2] = z[0] # -- Z
rotate[1][2] = z[1]
rotate[2][2] = z[2]
return matrix44.multiply(translate, rotate) |
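The same construction restated with plain NumPy (row-vector, row-major layout, mirroring the pyrr-based code above) may make the basis-building step easier to follow; it is a sketch of the math, not a drop-in replacement.

import numpy as np

def look_at(pos, target, up):
    z = pos - target
    z = z / np.linalg.norm(z)                  # camera backward axis
    x = np.cross(up / np.linalg.norm(up), z)
    x = x / np.linalg.norm(x)                  # camera right axis
    y = np.cross(z, x)                         # camera up axis
    translate = np.identity(4)
    translate[3, :3] = -pos                    # move the eye to the origin
    rotate = np.identity(4)
    rotate[:3, 0], rotate[:3, 1], rotate[:3, 2] = x, y, z
    return translate @ rotate                  # translate first, then rotate (row vectors)

m = look_at(np.array([0., 0., 5.]), np.zeros(3), np.array([0., 1., 0.]))
print(m.round(2))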
def _link_package_versions(self, link, search_name):
"""
Return an iterable of triples (pkg_resources_version_key,
link, python_version) that can be extracted from the given
link.
Meant to be overridden by subclasses, not called by clients.
"""
if link.egg_fragment:
egg_info = link.egg_fragment
else:
egg_info, ext = link.splitext()
if not ext:
if link not in self.logged_links:
logger.debug('Skipping link %s; not a file' % link)
self.logged_links.add(link)
return []
if egg_info.endswith('.tar'):
# Special double-extension case:
egg_info = egg_info[:-4]
ext = '.tar' + ext
if ext not in ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip'):
if link not in self.logged_links:
logger.debug('Skipping link %s; unknown archive format: %s' % (link, ext))
self.logged_links.add(link)
return []
version = self._egg_info_matches(egg_info, search_name, link)
if version is None:
logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
return []
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
logger.debug('Skipping %s because Python version is incorrect' % link)
return []
logger.debug('Found link %s, version: %s' % (link, version))
return [(pkg_resources.parse_version(version),
link,
version)] | Return an iterable of triples (pkg_resources_version_key,
link, python_version) that can be extracted from the given
link.
Meant to be overridden by subclasses, not called by clients. | Below is the the instruction that describes the task:
### Input:
Return an iterable of triples (pkg_resources_version_key,
link, python_version) that can be extracted from the given
link.
Meant to be overridden by subclasses, not called by clients.
### Response:
def _link_package_versions(self, link, search_name):
"""
Return an iterable of triples (pkg_resources_version_key,
link, python_version) that can be extracted from the given
link.
Meant to be overridden by subclasses, not called by clients.
"""
if link.egg_fragment:
egg_info = link.egg_fragment
else:
egg_info, ext = link.splitext()
if not ext:
if link not in self.logged_links:
logger.debug('Skipping link %s; not a file' % link)
self.logged_links.add(link)
return []
if egg_info.endswith('.tar'):
# Special double-extension case:
egg_info = egg_info[:-4]
ext = '.tar' + ext
if ext not in ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip'):
if link not in self.logged_links:
logger.debug('Skipping link %s; unknown archive format: %s' % (link, ext))
self.logged_links.add(link)
return []
version = self._egg_info_matches(egg_info, search_name, link)
if version is None:
logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
return []
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
logger.debug('Skipping %s because Python version is incorrect' % link)
return []
logger.debug('Found link %s, version: %s' % (link, version))
return [(pkg_resources.parse_version(version),
link,
version)] |
def set_cluster_info(self,
disallow_cluster_termination=None,
enable_ganglia_monitoring=None,
datadog_api_token=None,
datadog_app_token=None,
node_bootstrap=None,
master_instance_type=None,
slave_instance_type=None,
min_nodes=None,
max_nodes=None,
slave_request_type=None,
fallback_to_ondemand=None,
node_base_cooldown_period=None,
node_spot_cooldown_period=None,
custom_tags=None,
heterogeneous_config=None,
maximum_bid_price_percentage=None,
timeout_for_request=None,
maximum_spot_instance_percentage=None,
stable_maximum_bid_price_percentage=None,
stable_timeout_for_request=None,
stable_spot_fallback=None,
spot_block_duration=None,
idle_cluster_timeout=None,
disk_count=None,
disk_type=None,
disk_size=None,
root_disk_size=None,
upscaling_config=None,
enable_encryption=None,
customer_ssh_key=None,
cluster_name=None,
force_tunnel=None,
image_uri_overrides=None,
env_name=None,
python_version=None,
r_version=None,
disable_cluster_pause=None,
paused_cluster_timeout_mins=None,
disable_autoscale_node_pause=None,
paused_autoscale_node_timeout_mins=None):
"""
Args:
`disallow_cluster_termination`: Set this to True if you don't want
qubole to auto-terminate idle clusters. Use this option with
extreme caution.
`enable_ganglia_monitoring`: Set this to True if you want to enable
ganglia monitoring for the cluster.
`node_bootstrap`: name of the node bootstrap file for this
                cluster. It should be stored in S3 at
<your-default-location>/scripts/hadoop/
`master_instance_type`: The instance type to use for the Hadoop master
node.
`slave_instance_type`: The instance type to use for the Hadoop slave
nodes.
`min_nodes`: Number of nodes to start the cluster with.
`max_nodes`: Maximum number of nodes the cluster may be auto-scaled up
to.
`slave_request_type`: Purchasing option for slave instances.
Valid values: "ondemand", "hybrid", "spot".
`fallback_to_ondemand`: Fallback to on-demand nodes if spot nodes could not be
obtained. Valid only if slave_request_type is 'spot'.
`node_base_cooldown_period`: Time for which an on-demand node waits before termination (Unit: minutes)
`node_spot_cooldown_period`: Time for which a spot node waits before termination (Unit: minutes)
`maximum_bid_price_percentage`: ( Valid only when `slave_request_type`
is hybrid or spot.) Maximum value to bid for spot
instances, expressed as a percentage of the base price
for the slave node instance type.
`timeout_for_request`: Timeout for a spot instance request (Unit:
minutes)
`maximum_spot_instance_percentage`: Maximum percentage of instances
that may be purchased from the AWS Spot market. Valid only when
slave_request_type is "hybrid".
`stable_maximum_bid_price_percentage`: Maximum value to bid for stable node spot
instances, expressed as a percentage of the base price
(applies to both master and slave nodes).
`stable_timeout_for_request`: Timeout for a stable node spot instance request (Unit:
minutes)
`stable_spot_fallback`: Whether to fallback to on-demand instances for
stable nodes if spot instances are not available
`spot_block_duration`: Time for which the spot block instance is provisioned (Unit:
minutes)
`disk_count`: Number of EBS volumes to attach
to each instance of the cluster.
`disk_type`: Type of the EBS volume. Valid
values are 'standard' (magnetic) and 'ssd'.
`disk_size`: Size of each EBS volume, in GB.
`root_disk_size`: Size of root volume, in GB.
`enable_encryption`: Encrypt the ephemeral drives on the instance.
`customer_ssh_key`: SSH key to use to login to the instances.
`idle_cluster_timeout`: The buffer time (range in 0-6 hrs) after a cluster goes idle
and gets terminated, given cluster auto termination is on and no cluster specific
timeout has been set (default is 2 hrs)
`heterogeneous_config` : Configuring heterogeneous nodes in Hadoop 2 and Spark clusters.
It implies that slave nodes can be of different instance types
`custom_tags` : Custom tags to be set on all instances
of the cluster. Specified as JSON object (key-value pairs)
`datadog_api_token` : Specify the Datadog API token to use the Datadog monitoring service
`datadog_app_token` : Specify the Datadog APP token to use the Datadog monitoring service
`image_uri_overrides` : Override the image name provided
`env_name`: Name of python and R environment. (For Spark clusters)
`python_version`: Version of Python for environment. (For Spark clusters)
`r_version`: Version of R for environment. (For Spark clusters)
`disable_cluster_pause`: Disable cluster pause
`paused_cluster_timeout_mins`: Paused cluster timeout in mins
`disable_autoscale_node_pause`: Disable autoscale node pause
`paused_autoscale_node_timeout_mins`: Paused autoscale node timeout in mins
Doc: For getting details about arguments
http://docs.qubole.com/en/latest/rest-api/cluster_api/create-new-cluster.html#parameters
"""
self.cluster_info['master_instance_type'] = master_instance_type
self.cluster_info['slave_instance_type'] = slave_instance_type
self.cluster_info['min_nodes'] = min_nodes
self.cluster_info['max_nodes'] = max_nodes
self.cluster_info['cluster_name'] = cluster_name
self.cluster_info['node_bootstrap'] = node_bootstrap
self.cluster_info['disallow_cluster_termination'] = disallow_cluster_termination
self.cluster_info['force_tunnel'] = force_tunnel
self.cluster_info['fallback_to_ondemand'] = fallback_to_ondemand
self.cluster_info['node_base_cooldown_period'] = node_base_cooldown_period
self.cluster_info['node_spot_cooldown_period'] = node_spot_cooldown_period
self.cluster_info['customer_ssh_key'] = customer_ssh_key
if custom_tags and custom_tags.strip():
try:
self.cluster_info['custom_tags'] = json.loads(custom_tags.strip())
except Exception as e:
raise Exception("Invalid JSON string for custom ec2 tags: %s" % e.message)
self.cluster_info['heterogeneous_config'] = heterogeneous_config
self.cluster_info['slave_request_type'] = slave_request_type
self.cluster_info['idle_cluster_timeout'] = idle_cluster_timeout
self.cluster_info['spot_settings'] = {}
self.cluster_info['rootdisk'] = {}
self.cluster_info['rootdisk']['size'] = root_disk_size
self.set_spot_instance_settings(maximum_bid_price_percentage, timeout_for_request, maximum_spot_instance_percentage)
self.set_stable_spot_bid_settings(stable_maximum_bid_price_percentage, stable_timeout_for_request, stable_spot_fallback)
self.set_spot_block_settings(spot_block_duration)
self.set_data_disk(disk_size, disk_count, disk_type, upscaling_config, enable_encryption)
self.set_monitoring(enable_ganglia_monitoring, datadog_api_token, datadog_app_token)
self.set_internal(image_uri_overrides)
self.set_env_settings(env_name, python_version, r_version)
self.set_start_stop_settings(disable_cluster_pause, paused_cluster_timeout_mins,
disable_autoscale_node_pause, paused_autoscale_node_timeout_mins) | Args:
`disallow_cluster_termination`: Set this to True if you don't want
qubole to auto-terminate idle clusters. Use this option with
extreme caution.
`enable_ganglia_monitoring`: Set this to True if you want to enable
ganglia monitoring for the cluster.
`node_bootstrap`: name of the node bootstrap file for this
cluster. It should be in stored in S3 at
<your-default-location>/scripts/hadoop/
`master_instance_type`: The instance type to use for the Hadoop master
node.
`slave_instance_type`: The instance type to use for the Hadoop slave
nodes.
`min_nodes`: Number of nodes to start the cluster with.
`max_nodes`: Maximum number of nodes the cluster may be auto-scaled up
to.
`slave_request_type`: Purchasing option for slave instances.
Valid values: "ondemand", "hybrid", "spot".
`fallback_to_ondemand`: Fallback to on-demand nodes if spot nodes could not be
obtained. Valid only if slave_request_type is 'spot'.
`node_base_cooldown_period`: Time for which an on-demand node waits before termination (Unit: minutes)
`node_spot_cooldown_period`: Time for which a spot node waits before termination (Unit: minutes)
`maximum_bid_price_percentage`: ( Valid only when `slave_request_type`
is hybrid or spot.) Maximum value to bid for spot
instances, expressed as a percentage of the base price
for the slave node instance type.
`timeout_for_request`: Timeout for a spot instance request (Unit:
minutes)
`maximum_spot_instance_percentage`: Maximum percentage of instances
that may be purchased from the AWS Spot market. Valid only when
slave_request_type is "hybrid".
`stable_maximum_bid_price_percentage`: Maximum value to bid for stable node spot
instances, expressed as a percentage of the base price
(applies to both master and slave nodes).
`stable_timeout_for_request`: Timeout for a stable node spot instance request (Unit:
minutes)
`stable_spot_fallback`: Whether to fallback to on-demand instances for
stable nodes if spot instances are not available
`spot_block_duration`: Time for which the spot block instance is provisioned (Unit:
minutes)
`disk_count`: Number of EBS volumes to attach
to each instance of the cluster.
`disk_type`: Type of the EBS volume. Valid
values are 'standard' (magnetic) and 'ssd'.
`disk_size`: Size of each EBS volume, in GB.
`root_disk_size`: Size of root volume, in GB.
`enable_encryption`: Encrypt the ephemeral drives on the instance.
`customer_ssh_key`: SSH key to use to login to the instances.
`idle_cluster_timeout`: The buffer time (range in 0-6 hrs) after a cluster goes idle
and gets terminated, given cluster auto termination is on and no cluster specific
timeout has been set (default is 2 hrs)
`heterogeneous_config` : Configuring heterogeneous nodes in Hadoop 2 and Spark clusters.
It implies that slave nodes can be of different instance types
`custom_tags` : Custom tags to be set on all instances
of the cluster. Specified as JSON object (key-value pairs)
`datadog_api_token` : Specify the Datadog API token to use the Datadog monitoring service
`datadog_app_token` : Specify the Datadog APP token to use the Datadog monitoring service
`image_uri_overrides` : Override the image name provided
`env_name`: Name of python and R environment. (For Spark clusters)
`python_version`: Version of Python for environment. (For Spark clusters)
`r_version`: Version of R for environment. (For Spark clusters)
`disable_cluster_pause`: Disable cluster pause
`paused_cluster_timeout_mins`: Paused cluster timeout in mins
`disable_autoscale_node_pause`: Disable autoscale node pause
`paused_autoscale_node_timeout_mins`: Paused autoscale node timeout in mins
Doc: For getting details about arguments
http://docs.qubole.com/en/latest/rest-api/cluster_api/create-new-cluster.html#parameters | Below is the the instruction that describes the task:
### Input:
Args:
`disallow_cluster_termination`: Set this to True if you don't want
qubole to auto-terminate idle clusters. Use this option with
extreme caution.
`enable_ganglia_monitoring`: Set this to True if you want to enable
ganglia monitoring for the cluster.
`node_bootstrap`: name of the node bootstrap file for this
            cluster. It should be stored in S3 at
<your-default-location>/scripts/hadoop/
`master_instance_type`: The instance type to use for the Hadoop master
node.
`slave_instance_type`: The instance type to use for the Hadoop slave
nodes.
`min_nodes`: Number of nodes to start the cluster with.
`max_nodes`: Maximum number of nodes the cluster may be auto-scaled up
to.
`slave_request_type`: Purchasing option for slave instances.
Valid values: "ondemand", "hybrid", "spot".
`fallback_to_ondemand`: Fallback to on-demand nodes if spot nodes could not be
obtained. Valid only if slave_request_type is 'spot'.
`node_base_cooldown_period`: Time for which an on-demand node waits before termination (Unit: minutes)
`node_spot_cooldown_period`: Time for which a spot node waits before termination (Unit: minutes)
`maximum_bid_price_percentage`: ( Valid only when `slave_request_type`
is hybrid or spot.) Maximum value to bid for spot
instances, expressed as a percentage of the base price
for the slave node instance type.
`timeout_for_request`: Timeout for a spot instance request (Unit:
minutes)
`maximum_spot_instance_percentage`: Maximum percentage of instances
that may be purchased from the AWS Spot market. Valid only when
slave_request_type is "hybrid".
`stable_maximum_bid_price_percentage`: Maximum value to bid for stable node spot
instances, expressed as a percentage of the base price
(applies to both master and slave nodes).
`stable_timeout_for_request`: Timeout for a stable node spot instance request (Unit:
minutes)
`stable_spot_fallback`: Whether to fallback to on-demand instances for
stable nodes if spot instances are not available
`spot_block_duration`: Time for which the spot block instance is provisioned (Unit:
minutes)
`disk_count`: Number of EBS volumes to attach
to each instance of the cluster.
`disk_type`: Type of the EBS volume. Valid
values are 'standard' (magnetic) and 'ssd'.
`disk_size`: Size of each EBS volume, in GB.
`root_disk_size`: Size of root volume, in GB.
`enable_encryption`: Encrypt the ephemeral drives on the instance.
`customer_ssh_key`: SSH key to use to login to the instances.
`idle_cluster_timeout`: The buffer time (range in 0-6 hrs) after a cluster goes idle
and gets terminated, given cluster auto termination is on and no cluster specific
timeout has been set (default is 2 hrs)
`heterogeneous_config` : Configuring heterogeneous nodes in Hadoop 2 and Spark clusters.
It implies that slave nodes can be of different instance types
`custom_tags` : Custom tags to be set on all instances
of the cluster. Specified as JSON object (key-value pairs)
`datadog_api_token` : Specify the Datadog API token to use the Datadog monitoring service
`datadog_app_token` : Specify the Datadog APP token to use the Datadog monitoring service
`image_uri_overrides` : Override the image name provided
`env_name`: Name of python and R environment. (For Spark clusters)
`python_version`: Version of Python for environment. (For Spark clusters)
`r_version`: Version of R for environment. (For Spark clusters)
`disable_cluster_pause`: Disable cluster pause
`paused_cluster_timeout_mins`: Paused cluster timeout in mins
`disable_autoscale_node_pause`: Disable autoscale node pause
`paused_autoscale_node_timeout_mins`: Paused autoscale node timeout in mins
Doc: For getting details about arguments
http://docs.qubole.com/en/latest/rest-api/cluster_api/create-new-cluster.html#parameters
### Response:
def set_cluster_info(self,
disallow_cluster_termination=None,
enable_ganglia_monitoring=None,
datadog_api_token=None,
datadog_app_token=None,
node_bootstrap=None,
master_instance_type=None,
slave_instance_type=None,
min_nodes=None,
max_nodes=None,
slave_request_type=None,
fallback_to_ondemand=None,
node_base_cooldown_period=None,
node_spot_cooldown_period=None,
custom_tags=None,
heterogeneous_config=None,
maximum_bid_price_percentage=None,
timeout_for_request=None,
maximum_spot_instance_percentage=None,
stable_maximum_bid_price_percentage=None,
stable_timeout_for_request=None,
stable_spot_fallback=None,
spot_block_duration=None,
idle_cluster_timeout=None,
disk_count=None,
disk_type=None,
disk_size=None,
root_disk_size=None,
upscaling_config=None,
enable_encryption=None,
customer_ssh_key=None,
cluster_name=None,
force_tunnel=None,
image_uri_overrides=None,
env_name=None,
python_version=None,
r_version=None,
disable_cluster_pause=None,
paused_cluster_timeout_mins=None,
disable_autoscale_node_pause=None,
paused_autoscale_node_timeout_mins=None):
"""
Args:
`disallow_cluster_termination`: Set this to True if you don't want
qubole to auto-terminate idle clusters. Use this option with
extreme caution.
`enable_ganglia_monitoring`: Set this to True if you want to enable
ganglia monitoring for the cluster.
`node_bootstrap`: name of the node bootstrap file for this
                cluster. It should be stored in S3 at
<your-default-location>/scripts/hadoop/
`master_instance_type`: The instance type to use for the Hadoop master
node.
`slave_instance_type`: The instance type to use for the Hadoop slave
nodes.
`min_nodes`: Number of nodes to start the cluster with.
`max_nodes`: Maximum number of nodes the cluster may be auto-scaled up
to.
`slave_request_type`: Purchasing option for slave instances.
Valid values: "ondemand", "hybrid", "spot".
`fallback_to_ondemand`: Fallback to on-demand nodes if spot nodes could not be
obtained. Valid only if slave_request_type is 'spot'.
`node_base_cooldown_period`: Time for which an on-demand node waits before termination (Unit: minutes)
`node_spot_cooldown_period`: Time for which a spot node waits before termination (Unit: minutes)
`maximum_bid_price_percentage`: ( Valid only when `slave_request_type`
is hybrid or spot.) Maximum value to bid for spot
instances, expressed as a percentage of the base price
for the slave node instance type.
`timeout_for_request`: Timeout for a spot instance request (Unit:
minutes)
`maximum_spot_instance_percentage`: Maximum percentage of instances
that may be purchased from the AWS Spot market. Valid only when
slave_request_type is "hybrid".
`stable_maximum_bid_price_percentage`: Maximum value to bid for stable node spot
instances, expressed as a percentage of the base price
(applies to both master and slave nodes).
`stable_timeout_for_request`: Timeout for a stable node spot instance request (Unit:
minutes)
`stable_spot_fallback`: Whether to fallback to on-demand instances for
stable nodes if spot instances are not available
`spot_block_duration`: Time for which the spot block instance is provisioned (Unit:
minutes)
`disk_count`: Number of EBS volumes to attach
to each instance of the cluster.
`disk_type`: Type of the EBS volume. Valid
values are 'standard' (magnetic) and 'ssd'.
`disk_size`: Size of each EBS volume, in GB.
`root_disk_size`: Size of root volume, in GB.
`enable_encryption`: Encrypt the ephemeral drives on the instance.
`customer_ssh_key`: SSH key to use to login to the instances.
`idle_cluster_timeout`: The buffer time (range in 0-6 hrs) after a cluster goes idle
and gets terminated, given cluster auto termination is on and no cluster specific
timeout has been set (default is 2 hrs)
`heterogeneous_config` : Configuring heterogeneous nodes in Hadoop 2 and Spark clusters.
It implies that slave nodes can be of different instance types
`custom_tags` : Custom tags to be set on all instances
of the cluster. Specified as JSON object (key-value pairs)
`datadog_api_token` : Specify the Datadog API token to use the Datadog monitoring service
`datadog_app_token` : Specify the Datadog APP token to use the Datadog monitoring service
`image_uri_overrides` : Override the image name provided
`env_name`: Name of python and R environment. (For Spark clusters)
`python_version`: Version of Python for environment. (For Spark clusters)
`r_version`: Version of R for environment. (For Spark clusters)
`disable_cluster_pause`: Disable cluster pause
`paused_cluster_timeout_mins`: Paused cluster timeout in mins
`disable_autoscale_node_pause`: Disable autoscale node pause
`paused_autoscale_node_timeout_mins`: Paused autoscale node timeout in mins
Doc: For getting details about arguments
http://docs.qubole.com/en/latest/rest-api/cluster_api/create-new-cluster.html#parameters
"""
self.cluster_info['master_instance_type'] = master_instance_type
self.cluster_info['slave_instance_type'] = slave_instance_type
self.cluster_info['min_nodes'] = min_nodes
self.cluster_info['max_nodes'] = max_nodes
self.cluster_info['cluster_name'] = cluster_name
self.cluster_info['node_bootstrap'] = node_bootstrap
self.cluster_info['disallow_cluster_termination'] = disallow_cluster_termination
self.cluster_info['force_tunnel'] = force_tunnel
self.cluster_info['fallback_to_ondemand'] = fallback_to_ondemand
self.cluster_info['node_base_cooldown_period'] = node_base_cooldown_period
self.cluster_info['node_spot_cooldown_period'] = node_spot_cooldown_period
self.cluster_info['customer_ssh_key'] = customer_ssh_key
if custom_tags and custom_tags.strip():
try:
self.cluster_info['custom_tags'] = json.loads(custom_tags.strip())
except Exception as e:
raise Exception("Invalid JSON string for custom ec2 tags: %s" % e.message)
self.cluster_info['heterogeneous_config'] = heterogeneous_config
self.cluster_info['slave_request_type'] = slave_request_type
self.cluster_info['idle_cluster_timeout'] = idle_cluster_timeout
self.cluster_info['spot_settings'] = {}
self.cluster_info['rootdisk'] = {}
self.cluster_info['rootdisk']['size'] = root_disk_size
self.set_spot_instance_settings(maximum_bid_price_percentage, timeout_for_request, maximum_spot_instance_percentage)
self.set_stable_spot_bid_settings(stable_maximum_bid_price_percentage, stable_timeout_for_request, stable_spot_fallback)
self.set_spot_block_settings(spot_block_duration)
self.set_data_disk(disk_size, disk_count, disk_type, upscaling_config, enable_encryption)
self.set_monitoring(enable_ganglia_monitoring, datadog_api_token, datadog_app_token)
self.set_internal(image_uri_overrides)
self.set_env_settings(env_name, python_version, r_version)
self.set_start_stop_settings(disable_cluster_pause, paused_cluster_timeout_mins,
disable_autoscale_node_pause, paused_autoscale_node_timeout_mins) |
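Since nearly every argument is an optional keyword, a realistic call sets only a handful of them; the instance name and the values below are placeholders chosen for illustration, not taken from the SDK.

# 'cluster_info' is assumed to be an instance of the class that defines set_cluster_info.
cluster_info.set_cluster_info(
    master_instance_type='m1.large',
    slave_instance_type='m1.xlarge',
    min_nodes=2,
    max_nodes=10,
    slave_request_type='spot',
    fallback_to_ondemand=True,
    custom_tags='{"team": "data-eng"}',  # JSON string; parsed with json.loads internally
    disk_count=1,
    disk_type='ssd',
    disk_size=100,
)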
def actions(self, **parameters):
"""Returns a list of actions (alerts) that have been generated for
your account.
Optional Parameters:
* from -- Only include actions generated later than this timestamp.
Format is UNIX time.
Type: Integer
Default: None
* to -- Only include actions generated prior to this timestamp.
Format is UNIX time.
Type: Integer
Default: None
* limit -- Limits the number of returned results to the specified
quantity.
Type: Integer (max 300)
Default: 100
* offset -- Offset for listing.
Type: Integer
Default: 0
* checkids -- Comma-separated list of check identifiers. Limit
results to actions generated from these checks.
Type: String
Default: All
* contactids -- Comma-separated list of contact identifiers.
Limit results to actions sent to these contacts.
Type: String
Default: All
* status -- Comma-separated list of statuses. Limit results to
actions with these statuses.
Type: String ['sent', 'delivered', 'error',
'not_delivered', 'no_credits']
Default: All
* via -- Comma-separated list of via mediums. Limit results to
actions with these mediums.
Type: String ['email', 'sms', 'twitter', 'iphone',
'android']
Default: All
Returned structure:
{
'alerts' : [
{
'contactname' : <String> Name of alerted contact
'contactid' : <String> Identifier of alerted contact
'checkid' : <String> Identifier of check
'time' : <Integer> Time of alert generation. Format
UNIX time
'via' : <String> Alert medium ['email', 'sms',
'twitter', 'iphone',
'android']
'status' : <String> Alert status ['sent', 'delivered',
'error',
'notdelivered',
'nocredits']
'messageshort': <String> Short description of message
'messagefull' : <String> Full message body
'sentto' : <String> Target address, phone number, etc
'charged' : <Boolean> True if your account was charged
for this message
},
...
]
}
"""
# Warn user about unhandled parameters
for key in parameters:
if key not in ['from', 'to', 'limit', 'offset', 'checkids',
'contactids', 'status', 'via']:
sys.stderr.write('%s not a valid argument for actions()\n'
% key)
response = self.request('GET', 'actions', parameters)
return response.json()['actions'] | Returns a list of actions (alerts) that have been generated for
your account.
Optional Parameters:
* from -- Only include actions generated later than this timestamp.
Format is UNIX time.
Type: Integer
Default: None
* to -- Only include actions generated prior to this timestamp.
Format is UNIX time.
Type: Integer
Default: None
* limit -- Limits the number of returned results to the specified
quantity.
Type: Integer (max 300)
Default: 100
* offset -- Offset for listing.
Type: Integer
Default: 0
* checkids -- Comma-separated list of check identifiers. Limit
results to actions generated from these checks.
Type: String
Default: All
* contactids -- Comma-separated list of contact identifiers.
Limit results to actions sent to these contacts.
Type: String
Default: All
* status -- Comma-separated list of statuses. Limit results to
actions with these statuses.
Type: String ['sent', 'delivered', 'error',
'not_delivered', 'no_credits']
Default: All
* via -- Comma-separated list of via mediums. Limit results to
actions with these mediums.
Type: String ['email', 'sms', 'twitter', 'iphone',
'android']
Default: All
Returned structure:
{
'alerts' : [
{
'contactname' : <String> Name of alerted contact
'contactid' : <String> Identifier of alerted contact
'checkid' : <String> Identifier of check
'time' : <Integer> Time of alert generation. Format
UNIX time
'via' : <String> Alert medium ['email', 'sms',
'twitter', 'iphone',
'android']
'status' : <String> Alert status ['sent', 'delivered',
'error',
'notdelivered',
'nocredits']
'messageshort': <String> Short description of message
'messagefull' : <String> Full message body
'sentto' : <String> Target address, phone number, etc
'charged' : <Boolean> True if your account was charged
for this message
},
...
]
} | Below is the the instruction that describes the task:
### Input:
Returns a list of actions (alerts) that have been generated for
your account.
Optional Parameters:
* from -- Only include actions generated later than this timestamp.
Format is UNIX time.
Type: Integer
Default: None
* to -- Only include actions generated prior to this timestamp.
Format is UNIX time.
Type: Integer
Default: None
* limit -- Limits the number of returned results to the specified
quantity.
Type: Integer (max 300)
Default: 100
* offset -- Offset for listing.
Type: Integer
Default: 0
* checkids -- Comma-separated list of check identifiers. Limit
results to actions generated from these checks.
Type: String
Default: All
* contactids -- Comma-separated list of contact identifiers.
Limit results to actions sent to these contacts.
Type: String
Default: All
* status -- Comma-separated list of statuses. Limit results to
actions with these statuses.
Type: String ['sent', 'delivered', 'error',
'not_delivered', 'no_credits']
Default: All
* via -- Comma-separated list of via mediums. Limit results to
actions with these mediums.
Type: String ['email', 'sms', 'twitter', 'iphone',
'android']
Default: All
Returned structure:
{
'alerts' : [
{
'contactname' : <String> Name of alerted contact
'contactid' : <String> Identifier of alerted contact
'checkid' : <String> Identifier of check
'time' : <Integer> Time of alert generation. Format
UNIX time
'via' : <String> Alert medium ['email', 'sms',
'twitter', 'iphone',
'android']
'status' : <String> Alert status ['sent', 'delivered',
'error',
'notdelivered',
'nocredits']
'messageshort': <String> Short description of message
'messagefull' : <String> Full message body
'sentto' : <String> Target address, phone number, etc
'charged' : <Boolean> True if your account was charged
for this message
},
...
]
}
### Response:
def actions(self, **parameters):
"""Returns a list of actions (alerts) that have been generated for
your account.
Optional Parameters:
* from -- Only include actions generated later than this timestamp.
Format is UNIX time.
Type: Integer
Default: None
* to -- Only include actions generated prior to this timestamp.
Format is UNIX time.
Type: Integer
Default: None
* limit -- Limits the number of returned results to the specified
quantity.
Type: Integer (max 300)
Default: 100
* offset -- Offset for listing.
Type: Integer
Default: 0
* checkids -- Comma-separated list of check identifiers. Limit
results to actions generated from these checks.
Type: String
Default: All
* contactids -- Comma-separated list of contact identifiers.
Limit results to actions sent to these contacts.
Type: String
Default: All
* status -- Comma-separated list of statuses. Limit results to
actions with these statuses.
Type: String ['sent', 'delivered', 'error',
'not_delivered', 'no_credits']
Default: All
* via -- Comma-separated list of via mediums. Limit results to
actions with these mediums.
Type: String ['email', 'sms', 'twitter', 'iphone',
'android']
Default: All
Returned structure:
{
'alerts' : [
{
'contactname' : <String> Name of alerted contact
'contactid' : <String> Identifier of alerted contact
'checkid' : <String> Identifier of check
'time' : <Integer> Time of alert generation. Format
UNIX time
'via' : <String> Alert medium ['email', 'sms',
'twitter', 'iphone',
'android']
'status' : <String> Alert status ['sent', 'delivered',
'error',
'notdelivered',
'nocredits']
'messageshort': <String> Short description of message
'messagefull' : <String> Full message body
'sentto' : <String> Target address, phone number, etc
'charged' : <Boolean> True if your account was charged
for this message
},
...
]
}
"""
# Warn user about unhandled parameters
for key in parameters:
if key not in ['from', 'to', 'limit', 'offset', 'checkids',
'contactids', 'status', 'via']:
sys.stderr.write('%s not a valid argument for actions()\n'
% key)
response = self.request('GET', 'actions', parameters)
return response.json()['actions'] |
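One practical wrinkle: `from` is a Python keyword, so it has to be supplied through a dict expansion rather than as a normal keyword argument. A hedged usage sketch, assuming `pingdom` is an already-authenticated client object exposing the method above:

import time

alerts = pingdom.actions(status='sent,delivered',
                         via='email',
                         limit=50,
                         **{'from': int(time.time()) - 86400})  # last 24 hours
for alert in alerts:
    print(alert['time'], alert['via'], alert['messageshort'])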
def main():
'''
Parse command line options and launch the prebuilder.
'''
parser = optparse.OptionParser(usage="%prog [options] <model_path> [another_model_path..]",
version=xtuml.version.complete_string,
formatter=optparse.TitledHelpFormatter())
parser.add_option("-v", "--verbosity", dest='verbosity',
action="count",
help="increase debug logging level",
default=1)
parser.add_option("-o", "--output", dest="output", metavar="PATH",
help="set output to PATH",
action="store",
default=None)
(opts, args) = parser.parse_args()
if len(args) == 0 or opts.output is None:
parser.print_help()
sys.exit(1)
levels = {
0: logging.ERROR,
1: logging.WARNING,
2: logging.INFO,
3: logging.DEBUG,
}
logging.basicConfig(level=levels.get(opts.verbosity, logging.DEBUG))
m = ooaofooa.load_metamodel(args)
prebuild_model(m)
xtuml.persist_instances(m, opts.output) | Parse command line options and launch the prebuilder. | Below is the the instruction that describes the task:
### Input:
Parse command line options and launch the prebuilder.
### Response:
def main():
'''
Parse command line options and launch the prebuilder.
'''
parser = optparse.OptionParser(usage="%prog [options] <model_path> [another_model_path..]",
version=xtuml.version.complete_string,
formatter=optparse.TitledHelpFormatter())
parser.add_option("-v", "--verbosity", dest='verbosity',
action="count",
help="increase debug logging level",
default=1)
parser.add_option("-o", "--output", dest="output", metavar="PATH",
help="set output to PATH",
action="store",
default=None)
(opts, args) = parser.parse_args()
if len(args) == 0 or opts.output is None:
parser.print_help()
sys.exit(1)
levels = {
0: logging.ERROR,
1: logging.WARNING,
2: logging.INFO,
3: logging.DEBUG,
}
logging.basicConfig(level=levels.get(opts.verbosity, logging.DEBUG))
m = ooaofooa.load_metamodel(args)
prebuild_model(m)
xtuml.persist_instances(m, opts.output) |
def _infer_embedded_object(value):
"""
Infer CIMProperty/CIMParameter.embedded_object from the CIM value.
"""
if value is None:
# The default behavior is to assume that a value of None is not
# an embedded object. If the user wants that, they must specify
# the embedded_object parameter.
return False
if isinstance(value, list):
if not value:
# The default behavior is to assume that an empty array value
# is not an embedded object. If the user wants that, they must
# specify the embedded_object parameter.
return False
value = value[0]
if isinstance(value, CIMInstance):
# The default behavior is to produce 'instance', although 'object'
# would also be valid.
return 'instance'
if isinstance(value, CIMClass):
return 'object'
return False | Infer CIMProperty/CIMParameter.embedded_object from the CIM value. | Below is the the instruction that describes the task:
### Input:
Infer CIMProperty/CIMParameter.embedded_object from the CIM value.
### Response:
def _infer_embedded_object(value):
"""
Infer CIMProperty/CIMParameter.embedded_object from the CIM value.
"""
if value is None:
# The default behavior is to assume that a value of None is not
# an embedded object. If the user wants that, they must specify
# the embedded_object parameter.
return False
if isinstance(value, list):
if not value:
# The default behavior is to assume that an empty array value
# is not an embedded object. If the user wants that, they must
# specify the embedded_object parameter.
return False
value = value[0]
if isinstance(value, CIMInstance):
# The default behavior is to produce 'instance', although 'object'
# would also be valid.
return 'instance'
if isinstance(value, CIMClass):
return 'object'
return False |
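The decision table is easiest to read as a few concrete calls; this assumes pywbem's CIMInstance and CIMClass are importable in the usual way.

from pywbem import CIMClass, CIMInstance

print(_infer_embedded_object(None))                    # False
print(_infer_embedded_object([]))                      # False
print(_infer_embedded_object(CIMInstance('CIM_Foo')))  # 'instance'
print(_infer_embedded_object([CIMClass('CIM_Foo')]))   # 'object'
print(_infer_embedded_object(42))                      # False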
def update(self, resource, uri=None, force=False, timeout=-1, custom_headers=None):
"""Makes a PUT request to update a resource when a request body is required.
Args:
resource: Data to update the resource.
uri: Resource uri
force: If set to true, the operation completes despite any problems
with network connectivity or errors on the resource itself. The default is false.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
custom_headers: Allows to add custom HTTP headers.
Returns:
A dict with the updated resource data.
"""
logger.debug('Update async (uri = %s, resource = %s)' %
(uri, str(resource)))
if not uri:
uri = resource['uri']
if force:
uri += '?force=True'
return self.do_put(uri, resource, timeout, custom_headers) | Makes a PUT request to update a resource when a request body is required.
Args:
resource: Data to update the resource.
uri: Resource uri
force: If set to true, the operation completes despite any problems
with network connectivity or errors on the resource itself. The default is false.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
custom_headers: Allows to add custom HTTP headers.
Returns:
A dict with the updated resource data. | Below is the the instruction that describes the task:
### Input:
Makes a PUT request to update a resource when a request body is required.
Args:
resource: Data to update the resource.
uri: Resource uri
force: If set to true, the operation completes despite any problems
with network connectivity or errors on the resource itself. The default is false.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
custom_headers: Allows to add custom HTTP headers.
Returns:
A dict with the updated resource data.
### Response:
def update(self, resource, uri=None, force=False, timeout=-1, custom_headers=None):
"""Makes a PUT request to update a resource when a request body is required.
Args:
resource: Data to update the resource.
uri: Resource uri
force: If set to true, the operation completes despite any problems
with network connectivity or errors on the resource itself. The default is false.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
custom_headers: Allows to add custom HTTP headers.
Returns:
A dict with the updated resource data.
"""
logger.debug('Update async (uri = %s, resource = %s)' %
(uri, str(resource)))
if not uri:
uri = resource['uri']
if force:
uri += '?force=True'
return self.do_put(uri, resource, timeout, custom_headers) |
def populate_local_cache(self):
"""Populate the local cache from DB.
Read the entries from FW DB and Calls routines to populate the cache.
"""
fw_dict = self.get_all_fw_db()
for fw_id in fw_dict:
LOG.info("Populating cache for FW %s", fw_id)
fw_data = fw_dict[fw_id]
self.populate_local_cache_tenant(fw_id, fw_data) | Populate the local cache from DB.
Read the entries from FW DB and Calls routines to populate the cache. | Below is the the instruction that describes the task:
### Input:
Populate the local cache from DB.
Read the entries from FW DB and Calls routines to populate the cache.
### Response:
def populate_local_cache(self):
"""Populate the local cache from DB.
Read the entries from FW DB and Calls routines to populate the cache.
"""
fw_dict = self.get_all_fw_db()
for fw_id in fw_dict:
LOG.info("Populating cache for FW %s", fw_id)
fw_data = fw_dict[fw_id]
self.populate_local_cache_tenant(fw_id, fw_data) |
def load_info(self, client, info):
"""Fill out information about the gateway"""
if 'identity' in info:
info['stages'] = client.get_stages(restApiId=info['identity'])['item']
info['resources'] = client.get_resources(restApiId=info['identity'])['items']
for resource in info['resources']:
for method in resource.get('resourceMethods', {}):
resource['resourceMethods'][method] = client.get_method(restApiId=info['identity'], resourceId=resource['id'], httpMethod=method)
for status_code, options in resource['resourceMethods'][method]['methodResponses'].items():
options.update(client.get_method_response(restApiId=info['identity'], resourceId=resource['id'], httpMethod=method, statusCode=status_code))
info['deployment'] = client.get_deployments(restApiId=info['identity'])['items']
else:
for key in ('stages', 'resources', 'deployment'):
info[key] = []
info['api_keys'] = client.get_api_keys()['items']
info['domains'] = client.get_domain_names()['items']
for domain in info['domains']:
domain['mappings'] = client.get_base_path_mappings(domainName=domain['domainName']).get('items', []) | Fill out information about the gateway | Below is the the instruction that describes the task:
### Input:
Fill out information about the gateway
### Response:
def load_info(self, client, info):
"""Fill out information about the gateway"""
if 'identity' in info:
info['stages'] = client.get_stages(restApiId=info['identity'])['item']
info['resources'] = client.get_resources(restApiId=info['identity'])['items']
for resource in info['resources']:
for method in resource.get('resourceMethods', {}):
resource['resourceMethods'][method] = client.get_method(restApiId=info['identity'], resourceId=resource['id'], httpMethod=method)
for status_code, options in resource['resourceMethods'][method]['methodResponses'].items():
options.update(client.get_method_response(restApiId=info['identity'], resourceId=resource['id'], httpMethod=method, statusCode=status_code))
info['deployment'] = client.get_deployments(restApiId=info['identity'])['items']
else:
for key in ('stages', 'resources', 'deployment'):
info[key] = []
info['api_keys'] = client.get_api_keys()['items']
info['domains'] = client.get_domain_names()['items']
for domain in info['domains']:
domain['mappings'] = client.get_base_path_mappings(domainName=domain['domainName']).get('items', []) |
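The client calls follow boto3's apigateway surface; a trimmed-down sketch that only walks the stages of each REST API, assuming boto3 and AWS credentials are configured:

import boto3

client = boto3.client('apigateway', region_name='us-east-1')
for api in client.get_rest_apis()['items']:
    stages = client.get_stages(restApiId=api['id'])['item']   # note: singular 'item'
    print(api['name'], [stage['stageName'] for stage in stages])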
def collection_generator(collection):
"""This function returns a generator which iterates over the collection,
similar to Collection.itertuples(). Collections are viewed by this module,
regardless of type, as a mapping from an index to the value. For sets, the
"index" is the value itself (ie, (V, V)). For dicts, it's a string, and
for lists, it's an int.
In general, this function defers to ``itertuples`` and/or ``iteritems``
methods defined on the instances; however, when duck typing, this function
typically provides the generator.
"""
if collection is _nothing:
def generator():
if False:
yield any
elif hasattr(collection, "itertuples"):
return collection.itertuples()
elif hasattr(collection, "keys"):
def generator():
for key in collection.keys():
yield (key, collection[key])
elif hasattr(collection, "items"):
return collection.items()
elif hasattr(collection, "iteritems"):
return collection.iteritems()
elif hasattr(collection, "__getitem__"):
def generator():
i = 0
for item in collection:
yield (i, item)
i += 1
else:
def generator():
for item in collection:
yield (item, item)
return generator() | This function returns a generator which iterates over the collection,
similar to Collection.itertuples(). Collections are viewed by this module,
regardless of type, as a mapping from an index to the value. For sets, the
"index" is the value itself (ie, (V, V)). For dicts, it's a string, and
for lists, it's an int.
In general, this function defers to ``itertuples`` and/or ``iteritems``
methods defined on the instances; however, when duck typing, this function
typically provides the generator. | Below is the the instruction that describes the task:
### Input:
This function returns a generator which iterates over the collection,
similar to Collection.itertuples(). Collections are viewed by this module,
regardless of type, as a mapping from an index to the value. For sets, the
"index" is the value itself (ie, (V, V)). For dicts, it's a string, and
for lists, it's an int.
In general, this function defers to ``itertuples`` and/or ``iteritems``
methods defined on the instances; however, when duck typing, this function
typically provides the generator.
### Response:
def collection_generator(collection):
"""This function returns a generator which iterates over the collection,
similar to Collection.itertuples(). Collections are viewed by this module,
regardless of type, as a mapping from an index to the value. For sets, the
"index" is the value itself (ie, (V, V)). For dicts, it's a string, and
for lists, it's an int.
In general, this function defers to ``itertuples`` and/or ``iteritems``
methods defined on the instances; however, when duck typing, this function
typically provides the generator.
"""
if collection is _nothing:
def generator():
if False:
yield any
elif hasattr(collection, "itertuples"):
return collection.itertuples()
elif hasattr(collection, "keys"):
def generator():
for key in collection.keys():
yield (key, collection[key])
elif hasattr(collection, "items"):
return collection.items()
elif hasattr(collection, "iteritems"):
return collection.iteritems()
elif hasattr(collection, "__getitem__"):
def generator():
i = 0
for item in collection:
yield (i, item)
i += 1
else:
def generator():
for item in collection:
yield (item, item)
return generator() |
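A quick check with ordinary Python containers shows the uniform (index, value) view the helper produces:

print(list(collection_generator(['a', 'b'])))   # [(0, 'a'), (1, 'b')]  -- positional index
print(list(collection_generator({'x': 1})))     # [('x', 1)]            -- key as index
print(list(collection_generator({'only'})))     # [('only', 'only')]    -- value as its own index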
def GetPupil(self):
"""Retrieve pupil data
"""
pupil_data = _co.namedtuple('pupil_data', ['ZemaxApertureType',
'ApertureValue',
'entrancePupilDiameter',
'entrancePupilPosition',
'exitPupilDiameter',
'exitPupilPosition',
'ApodizationType',
'ApodizationFactor'])
data = self._ilensdataeditor.GetPupil()
return pupil_data(*data) | Retrieve pupil data | Below is the the instruction that describes the task:
### Input:
Retrieve pupil data
### Response:
def GetPupil(self):
"""Retrieve pupil data
"""
pupil_data = _co.namedtuple('pupil_data', ['ZemaxApertureType',
'ApertureValue',
'entrancePupilDiameter',
'entrancePupilPosition',
'exitPupilDiameter',
'exitPupilPosition',
'ApodizationType',
'ApodizationFactor'])
data = self._ilensdataeditor.GetPupil()
return pupil_data(*data) |
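Because the result is a namedtuple, its fields can be read by name; `lde` below is a placeholder for whatever lens-data-editor wrapper exposes GetPupil, and is not defined in the source.

pupil = lde.GetPupil()   # hypothetical wrapper instance
print(pupil.ApertureValue, pupil.entrancePupilDiameter)
print(pupil._asdict())   # all eight fields as an ordered mapping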
def _update_panic_status(self, status=None):
"""
Updates the panic status of the alarm panel.
:param status: status to use to update
:type status: boolean
:returns: boolean indicating the new status
"""
if status is None:
return
if status != self._panic_status:
self._panic_status, old_status = status, self._panic_status
if old_status is not None:
self.on_panic(status=self._panic_status)
return self._panic_status | Updates the panic status of the alarm panel.
:param status: status to use to update
:type status: boolean
:returns: boolean indicating the new status | Below is the the instruction that describes the task:
### Input:
Updates the panic status of the alarm panel.
:param status: status to use to update
:type status: boolean
:returns: boolean indicating the new status
### Response:
def _update_panic_status(self, status=None):
"""
Updates the panic status of the alarm panel.
:param status: status to use to update
:type status: boolean
:returns: boolean indicating the new status
"""
if status is None:
return
if status != self._panic_status:
self._panic_status, old_status = status, self._panic_status
if old_status is not None:
self.on_panic(status=self._panic_status)
return self._panic_status |