code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k) |
---|---|---|
def learn(self, x):
"""Encodes an input array, and performs weight updates and updates to the activity
statistics according to the respective methods implemented below."""
y = self.encode(x)
self.update_statistics([y])
self.update_weights([x],[y])
return y | Encodes an input array, and performs weight updates and updates to the activity
statistics according to the respective methods implemented below. | Below is the instruction that describes the task:
### Input:
Encodes an input array, and performs weight updates and updates to the activity
statistics according to the respective methods implemented below.
### Response:
def learn(self, x):
"""Encodes an input array, and performs weight updates and updates to the activity
statistics according to the respective methods implemented below."""
y = self.encode(x)
self.update_statistics([y])
self.update_weights([x],[y])
return y |
def _process_properties(self, properties):
"""
Transforms the command line properties into python dictionary
:return:
"""
if properties is not None:
self._properties = {}
for p in properties:
d = p.split('=')
self._properties[d[0]] = d[1] | Transforms the command line properties into python dictionary
:return: | Below is the instruction that describes the task:
### Input:
Transforms the command line properties into python dictionary
:return:
### Response:
def _process_properties(self, properties):
"""
Transforms the command line properties into python dictionary
:return:
"""
if properties is not None:
self._properties = {}
for p in properties:
d = p.split('=')
self._properties[d[0]] = d[1] |
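The property parser above splits each `key=value` pair on every `=`, so a value that itself contains `=` would be truncated to its first segment. A hedged standalone sketch of the same parsing idea (function name and sample values are hypothetical) that keeps such values intact:

```python
# Hypothetical illustration of command-line property parsing.
# str.partition splits on the first '=' only, so values containing '=' survive.
def parse_properties(pairs):
    props = {}
    for pair in pairs:
        key, _, value = pair.partition('=')
        props[key] = value
    return props

print(parse_properties(['mode=fast', 'filter=a=b']))
# {'mode': 'fast', 'filter': 'a=b'}
```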
def config_new(args):
'''Attempt to install a new method config into a workspace, by: generating
a template from a versioned method in the methods repo, then launching
a local editor (respecting the $EDITOR environment variable) to fill in
the incomplete input/output fields. Returns True if the config was
successfully installed, otherwise False'''
cfg = config_template(args)
# Iteratively try to edit/install the config: exit iteration by EITHER
# Successful config_put() after editing
# Leaving config unchanged in editor, e.g. quitting out of VI with :q
    # FIXME: put a small integer upper bound on the # of loops here
while True:
try:
edited = fccore.edit_text(cfg)
if edited == cfg:
eprint("No edits made, method config not installed ...")
break
if __EDITME__ in edited:
eprint("Edit is incomplete, method config not installed ...")
time.sleep(1)
continue
args.config = cfg = edited
config_put(args)
return True
except FireCloudServerError as fce:
__pretty_print_fc_exception(fce)
return False | Attempt to install a new method config into a workspace, by: generating
a template from a versioned method in the methods repo, then launching
a local editor (respecting the $EDITOR environment variable) to fill in
the incomplete input/output fields. Returns True if the config was
successfully installed, otherwise False | Below is the instruction that describes the task:
### Input:
Attempt to install a new method config into a workspace, by: generating
a template from a versioned method in the methods repo, then launching
a local editor (respecting the $EDITOR environment variable) to fill in
the incomplete input/output fields. Returns True if the config was
successfully installed, otherwise False
### Response:
def config_new(args):
'''Attempt to install a new method config into a workspace, by: generating
a template from a versioned method in the methods repo, then launching
a local editor (respecting the $EDITOR environment variable) to fill in
the incomplete input/output fields. Returns True if the config was
successfully installed, otherwise False'''
cfg = config_template(args)
# Iteratively try to edit/install the config: exit iteration by EITHER
# Successful config_put() after editing
# Leaving config unchanged in editor, e.g. quitting out of VI with :q
    # FIXME: put a small integer upper bound on the # of loops here
while True:
try:
edited = fccore.edit_text(cfg)
if edited == cfg:
eprint("No edits made, method config not installed ...")
break
if __EDITME__ in edited:
eprint("Edit is incomplete, method config not installed ...")
time.sleep(1)
continue
args.config = cfg = edited
config_put(args)
return True
except FireCloudServerError as fce:
__pretty_print_fc_exception(fce)
return False |
def _update_param(self):
r"""Update parameters
    This method updates the values of the algorithm parameters with the
methods provided
"""
# Update the gamma parameter.
if not isinstance(self._gamma_update, type(None)):
self._gamma = self._gamma_update(self._gamma)
# Update lambda parameter.
if not isinstance(self._lambda_update, type(None)):
self._lambda_param = self._lambda_update(self._lambda_param) | r"""Update parameters
This method updates the values of the algorithm parameters with the
methods provided | Below is the instruction that describes the task:
### Input:
r"""Update parameters
This method updates the values of the algorithm parameters with the
methods provided
### Response:
def _update_param(self):
r"""Update parameters
    This method updates the values of the algorithm parameters with the
methods provided
"""
# Update the gamma parameter.
if not isinstance(self._gamma_update, type(None)):
self._gamma = self._gamma_update(self._gamma)
# Update lambda parameter.
if not isinstance(self._lambda_update, type(None)):
self._lambda_param = self._lambda_update(self._lambda_param) |
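The two update hooks above are plain callables that take the current parameter value and return a new one (the `isinstance(..., type(None))` checks are just a roundabout `is not None`). A minimal sketch of what such a policy might look like; the decay rate and floor below are illustrative assumptions, not values from the source:

```python
# Hypothetical parameter-update policy: multiplicative decay with a floor.
def make_decay(rate=0.95, floor=1e-4):
    def update(value):
        return max(value * rate, floor)
    return update

gamma_update = make_decay()
gamma = 1.0
for _ in range(3):
    gamma = gamma_update(gamma)   # 0.95, 0.9025, 0.857375
```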
def get_course_data_sharing_consent(username, course_id, enterprise_customer_uuid):
"""
Get the data sharing consent object associated with a certain user of a customer for a course.
:param username: The user that grants consent.
:param course_id: The course for which consent is granted.
:param enterprise_customer_uuid: The consent requester.
:return: The data sharing consent object
"""
# Prevent circular imports.
DataSharingConsent = apps.get_model('consent', 'DataSharingConsent') # pylint: disable=invalid-name
return DataSharingConsent.objects.proxied_get(
username=username,
course_id=course_id,
enterprise_customer__uuid=enterprise_customer_uuid
) | Get the data sharing consent object associated with a certain user of a customer for a course.
:param username: The user that grants consent.
:param course_id: The course for which consent is granted.
:param enterprise_customer_uuid: The consent requester.
:return: The data sharing consent object | Below is the instruction that describes the task:
### Input:
Get the data sharing consent object associated with a certain user of a customer for a course.
:param username: The user that grants consent.
:param course_id: The course for which consent is granted.
:param enterprise_customer_uuid: The consent requester.
:return: The data sharing consent object
### Response:
def get_course_data_sharing_consent(username, course_id, enterprise_customer_uuid):
"""
Get the data sharing consent object associated with a certain user of a customer for a course.
:param username: The user that grants consent.
:param course_id: The course for which consent is granted.
:param enterprise_customer_uuid: The consent requester.
:return: The data sharing consent object
"""
# Prevent circular imports.
DataSharingConsent = apps.get_model('consent', 'DataSharingConsent') # pylint: disable=invalid-name
return DataSharingConsent.objects.proxied_get(
username=username,
course_id=course_id,
enterprise_customer__uuid=enterprise_customer_uuid
) |
def set_read_only(self, value):
"""
Sets whether model could be modified or not
"""
if self.__read_only__ != value:
self.__read_only__ = value
        self._update_read_only() | Sets whether model could be modified or not | Below is the instruction that describes the task:
### Input:
Sets whether model could be modified or not
### Response:
def set_read_only(self, value):
"""
Sets whether model could be modified or not
"""
if self.__read_only__ != value:
self.__read_only__ = value
self._update_read_only() |
def create_from_pybankid_exception(cls, exception):
"""Class method for initiating from a `PyBankID` exception.
:param bankid.exceptions.BankIDError exception:
:return: The wrapped exception.
:rtype: :py:class:`~FlaskPyBankIDError`
"""
return cls(
"{0}: {1}".format(exception.__class__.__name__, str(exception)),
_exception_class_to_status_code.get(exception.__class__),
) | Class method for initiating from a `PyBankID` exception.
:param bankid.exceptions.BankIDError exception:
:return: The wrapped exception.
:rtype: :py:class:`~FlaskPyBankIDError` | Below is the instruction that describes the task:
### Input:
Class method for initiating from a `PyBankID` exception.
:param bankid.exceptions.BankIDError exception:
:return: The wrapped exception.
:rtype: :py:class:`~FlaskPyBankIDError`
### Response:
def create_from_pybankid_exception(cls, exception):
"""Class method for initiating from a `PyBankID` exception.
:param bankid.exceptions.BankIDError exception:
:return: The wrapped exception.
:rtype: :py:class:`~FlaskPyBankIDError`
"""
return cls(
"{0}: {1}".format(exception.__class__.__name__, str(exception)),
_exception_class_to_status_code.get(exception.__class__),
) |
def evaluate_binop_logical(self, operation, left, right, **kwargs):
"""
Evaluate given logical binary operation with given operands.
"""
if not operation in self.binops_logical:
raise ValueError("Invalid logical binary operation '{}'".format(operation))
result = self.binops_logical[operation](left, right)
    return bool(result) | Evaluate given logical binary operation with given operands. | Below is the instruction that describes the task:
### Input:
Evaluate given logical binary operation with given operands.
### Response:
def evaluate_binop_logical(self, operation, left, right, **kwargs):
"""
Evaluate given logical binary operation with given operands.
"""
if not operation in self.binops_logical:
raise ValueError("Invalid logical binary operation '{}'".format(operation))
result = self.binops_logical[operation](left, right)
return bool(result) |
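`self.binops_logical` is evidently a mapping from operation names to two-argument callables; the method only validates the name, dispatches, and coerces the result to `bool`. A small self-contained sketch of that dispatch pattern (the key names below are assumptions for illustration):

```python
import operator

# Hypothetical operation table mirroring the dispatch above.
binops_logical = {
    'OP_AND': lambda a, b: a and b,
    'OP_OR':  lambda a, b: a or b,
    'OP_XOR': operator.xor,
}

def evaluate_binop_logical(operation, left, right):
    if operation not in binops_logical:
        raise ValueError("Invalid logical binary operation '{}'".format(operation))
    return bool(binops_logical[operation](left, right))

print(evaluate_binop_logical('OP_AND', True, False))   # False
print(evaluate_binop_logical('OP_XOR', True, False))   # True
```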
def _resubscribe(self, soft=False):
"""Resubscribes to all channels found in self.channel_configs.
:param soft: if True, unsubscribes first.
:return: None
"""
# Restore non-default Bitfinex websocket configuration
if self.bitfinex_config:
self.send(**self.bitfinex_config)
q_list = []
while True:
try:
identifier, q = self.channel_configs.popitem(last=True if soft else False)
except KeyError:
break
q_list.append((identifier, q.copy()))
if identifier == 'auth':
self.send(**q, auth=True)
continue
if soft:
q['event'] = 'unsubscribe'
self.send(**q)
# Resubscribe for soft start.
if soft:
for identifier, q in reversed(q_list):
self.channel_configs[identifier] = q
self.send(**q)
else:
for identifier, q in q_list:
self.channel_configs[identifier] = q | Resubscribes to all channels found in self.channel_configs.
:param soft: if True, unsubscribes first.
:return: None | Below is the instruction that describes the task:
### Input:
Resubscribes to all channels found in self.channel_configs.
:param soft: if True, unsubscribes first.
:return: None
### Response:
def _resubscribe(self, soft=False):
"""Resubscribes to all channels found in self.channel_configs.
:param soft: if True, unsubscribes first.
:return: None
"""
# Restore non-default Bitfinex websocket configuration
if self.bitfinex_config:
self.send(**self.bitfinex_config)
q_list = []
while True:
try:
identifier, q = self.channel_configs.popitem(last=True if soft else False)
except KeyError:
break
q_list.append((identifier, q.copy()))
if identifier == 'auth':
self.send(**q, auth=True)
continue
if soft:
q['event'] = 'unsubscribe'
self.send(**q)
# Resubscribe for soft start.
if soft:
for identifier, q in reversed(q_list):
self.channel_configs[identifier] = q
self.send(**q)
else:
for identifier, q in q_list:
self.channel_configs[identifier] = q |
def dump_file_by_path(self, path, **kwargs):
"""
Returns the concrete content for a file by path.
:param path: file path as string
:param kwargs: passed to state.solver.eval
:return: file contents as string
"""
file = self.state.fs.get(path)
if file is None:
return None
return file.concretize(**kwargs) | Returns the concrete content for a file by path.
:param path: file path as string
:param kwargs: passed to state.solver.eval
:return: file contents as string | Below is the instruction that describes the task:
### Input:
Returns the concrete content for a file by path.
:param path: file path as string
:param kwargs: passed to state.solver.eval
:return: file contents as string
### Response:
def dump_file_by_path(self, path, **kwargs):
"""
Returns the concrete content for a file by path.
:param path: file path as string
:param kwargs: passed to state.solver.eval
:return: file contents as string
"""
file = self.state.fs.get(path)
if file is None:
return None
return file.concretize(**kwargs) |
def _prepare_io_handler(self, handler):
"""Call the `interfaces.IOHandler.prepare` method and
remove the handler from unprepared handler list when done.
"""
logger.debug(" preparing handler: {0!r}".format(handler))
ret = handler.prepare()
logger.debug(" prepare result: {0!r}".format(ret))
if isinstance(ret, HandlerReady):
del self._unprepared_handlers[handler]
prepared = True
elif isinstance(ret, PrepareAgain):
if ret.timeout is not None:
if self._timeout is not None:
self._timeout = min(self._timeout, ret.timeout)
else:
self._timeout = ret.timeout
prepared = False
else:
raise TypeError("Unexpected result type from prepare()")
return prepared | Call the `interfaces.IOHandler.prepare` method and
remove the handler from unprepared handler list when done. | Below is the instruction that describes the task:
### Input:
Call the `interfaces.IOHandler.prepare` method and
remove the handler from unprepared handler list when done.
### Response:
def _prepare_io_handler(self, handler):
"""Call the `interfaces.IOHandler.prepare` method and
remove the handler from unprepared handler list when done.
"""
logger.debug(" preparing handler: {0!r}".format(handler))
ret = handler.prepare()
logger.debug(" prepare result: {0!r}".format(ret))
if isinstance(ret, HandlerReady):
del self._unprepared_handlers[handler]
prepared = True
elif isinstance(ret, PrepareAgain):
if ret.timeout is not None:
if self._timeout is not None:
self._timeout = min(self._timeout, ret.timeout)
else:
self._timeout = ret.timeout
prepared = False
else:
raise TypeError("Unexpected result type from prepare()")
return prepared |
def fail_fast_imap(pool, call, items):
""" Run a function against each item in a given list, yielding each
function result in turn, where the function call is handled in a
:class:`~eventlet.greenthread.GreenThread` spawned by the provided pool.
If any function raises an exception, all other ongoing threads are killed,
and the exception is raised to the caller.
This function is similar to :meth:`~eventlet.greenpool.GreenPool.imap`.
:param pool: Pool to spawn function threads from
:type pool: eventlet.greenpool.GreenPool
:param call: Function call to make, expecting to receive an item from the
given list
"""
result_queue = LightQueue(maxsize=len(items))
spawned_threads = set()
def handle_result(finished_thread):
try:
thread_result = finished_thread.wait()
spawned_threads.remove(finished_thread)
result_queue.put((thread_result, None))
except Exception:
spawned_threads.remove(finished_thread)
result_queue.put((None, sys.exc_info()))
for item in items:
gt = pool.spawn(call, item)
spawned_threads.add(gt)
gt.link(handle_result)
while spawned_threads:
result, exc_info = result_queue.get()
if exc_info is not None:
# Kill all other ongoing threads
for ongoing_thread in spawned_threads:
ongoing_thread.kill()
# simply raising here (even raising a full exc_info) isn't
# sufficient to preserve the original stack trace.
# greenlet.throw() achieves this.
eventlet.getcurrent().throw(*exc_info)
yield result | Run a function against each item in a given list, yielding each
function result in turn, where the function call is handled in a
:class:`~eventlet.greenthread.GreenThread` spawned by the provided pool.
If any function raises an exception, all other ongoing threads are killed,
and the exception is raised to the caller.
This function is similar to :meth:`~eventlet.greenpool.GreenPool.imap`.
:param pool: Pool to spawn function threads from
:type pool: eventlet.greenpool.GreenPool
:param call: Function call to make, expecting to receive an item from the
given list | Below is the instruction that describes the task:
### Input:
Run a function against each item in a given list, yielding each
function result in turn, where the function call is handled in a
:class:`~eventlet.greenthread.GreenThread` spawned by the provided pool.
If any function raises an exception, all other ongoing threads are killed,
and the exception is raised to the caller.
This function is similar to :meth:`~eventlet.greenpool.GreenPool.imap`.
:param pool: Pool to spawn function threads from
:type pool: eventlet.greenpool.GreenPool
:param call: Function call to make, expecting to receive an item from the
given list
### Response:
def fail_fast_imap(pool, call, items):
""" Run a function against each item in a given list, yielding each
function result in turn, where the function call is handled in a
:class:`~eventlet.greenthread.GreenThread` spawned by the provided pool.
If any function raises an exception, all other ongoing threads are killed,
and the exception is raised to the caller.
This function is similar to :meth:`~eventlet.greenpool.GreenPool.imap`.
:param pool: Pool to spawn function threads from
:type pool: eventlet.greenpool.GreenPool
:param call: Function call to make, expecting to receive an item from the
given list
"""
result_queue = LightQueue(maxsize=len(items))
spawned_threads = set()
def handle_result(finished_thread):
try:
thread_result = finished_thread.wait()
spawned_threads.remove(finished_thread)
result_queue.put((thread_result, None))
except Exception:
spawned_threads.remove(finished_thread)
result_queue.put((None, sys.exc_info()))
for item in items:
gt = pool.spawn(call, item)
spawned_threads.add(gt)
gt.link(handle_result)
while spawned_threads:
result, exc_info = result_queue.get()
if exc_info is not None:
# Kill all other ongoing threads
for ongoing_thread in spawned_threads:
ongoing_thread.kill()
# simply raising here (even raising a full exc_info) isn't
# sufficient to preserve the original stack trace.
# greenlet.throw() achieves this.
eventlet.getcurrent().throw(*exc_info)
yield result |
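A hedged usage sketch for the helper above, assuming `fail_fast_imap` is importable from its module: it expects an `eventlet` green pool and a callable, spawns one greenthread per item, and yields results as they complete (completion order, not input order). The worker function and item list here are hypothetical.

```python
import eventlet

def check(name):
    eventlet.sleep(0)      # stand-in for real green I/O work
    return name.upper()

pool = eventlet.GreenPool(size=10)
for result in fail_fast_imap(pool, check, ['alpha', 'beta', 'gamma']):
    print(result)
```

If any `check` call raised, the remaining greenthreads would be killed and the exception re-raised in the caller via `greenlet.throw`, preserving the original traceback.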
def _prob_match(self, features):
"""Compute match probabilities.
Parameters
----------
features : numpy.ndarray
The data to train the model on.
Returns
-------
numpy.ndarray
        The match probabilities.
"""
# compute the probabilities
probs = self.kernel.predict_proba(features)
# get the position of match probabilities
classes = list(self.kernel.classes_)
match_class_position = classes.index(1)
return probs[:, match_class_position] | Compute match probabilities.
Parameters
----------
features : numpy.ndarray
The data to train the model on.
Returns
-------
numpy.ndarray
The match probabilities. | Below is the instruction that describes the task:
### Input:
Compute match probabilities.
Parameters
----------
features : numpy.ndarray
The data to train the model on.
Returns
-------
numpy.ndarray
The match probabilities.
### Response:
def _prob_match(self, features):
"""Compute match probabilities.
Parameters
----------
features : numpy.ndarray
The data to train the model on.
Returns
-------
numpy.ndarray
        The match probabilities.
"""
# compute the probabilities
probs = self.kernel.predict_proba(features)
# get the position of match probabilities
classes = list(self.kernel.classes_)
match_class_position = classes.index(1)
return probs[:, match_class_position] |
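The lookup through `classes_` matters because scikit-learn orders the columns of `predict_proba` by sorted class label rather than by any notion of a "positive" class. A minimal standalone sketch of the same column-selection idea (toy data, assuming a scikit-learn classifier as the kernel):

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.array([[0.0], [0.2], [0.8], [1.0]])
y = np.array([0, 0, 1, 1])
clf = LogisticRegression().fit(X, y)

probs = clf.predict_proba(X)
match_col = list(clf.classes_).index(1)   # column holding P(label == 1)
print(probs[:, match_col])
```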
def find_genus(files, database, threads=12):
"""
Uses MASH to find the genus of fasta files.
:param files: File dictionary returned by filer method.
:param database: Path to reduced refseq database sketch.
:param threads: Number of threads to run mash with.
:return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
"""
genus_dict = dict()
tmpdir = str(time.time()).split('.')[-1]
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir)
for file_name, fasta in files.items():
mash.screen(database, fasta,
threads=threads,
w='',
i=0.95,
output_file=os.path.join(tmpdir, 'screen.tab'))
screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
try:
os.remove(os.path.join(tmpdir, 'screen.tab'))
except IOError:
pass
try:
genus = screen_output[0].query_id.split('/')[-3]
if genus == 'Shigella':
genus = 'Escherichia'
genus_dict[file_name] = genus
except IndexError:
genus_dict[file_name] = 'NA'
shutil.rmtree(tmpdir)
return genus_dict | Uses MASH to find the genus of fasta files.
:param files: File dictionary returned by filer method.
:param database: Path to reduced refseq database sketch.
:param threads: Number of threads to run mash with.
:return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found. | Below is the instruction that describes the task:
### Input:
Uses MASH to find the genus of fasta files.
:param files: File dictionary returned by filer method.
:param database: Path to reduced refseq database sketch.
:param threads: Number of threads to run mash with.
:return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
### Response:
def find_genus(files, database, threads=12):
"""
Uses MASH to find the genus of fasta files.
:param files: File dictionary returned by filer method.
:param database: Path to reduced refseq database sketch.
:param threads: Number of threads to run mash with.
:return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
"""
genus_dict = dict()
tmpdir = str(time.time()).split('.')[-1]
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir)
for file_name, fasta in files.items():
mash.screen(database, fasta,
threads=threads,
w='',
i=0.95,
output_file=os.path.join(tmpdir, 'screen.tab'))
screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
try:
os.remove(os.path.join(tmpdir, 'screen.tab'))
except IOError:
pass
try:
genus = screen_output[0].query_id.split('/')[-3]
if genus == 'Shigella':
genus = 'Escherichia'
genus_dict[file_name] = genus
except IndexError:
genus_dict[file_name] = 'NA'
shutil.rmtree(tmpdir)
return genus_dict |
def reject(self, f, *args):
"""Like 'match', but throw a parse error if 'f' matches.
This is useful when a parser wants to be strict about specific things
being prohibited. For example, DottySQL bans the use of SQL keywords as
variable names.
"""
match = self.match(f, *args)
if match:
token = self.peek(0)
raise errors.EfilterParseError(
query=self.tokenizer.source, token=token,
message="Was not expecting a %s here." % token.name) | Like 'match', but throw a parse error if 'f' matches.
This is useful when a parser wants to be strict about specific things
being prohibited. For example, DottySQL bans the use of SQL keywords as
variable names. | Below is the instruction that describes the task:
### Input:
Like 'match', but throw a parse error if 'f' matches.
This is useful when a parser wants to be strict about specific things
being prohibited. For example, DottySQL bans the use of SQL keywords as
variable names.
### Response:
def reject(self, f, *args):
"""Like 'match', but throw a parse error if 'f' matches.
This is useful when a parser wants to be strict about specific things
being prohibited. For example, DottySQL bans the use of SQL keywords as
variable names.
"""
match = self.match(f, *args)
if match:
token = self.peek(0)
raise errors.EfilterParseError(
query=self.tokenizer.source, token=token,
message="Was not expecting a %s here." % token.name) |
def __look_up_geom(self, geomType):
""" compares the geometry object's type verse the JSOn
specs for geometry types
Inputs:
geomType - string - geometry object's type
Returns:
string JSON geometry type or None if not an allowed type
"""
if geomType.lower() == "point":
return "esriGeometryPoint"
elif geomType.lower() == "polyline":
return "esriGeometryPolyline"
elif geomType.lower() == "polygon":
return "esriGeometryPolygon"
elif geomType.lower() == "multipoint":
return "esriGeometryMultipoint"
else:
        return None | compares the geometry object's type versus the JSON
specs for geometry types
Inputs:
geomType - string - geometry object's type
Returns:
string JSON geometry type or None if not an allowed type | Below is the instruction that describes the task:
### Input:
compares the geometry object's type versus the JSON
specs for geometry types
Inputs:
geomType - string - geometry object's type
Returns:
string JSON geometry type or None if not an allowed type
### Response:
def __look_up_geom(self, geomType):
""" compares the geometry object's type verse the JSOn
specs for geometry types
Inputs:
geomType - string - geometry object's type
Returns:
string JSON geometry type or None if not an allowed type
"""
if geomType.lower() == "point":
return "esriGeometryPoint"
elif geomType.lower() == "polyline":
return "esriGeometryPolyline"
elif geomType.lower() == "polygon":
return "esriGeometryPolygon"
elif geomType.lower() == "multipoint":
return "esriGeometryMultipoint"
else:
return None |
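The if/elif chain above is a fixed string-to-string mapping, so it could equally be written as a dictionary lookup; a behavior-preserving sketch (still returning None for types outside the allowed set):

```python
_GEOM_TYPES = {
    "point": "esriGeometryPoint",
    "polyline": "esriGeometryPolyline",
    "polygon": "esriGeometryPolygon",
    "multipoint": "esriGeometryMultipoint",
}

def look_up_geom(geom_type):
    """Map a geometry object's type to its Esri JSON geometry type, or None."""
    return _GEOM_TYPES.get(geom_type.lower())
```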
def guard_sample(analysis_request):
"""Returns whether 'sample' transition can be performed or not. Returns
True only if the analysis request has the DateSampled and Sampler set or if
the user belongs to the Samplers group
"""
if analysis_request.getDateSampled() and analysis_request.getSampler():
return True
current_user = api.get_current_user()
return "Sampler" in current_user.getRolesInContext(analysis_request) | Returns whether 'sample' transition can be performed or not. Returns
True only if the analysis request has the DateSampled and Sampler set or if
the user belongs to the Samplers group | Below is the instruction that describes the task:
### Input:
Returns whether 'sample' transition can be performed or not. Returns
True only if the analysis request has the DateSampled and Sampler set or if
the user belongs to the Samplers group
### Response:
def guard_sample(analysis_request):
"""Returns whether 'sample' transition can be performed or not. Returns
True only if the analysis request has the DateSampled and Sampler set or if
the user belongs to the Samplers group
"""
if analysis_request.getDateSampled() and analysis_request.getSampler():
return True
current_user = api.get_current_user()
return "Sampler" in current_user.getRolesInContext(analysis_request) |
def plot_ants_plane(off_screen=False, notebook=None):
"""
Demonstrate how to create a plot class to plot multiple meshes while
adding scalars and text.
Plot two ants and airplane
"""
# load and shrink airplane
airplane = vtki.PolyData(planefile)
airplane.points /= 10
# pts = airplane.points # gets pointer to array
# pts /= 10 # shrink
# rotate and translate ant so it is on the plane
ant = vtki.PolyData(antfile)
ant.rotate_x(90)
ant.translate([90, 60, 15])
# Make a copy and add another ant
ant_copy = ant.copy()
ant_copy.translate([30, 0, -10])
# Create plotting object
plotter = vtki.Plotter(off_screen=off_screen, notebook=notebook)
plotter.add_mesh(ant, 'r')
plotter.add_mesh(ant_copy, 'b')
# Add airplane mesh and make the color equal to the Y position
plane_scalars = airplane.points[:, 1]
plotter.add_mesh(airplane, scalars=plane_scalars, stitle='Plane Y\nLocation')
plotter.add_text('Ants and Plane Example')
plotter.plot() | Demonstrate how to create a plot class to plot multiple meshes while
adding scalars and text.
Plot two ants and airplane | Below is the instruction that describes the task:
### Input:
Demonstrate how to create a plot class to plot multiple meshes while
adding scalars and text.
Plot two ants and airplane
### Response:
def plot_ants_plane(off_screen=False, notebook=None):
"""
Demonstrate how to create a plot class to plot multiple meshes while
adding scalars and text.
Plot two ants and airplane
"""
# load and shrink airplane
airplane = vtki.PolyData(planefile)
airplane.points /= 10
# pts = airplane.points # gets pointer to array
# pts /= 10 # shrink
# rotate and translate ant so it is on the plane
ant = vtki.PolyData(antfile)
ant.rotate_x(90)
ant.translate([90, 60, 15])
# Make a copy and add another ant
ant_copy = ant.copy()
ant_copy.translate([30, 0, -10])
# Create plotting object
plotter = vtki.Plotter(off_screen=off_screen, notebook=notebook)
plotter.add_mesh(ant, 'r')
plotter.add_mesh(ant_copy, 'b')
# Add airplane mesh and make the color equal to the Y position
plane_scalars = airplane.points[:, 1]
plotter.add_mesh(airplane, scalars=plane_scalars, stitle='Plane Y\nLocation')
plotter.add_text('Ants and Plane Example')
plotter.plot() |
def migrated(name,
remote_addr,
cert,
key,
verify_cert,
src_remote_addr,
stop_and_start=False,
src_cert=None,
src_key=None,
src_verify_cert=None):
''' Ensure a container is migrated to another host
If the container is running, it either must be shut down
first (use stop_and_start=True) or criu must be installed
on the source and destination machines.
For this operation both certs need to be authenticated,
use :mod:`lxd.authenticate <salt.states.lxd.authenticate`
to authenticate your cert(s).
name :
The container to migrate
remote_addr :
An URL to the destination remote Server
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
        PEM Formatted SSL certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
src_remote_addr :
An URL to the source remote Server
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
stop_and_start:
Stop before migrating and start after
src_cert :
        PEM Formatted SSL certificate, if None we copy "cert"
Examples:
~/.config/lxc/client.crt
src_key :
PEM Formatted SSL Key, if None we copy "key"
Examples:
~/.config/lxc/client.key
src_verify_cert :
        Whether to verify the cert, if None we copy "verify_cert"
'''
ret = {
'name': name,
'remote_addr': remote_addr,
'cert': cert,
'key': key,
'verify_cert': verify_cert,
'src_remote_addr': src_remote_addr,
'src_and_start': stop_and_start,
'src_cert': src_cert,
'src_key': src_key,
'changes': {}
}
dest_container = None
try:
dest_container = __salt__['lxd.container_get'](
name, remote_addr, cert, key,
verify_cert, _raw=True
)
except CommandExecutionError as e:
return _error(ret, six.text_type(e))
except SaltInvocationError as e:
# Destination container not found
pass
if dest_container is not None:
return _success(
ret,
'Container "{0}" exists on the destination'.format(name)
)
if src_verify_cert is None:
src_verify_cert = verify_cert
try:
__salt__['lxd.container_get'](
name, src_remote_addr, src_cert, src_key, src_verify_cert, _raw=True
)
except CommandExecutionError as e:
return _error(ret, six.text_type(e))
except SaltInvocationError as e:
# Container not found
return _error(ret, 'Source Container "{0}" not found'.format(name))
if __opts__['test']:
ret['changes']['migrated'] = (
'Would migrate the container "{0}" from "{1}" to "{2}"'
).format(name, src_remote_addr, remote_addr)
return _unchanged(ret, ret['changes']['migrated'])
try:
__salt__['lxd.container_migrate'](
name, stop_and_start, remote_addr, cert, key,
verify_cert, src_remote_addr, src_cert, src_key, src_verify_cert
)
except CommandExecutionError as e:
return _error(ret, six.text_type(e))
ret['changes']['migrated'] = (
'Migrated the container "{0}" from "{1}" to "{2}"'
).format(name, src_remote_addr, remote_addr)
return _success(ret, ret['changes']['migrated']) | Ensure a container is migrated to another host
If the container is running, it either must be shut down
first (use stop_and_start=True) or criu must be installed
on the source and destination machines.
For this operation both certs need to be authenticated,
use :mod:`lxd.authenticate <salt.states.lxd.authenticate`
to authenticate your cert(s).
name :
The container to migrate
remote_addr :
An URL to the destination remote Server
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
        PEM Formatted SSL certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
        Whether to verify the cert, this is by default True
        but in most cases you want to turn it off as LXD
        normally uses self-signed certificates.
src_remote_addr :
An URL to the source remote Server
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
stop_and_start:
Stop before migrating and start after
src_cert :
        PEM Formatted SSL certificate, if None we copy "cert"
Examples:
~/.config/lxc/client.crt
src_key :
PEM Formatted SSL Key, if None we copy "key"
Examples:
~/.config/lxc/client.key
src_verify_cert :
        Whether to verify the cert, if None we copy "verify_cert" | Below is the instruction that describes the task:
### Input:
Ensure a container is migrated to another host
If the container is running, it either must be shut down
first (use stop_and_start=True) or criu must be installed
on the source and destination machines.
For this operation both certs need to be authenticated,
use :mod:`lxd.authenticate <salt.states.lxd.authenticate`
to authenticate your cert(s).
name :
The container to migrate
remote_addr :
An URL to the destination remote Server
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
        PEM Formatted SSL certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
        Whether to verify the cert, this is by default True
        but in most cases you want to turn it off as LXD
        normally uses self-signed certificates.
src_remote_addr :
An URL to the source remote Server
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
stop_and_start:
Stop before migrating and start after
src_cert :
        PEM Formatted SSL certificate, if None we copy "cert"
Examples:
~/.config/lxc/client.crt
src_key :
PEM Formatted SSL Key, if None we copy "key"
Examples:
~/.config/lxc/client.key
src_verify_cert :
        Whether to verify the cert, if None we copy "verify_cert"
### Response:
def migrated(name,
remote_addr,
cert,
key,
verify_cert,
src_remote_addr,
stop_and_start=False,
src_cert=None,
src_key=None,
src_verify_cert=None):
''' Ensure a container is migrated to another host
If the container is running, it either must be shut down
first (use stop_and_start=True) or criu must be installed
on the source and destination machines.
For this operation both certs need to be authenticated,
use :mod:`lxd.authenticate <salt.states.lxd.authenticate`
to authenticate your cert(s).
name :
The container to migrate
remote_addr :
An URL to the destination remote Server
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
        PEM Formatted SSL certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
        Whether to verify the cert, this is by default True
        but in most cases you want to turn it off as LXD
        normally uses self-signed certificates.
src_remote_addr :
An URL to the source remote Server
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
stop_and_start:
Stop before migrating and start after
src_cert :
        PEM Formatted SSL certificate, if None we copy "cert"
Examples:
~/.config/lxc/client.crt
src_key :
PEM Formatted SSL Key, if None we copy "key"
Examples:
~/.config/lxc/client.key
src_verify_cert :
        Whether to verify the cert, if None we copy "verify_cert"
'''
ret = {
'name': name,
'remote_addr': remote_addr,
'cert': cert,
'key': key,
'verify_cert': verify_cert,
'src_remote_addr': src_remote_addr,
'src_and_start': stop_and_start,
'src_cert': src_cert,
'src_key': src_key,
'changes': {}
}
dest_container = None
try:
dest_container = __salt__['lxd.container_get'](
name, remote_addr, cert, key,
verify_cert, _raw=True
)
except CommandExecutionError as e:
return _error(ret, six.text_type(e))
except SaltInvocationError as e:
# Destination container not found
pass
if dest_container is not None:
return _success(
ret,
'Container "{0}" exists on the destination'.format(name)
)
if src_verify_cert is None:
src_verify_cert = verify_cert
try:
__salt__['lxd.container_get'](
name, src_remote_addr, src_cert, src_key, src_verify_cert, _raw=True
)
except CommandExecutionError as e:
return _error(ret, six.text_type(e))
except SaltInvocationError as e:
# Container not found
return _error(ret, 'Source Container "{0}" not found'.format(name))
if __opts__['test']:
ret['changes']['migrated'] = (
'Would migrate the container "{0}" from "{1}" to "{2}"'
).format(name, src_remote_addr, remote_addr)
return _unchanged(ret, ret['changes']['migrated'])
try:
__salt__['lxd.container_migrate'](
name, stop_and_start, remote_addr, cert, key,
verify_cert, src_remote_addr, src_cert, src_key, src_verify_cert
)
except CommandExecutionError as e:
return _error(ret, six.text_type(e))
ret['changes']['migrated'] = (
'Migrated the container "{0}" from "{1}" to "{2}"'
).format(name, src_remote_addr, remote_addr)
return _success(ret, ret['changes']['migrated']) |
def hicpro_mapping_chart (self):
""" Generate the HiC-Pro Aligned reads plot """
# Specify the order of the different possible categories
keys = OrderedDict()
keys['Full_Alignments_Read'] = { 'color': '#005ce6', 'name': 'Full reads Alignments' }
keys['Trimmed_Alignments_Read'] = { 'color': '#3385ff', 'name': 'Trimmed reads Alignments' }
keys['Failed_To_Align_Read'] = { 'color': '#a9a2a2', 'name': 'Failed To Align' }
data = [{},{}]
for s_name in self.hicpro_data:
for r in [1,2]:
data[r-1]['{} [R{}]'.format(s_name, r)] = {
'Full_Alignments_Read': self.hicpro_data[s_name]['global_R{}'.format(r)],
'Trimmed_Alignments_Read': self.hicpro_data[s_name]['local_R{}'.format(r)],
'Failed_To_Align_Read': int(self.hicpro_data[s_name]['total_R{}'.format(r)]) - int(self.hicpro_data[s_name]['mapped_R{}'.format(r)])
}
# Config for the plot
config = {
'id': 'hicpro_mapping_stats_plot',
'title': 'HiC-Pro: Mapping Statistics',
'ylab': '# Reads',
'ylab': '# Reads: Read 1',
'data_labels': [
{'name': 'Read 1', 'ylab': '# Reads: Read 1'},
{'name': 'Read 2', 'ylab': '# Reads: Read 2'}
]
}
    return bargraph.plot(data, [keys, keys], config) | Generate the HiC-Pro Aligned reads plot | Below is the instruction that describes the task:
### Input:
Generate the HiC-Pro Aligned reads plot
### Response:
def hicpro_mapping_chart (self):
""" Generate the HiC-Pro Aligned reads plot """
# Specify the order of the different possible categories
keys = OrderedDict()
keys['Full_Alignments_Read'] = { 'color': '#005ce6', 'name': 'Full reads Alignments' }
keys['Trimmed_Alignments_Read'] = { 'color': '#3385ff', 'name': 'Trimmed reads Alignments' }
keys['Failed_To_Align_Read'] = { 'color': '#a9a2a2', 'name': 'Failed To Align' }
data = [{},{}]
for s_name in self.hicpro_data:
for r in [1,2]:
data[r-1]['{} [R{}]'.format(s_name, r)] = {
'Full_Alignments_Read': self.hicpro_data[s_name]['global_R{}'.format(r)],
'Trimmed_Alignments_Read': self.hicpro_data[s_name]['local_R{}'.format(r)],
'Failed_To_Align_Read': int(self.hicpro_data[s_name]['total_R{}'.format(r)]) - int(self.hicpro_data[s_name]['mapped_R{}'.format(r)])
}
# Config for the plot
config = {
'id': 'hicpro_mapping_stats_plot',
'title': 'HiC-Pro: Mapping Statistics',
'ylab': '# Reads',
'ylab': '# Reads: Read 1',
'data_labels': [
{'name': 'Read 1', 'ylab': '# Reads: Read 1'},
{'name': 'Read 2', 'ylab': '# Reads: Read 2'}
]
}
return bargraph.plot(data, [keys, keys], config) |
def find(soup, name=None, attrs=None, recursive=True, text=None, **kwargs):
"""Modified find method; see `find_all`, above.
"""
tags = find_all(
soup, name, attrs or {}, recursive, text, 1, **kwargs
)
if tags:
        return tags[0] | Modified find method; see `find_all`, above. | Below is the instruction that describes the task:
### Input:
Modified find method; see `find_all`, above.
### Response:
def find(soup, name=None, attrs=None, recursive=True, text=None, **kwargs):
"""Modified find method; see `find_all`, above.
"""
tags = find_all(
soup, name, attrs or {}, recursive, text, 1, **kwargs
)
if tags:
return tags[0] |
def timestamp_pb(self):
"""Return a timestamp message.
Returns:
(:class:`~google.protobuf.timestamp_pb2.Timestamp`): Timestamp message
"""
inst = self if self.tzinfo is not None else self.replace(tzinfo=pytz.UTC)
delta = inst - _UTC_EPOCH
seconds = int(delta.total_seconds())
nanos = self._nanosecond or self.microsecond * 1000
return timestamp_pb2.Timestamp(seconds=seconds, nanos=nanos) | Return a timestamp message.
Returns:
      (:class:`~google.protobuf.timestamp_pb2.Timestamp`): Timestamp message | Below is the instruction that describes the task:
### Input:
Return a timestamp message.
Returns:
(:class:`~google.protobuf.timestamp_pb2.Timestamp`): Timestamp message
### Response:
def timestamp_pb(self):
"""Return a timestamp message.
Returns:
(:class:`~google.protobuf.timestamp_pb2.Timestamp`): Timestamp message
"""
inst = self if self.tzinfo is not None else self.replace(tzinfo=pytz.UTC)
delta = inst - _UTC_EPOCH
seconds = int(delta.total_seconds())
nanos = self._nanosecond or self.microsecond * 1000
return timestamp_pb2.Timestamp(seconds=seconds, nanos=nanos) |
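For comparison, the protobuf runtime can do the same datetime-to-Timestamp conversion itself; a small sketch assuming a timezone-aware input, which is what the method above also normalizes to (UTC):

```python
import datetime
import pytz
from google.protobuf import timestamp_pb2

dt = datetime.datetime(2020, 1, 2, 3, 4, 5, 678900, tzinfo=pytz.UTC)

ts = timestamp_pb2.Timestamp()
ts.FromDatetime(dt)                # fills seconds and microsecond-resolution nanos
print(ts.seconds, ts.nanos)        # 1577934245 678900000
```

The hand-rolled version in the source exists mainly to honor an extra `_nanosecond` attribute that a plain `datetime` cannot carry.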
def bind(cls):
"""
Bind the buttons to adapter's event handler.
"""
super(cls, cls).bind()
cls.search_btn_el.bind("click", cls.start)
    cls.input_el.bind("keypress", func_on_enter(cls.start)) | Bind the buttons to adapter's event handler. | Below is the instruction that describes the task:
### Input:
Bind the buttons to adapter's event handler.
### Response:
def bind(cls):
"""
Bind the buttons to adapter's event handler.
"""
super(cls, cls).bind()
cls.search_btn_el.bind("click", cls.start)
cls.input_el.bind("keypress", func_on_enter(cls.start)) |
def error_response(self, e):
"""Make response for an IIIFError e.
Also add compliance header.
"""
self.add_compliance_header()
return self.make_response(*e.image_server_response(self.api_version)) | Make response for an IIIFError e.
Also add compliance header. | Below is the instruction that describes the task:
### Input:
Make response for an IIIFError e.
Also add compliance header.
### Response:
def error_response(self, e):
"""Make response for an IIIFError e.
Also add compliance header.
"""
self.add_compliance_header()
return self.make_response(*e.image_server_response(self.api_version)) |
def run(self):
"""A bit bulky atm..."""
self.close_connection = False
try:
while True:
self.started_response = False
self.status = ""
self.outheaders = []
self.sent_headers = False
self.chunked_write = False
self.write_buffer = StringIO.StringIO()
self.content_length = None
# Copy the class environ into self.
ENVIRON = self.environ = self.connection_environ.copy()
self.environ.update(self.server_environ)
request_line = yield self.connfh.readline()
if request_line == "\r\n":
# RFC 2616 sec 4.1: "... it should ignore the CRLF."
tolerance = 5
while tolerance and request_line == "\r\n":
request_line = yield self.connfh.readline()
tolerance -= 1
if not tolerance:
return
method, path, req_protocol = request_line.strip().split(" ", 2)
ENVIRON["REQUEST_METHOD"] = method
ENVIRON["CONTENT_LENGTH"] = ''
scheme, location, path, params, qs, frag = urlparse(path)
if frag:
yield self.simple_response("400 Bad Request",
"Illegal #fragment in Request-URI.")
return
if scheme:
ENVIRON["wsgi.url_scheme"] = scheme
if params:
path = path + ";" + params
ENVIRON["SCRIPT_NAME"] = ""
# Unquote the path+params (e.g. "/this%20path" -> "this path").
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
#
# But note that "...a URI must be separated into its components
# before the escaped characters within those components can be
# safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
atoms = [unquote(x) for x in quoted_slash.split(path)]
path = "%2F".join(atoms)
ENVIRON["PATH_INFO"] = path
# Note that, like wsgiref and most other WSGI servers,
# we unquote the path but not the query string.
ENVIRON["QUERY_STRING"] = qs
# Compare request and server HTTP protocol versions, in case our
# server does not support the requested protocol. Limit our output
# to min(req, server). We want the following output:
# request server actual written supported response
# protocol protocol response protocol feature set
# a 1.0 1.0 1.0 1.0
# b 1.0 1.1 1.1 1.0
# c 1.1 1.0 1.0 1.0
# d 1.1 1.1 1.1 1.1
# Notice that, in (b), the response will be "HTTP/1.1" even though
# the client only understands 1.0. RFC 2616 10.5.6 says we should
# only return 505 if the _major_ version is different.
rp = int(req_protocol[5]), int(req_protocol[7])
server_protocol = ENVIRON["ACTUAL_SERVER_PROTOCOL"]
sp = int(server_protocol[5]), int(server_protocol[7])
if sp[0] != rp[0]:
yield self.simple_response("505 HTTP Version Not Supported")
return
# Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
ENVIRON["SERVER_PROTOCOL"] = req_protocol
self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
# If the Request-URI was an absoluteURI, use its location atom.
if location:
ENVIRON["SERVER_NAME"] = location
# then all the http headers
try:
while True:
line = yield self.connfh.readline()
if line == '\r\n':
# Normal end of headers
break
if line[0] in ' \t':
# It's a continuation line.
v = line.strip()
else:
k, v = line.split(":", 1)
k, v = k.strip().upper(), v.strip()
envname = "HTTP_" + k.replace("-", "_")
if k in comma_separated_headers:
existing = ENVIRON.get(envname)
if existing:
v = ", ".join((existing, v))
ENVIRON[envname] = v
ct = ENVIRON.pop("HTTP_CONTENT_TYPE", None)
if ct:
ENVIRON["CONTENT_TYPE"] = ct
cl = ENVIRON.pop("HTTP_CONTENT_LENGTH", None)
if cl:
ENVIRON["CONTENT_LENGTH"] = cl
except ValueError, ex:
yield self.simple_response("400 Bad Request", repr(ex.args))
return
creds = ENVIRON.get("HTTP_AUTHORIZATION", "").split(" ", 1)
ENVIRON["AUTH_TYPE"] = creds[0]
if creds[0].lower() == 'basic':
user, pw = base64.decodestring(creds[1]).split(":", 1)
ENVIRON["REMOTE_USER"] = user
# Persistent connection support
if req_protocol == "HTTP/1.1":
if ENVIRON.get("HTTP_CONNECTION", "") == "close":
self.close_connection = True
else:
# HTTP/1.0
if ENVIRON.get("HTTP_CONNECTION", "").lower() != "keep-alive":
self.close_connection = True
# Transfer-Encoding support
te = None
if self.response_protocol == "HTTP/1.1":
te = ENVIRON.get("HTTP_TRANSFER_ENCODING")
if te:
te = [x.strip().lower() for x in te.split(",") if x.strip()]
if te:
# reject transfer encodings for now
yield self.simple_response("501 Unimplemented")
self.close_connection = True
return
ENV_COGEN_PROXY = ENVIRON['cogen.wsgi'] = async.COGENProxy(
content_length = int(ENVIRON.get('CONTENT_LENGTH', None) or 0) or None,
read_count = 0,
operation = None,
result = None,
exception = None
)
ENVIRON['cogen.http_connection'] = self
ENVIRON['cogen.core'] = async.COGENOperationWrapper(
ENV_COGEN_PROXY,
core
)
ENVIRON['cogen.call'] = async.COGENCallWrapper(ENV_COGEN_PROXY)
ENVIRON['cogen.input'] = async.COGENOperationWrapper(
ENV_COGEN_PROXY,
self.connfh
)
ENVIRON['cogen.yield'] = async.COGENSimpleWrapper(ENV_COGEN_PROXY)
response = self.wsgi_app(ENVIRON, self.start_response)
#~ print 'WSGI RESPONSE:', response
try:
if isinstance(response, WSGIFileWrapper):
# set tcp_cork to pack the header with the file data
if hasattr(socket, "TCP_CORK"):
self.conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 1)
assert self.started_response, "App returned the wsgi.file_wrapper but didn't call start_response."
assert not self.sent_headers
self.sent_headers = True
yield sockets.SendAll(self.conn,
self.render_headers()+self.write_buffer.getvalue()
)
offset = response.filelike.tell()
if self.chunked_write:
fsize = os.fstat(response.filelike.fileno()).st_size
yield sockets.SendAll(self.conn, hex(int(fsize-offset))+"\r\n")
yield self.conn.sendfile(
response.filelike,
blocksize=response.blocksize,
offset=offset,
length=self.content_length,
timeout=self.sendfile_timeout
)
if self.chunked_write:
yield sockets.SendAll(self.conn, "\r\n")
# also, tcp_cork will make the file data sent on packet boundaries,
                        # which is a good thing
if hasattr(socket, "TCP_CORK"):
self.conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 0)
else:
for chunk in response:
if chunk:
                                assert self.started_response, "App sent a value but hasn't called start_response."
if not self.sent_headers:
self.sent_headers = True
headers = [self.render_headers(), self.write_buffer.getvalue()]
else:
headers = []
if self.chunked_write:
buf = [hex(len(chunk))[2:], "\r\n", chunk, "\r\n"]
if headers:
headers.extend(buf)
yield sockets.SendAll(self.conn, "".join(headers))
else:
yield sockets.SendAll(self.conn, "".join(buf))
else:
if headers:
headers.append(chunk)
yield sockets.SendAll(self.conn, "".join(headers))
else:
yield sockets.SendAll(self.conn, chunk)
else:
if self.started_response:
if not self.sent_headers:
self.sent_headers = True
yield sockets.SendAll(self.conn,
self.render_headers()+self.write_buffer.getvalue())
if ENV_COGEN_PROXY.operation:
op = ENV_COGEN_PROXY.operation
ENV_COGEN_PROXY.operation = None
try:
#~ print 'WSGI OP:', op
ENV_COGEN_PROXY.exception = None
ENV_COGEN_PROXY.result = yield op
#~ print 'WSGI OP RESULT:',ENVIRON['cogen.wsgi'].result
except:
#~ print 'WSGI OP EXCEPTION:', sys.exc_info()
ENV_COGEN_PROXY.exception = sys.exc_info()
ENV_COGEN_PROXY.result = ENV_COGEN_PROXY.exception[1]
del op
finally:
if hasattr(response, 'close'):
response.close()
if self.started_response:
if not self.sent_headers:
self.sent_headers = True
yield sockets.SendAll(self.conn,
self.render_headers()+self.write_buffer.getvalue()
)
else:
import warnings
warnings.warn("App was consumed and hasn't called start_response")
if self.chunked_write:
yield sockets.SendAll(self.conn, "0\r\n\r\n")
if self.close_connection:
return
# TODO: consume any unread data
except (socket.error, OSError, pywinerror), e:
errno = e.args[0]
if errno not in useless_socket_errors:
yield self.simple_response("500 Internal Server Error",
format_exc())
return
except (OperationTimeout, ConnectionClosed, SocketError):
return
except (KeyboardInterrupt, SystemExit, GeneratorExit, MemoryError):
raise
except:
if not self.started_response:
yield self.simple_response(
"500 Internal Server Error",
format_exc()
)
else:
print "*" * 60
traceback.print_exc()
print "*" * 60
sys.exc_clear()
finally:
self.conn.close()
        ENVIRON = self.environ = None | A bit bulky atm... | Below is the instruction that describes the task:
### Input:
A bit bulky atm...
### Response:
def run(self):
"""A bit bulky atm..."""
self.close_connection = False
try:
while True:
self.started_response = False
self.status = ""
self.outheaders = []
self.sent_headers = False
self.chunked_write = False
self.write_buffer = StringIO.StringIO()
self.content_length = None
# Copy the class environ into self.
ENVIRON = self.environ = self.connection_environ.copy()
self.environ.update(self.server_environ)
request_line = yield self.connfh.readline()
if request_line == "\r\n":
# RFC 2616 sec 4.1: "... it should ignore the CRLF."
tolerance = 5
while tolerance and request_line == "\r\n":
request_line = yield self.connfh.readline()
tolerance -= 1
if not tolerance:
return
method, path, req_protocol = request_line.strip().split(" ", 2)
ENVIRON["REQUEST_METHOD"] = method
ENVIRON["CONTENT_LENGTH"] = ''
scheme, location, path, params, qs, frag = urlparse(path)
if frag:
yield self.simple_response("400 Bad Request",
"Illegal #fragment in Request-URI.")
return
if scheme:
ENVIRON["wsgi.url_scheme"] = scheme
if params:
path = path + ";" + params
ENVIRON["SCRIPT_NAME"] = ""
# Unquote the path+params (e.g. "/this%20path" -> "this path").
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
#
# But note that "...a URI must be separated into its components
# before the escaped characters within those components can be
# safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
atoms = [unquote(x) for x in quoted_slash.split(path)]
path = "%2F".join(atoms)
ENVIRON["PATH_INFO"] = path
# Note that, like wsgiref and most other WSGI servers,
# we unquote the path but not the query string.
ENVIRON["QUERY_STRING"] = qs
# Compare request and server HTTP protocol versions, in case our
# server does not support the requested protocol. Limit our output
# to min(req, server). We want the following output:
# request server actual written supported response
# protocol protocol response protocol feature set
# a 1.0 1.0 1.0 1.0
# b 1.0 1.1 1.1 1.0
# c 1.1 1.0 1.0 1.0
# d 1.1 1.1 1.1 1.1
# Notice that, in (b), the response will be "HTTP/1.1" even though
# the client only understands 1.0. RFC 2616 10.5.6 says we should
# only return 505 if the _major_ version is different.
rp = int(req_protocol[5]), int(req_protocol[7])
server_protocol = ENVIRON["ACTUAL_SERVER_PROTOCOL"]
sp = int(server_protocol[5]), int(server_protocol[7])
if sp[0] != rp[0]:
yield self.simple_response("505 HTTP Version Not Supported")
return
# Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
ENVIRON["SERVER_PROTOCOL"] = req_protocol
self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
# If the Request-URI was an absoluteURI, use its location atom.
if location:
ENVIRON["SERVER_NAME"] = location
# then all the http headers
try:
while True:
line = yield self.connfh.readline()
if line == '\r\n':
# Normal end of headers
break
if line[0] in ' \t':
# It's a continuation line.
v = line.strip()
else:
k, v = line.split(":", 1)
k, v = k.strip().upper(), v.strip()
envname = "HTTP_" + k.replace("-", "_")
if k in comma_separated_headers:
existing = ENVIRON.get(envname)
if existing:
v = ", ".join((existing, v))
ENVIRON[envname] = v
ct = ENVIRON.pop("HTTP_CONTENT_TYPE", None)
if ct:
ENVIRON["CONTENT_TYPE"] = ct
cl = ENVIRON.pop("HTTP_CONTENT_LENGTH", None)
if cl:
ENVIRON["CONTENT_LENGTH"] = cl
except ValueError, ex:
yield self.simple_response("400 Bad Request", repr(ex.args))
return
creds = ENVIRON.get("HTTP_AUTHORIZATION", "").split(" ", 1)
ENVIRON["AUTH_TYPE"] = creds[0]
if creds[0].lower() == 'basic':
user, pw = base64.decodestring(creds[1]).split(":", 1)
ENVIRON["REMOTE_USER"] = user
# Persistent connection support
if req_protocol == "HTTP/1.1":
if ENVIRON.get("HTTP_CONNECTION", "") == "close":
self.close_connection = True
else:
# HTTP/1.0
if ENVIRON.get("HTTP_CONNECTION", "").lower() != "keep-alive":
self.close_connection = True
# Transfer-Encoding support
te = None
if self.response_protocol == "HTTP/1.1":
te = ENVIRON.get("HTTP_TRANSFER_ENCODING")
if te:
te = [x.strip().lower() for x in te.split(",") if x.strip()]
if te:
# reject transfer encodings for now
yield self.simple_response("501 Unimplemented")
self.close_connection = True
return
ENV_COGEN_PROXY = ENVIRON['cogen.wsgi'] = async.COGENProxy(
content_length = int(ENVIRON.get('CONTENT_LENGTH', None) or 0) or None,
read_count = 0,
operation = None,
result = None,
exception = None
)
ENVIRON['cogen.http_connection'] = self
ENVIRON['cogen.core'] = async.COGENOperationWrapper(
ENV_COGEN_PROXY,
core
)
ENVIRON['cogen.call'] = async.COGENCallWrapper(ENV_COGEN_PROXY)
ENVIRON['cogen.input'] = async.COGENOperationWrapper(
ENV_COGEN_PROXY,
self.connfh
)
ENVIRON['cogen.yield'] = async.COGENSimpleWrapper(ENV_COGEN_PROXY)
response = self.wsgi_app(ENVIRON, self.start_response)
#~ print 'WSGI RESPONSE:', response
try:
if isinstance(response, WSGIFileWrapper):
# set tcp_cork to pack the header with the file data
if hasattr(socket, "TCP_CORK"):
self.conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 1)
assert self.started_response, "App returned the wsgi.file_wrapper but didn't call start_response."
assert not self.sent_headers
self.sent_headers = True
yield sockets.SendAll(self.conn,
self.render_headers()+self.write_buffer.getvalue()
)
offset = response.filelike.tell()
if self.chunked_write:
fsize = os.fstat(response.filelike.fileno()).st_size
yield sockets.SendAll(self.conn, hex(int(fsize-offset))+"\r\n")
yield self.conn.sendfile(
response.filelike,
blocksize=response.blocksize,
offset=offset,
length=self.content_length,
timeout=self.sendfile_timeout
)
if self.chunked_write:
yield sockets.SendAll(self.conn, "\r\n")
# also, tcp_cork will make the file data sent on packet boundaries,
                        # which is a good thing
if hasattr(socket, "TCP_CORK"):
self.conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 0)
else:
for chunk in response:
if chunk:
                                assert self.started_response, "App sent a value but hasn't called start_response."
if not self.sent_headers:
self.sent_headers = True
headers = [self.render_headers(), self.write_buffer.getvalue()]
else:
headers = []
if self.chunked_write:
buf = [hex(len(chunk))[2:], "\r\n", chunk, "\r\n"]
if headers:
headers.extend(buf)
yield sockets.SendAll(self.conn, "".join(headers))
else:
yield sockets.SendAll(self.conn, "".join(buf))
else:
if headers:
headers.append(chunk)
yield sockets.SendAll(self.conn, "".join(headers))
else:
yield sockets.SendAll(self.conn, chunk)
else:
if self.started_response:
if not self.sent_headers:
self.sent_headers = True
yield sockets.SendAll(self.conn,
self.render_headers()+self.write_buffer.getvalue())
if ENV_COGEN_PROXY.operation:
op = ENV_COGEN_PROXY.operation
ENV_COGEN_PROXY.operation = None
try:
#~ print 'WSGI OP:', op
ENV_COGEN_PROXY.exception = None
ENV_COGEN_PROXY.result = yield op
#~ print 'WSGI OP RESULT:',ENVIRON['cogen.wsgi'].result
except:
#~ print 'WSGI OP EXCEPTION:', sys.exc_info()
ENV_COGEN_PROXY.exception = sys.exc_info()
ENV_COGEN_PROXY.result = ENV_COGEN_PROXY.exception[1]
del op
finally:
if hasattr(response, 'close'):
response.close()
if self.started_response:
if not self.sent_headers:
self.sent_headers = True
yield sockets.SendAll(self.conn,
self.render_headers()+self.write_buffer.getvalue()
)
else:
import warnings
warnings.warn("App was consumed and hasn't called start_response")
if self.chunked_write:
yield sockets.SendAll(self.conn, "0\r\n\r\n")
if self.close_connection:
return
# TODO: consume any unread data
except (socket.error, OSError, pywinerror), e:
errno = e.args[0]
if errno not in useless_socket_errors:
yield self.simple_response("500 Internal Server Error",
format_exc())
return
except (OperationTimeout, ConnectionClosed, SocketError):
return
except (KeyboardInterrupt, SystemExit, GeneratorExit, MemoryError):
raise
except:
if not self.started_response:
yield self.simple_response(
"500 Internal Server Error",
format_exc()
)
else:
print "*" * 60
traceback.print_exc()
print "*" * 60
sys.exc_clear()
finally:
self.conn.close()
ENVIRON = self.environ = None |
def _make_key(self):
"""Make a key for caching files in the LRU cache."""
value = (self._opener,
self._args,
'a' if self._mode == 'w' else self._mode,
tuple(sorted(self._kwargs.items())))
return _HashedSequence(value) | Make a key for caching files in the LRU cache. | Below is the the instruction that describes the task:
### Input:
Make a key for caching files in the LRU cache.
### Response:
def _make_key(self):
"""Make a key for caching files in the LRU cache."""
value = (self._opener,
self._args,
'a' if self._mode == 'w' else self._mode,
tuple(sorted(self._kwargs.items())))
return _HashedSequence(value) |
def word2vec(
train,
output,
size=100,
window=5,
sample="1e-3",
hs=0,
negative=5,
threads=12,
iter_=5,
min_count=5,
alpha=0.025,
debug=2,
binary=1,
cbow=1,
save_vocab=None,
read_vocab=None,
verbose=False,
):
"""
word2vec execution
Parameters for training:
train <file>
Use text data from <file> to train the model
output <file>
Use <file> to save the resulting word vectors / word clusters
size <int>
Set size of word vectors; default is 100
window <int>
Set max skip length between words; default is 5
sample <float>
Set threshold for occurrence of words. Those that appear with
higher frequency in the training data will be randomly
down-sampled; default is 0 (off), useful value is 1e-5
hs <int>
Use Hierarchical Softmax; default is 1 (0 = not used)
negative <int>
Number of negative examples; default is 0, common values are 5 - 10
(0 = not used)
threads <int>
Use <int> threads (default 1)
min_count <int>
This will discard words that appear less than <int> times; default
is 5
alpha <float>
Set the starting learning rate; default is 0.025
debug <int>
Set the debug mode (default = 2 = more info during training)
binary <int>
            Save the resulting vectors in binary mode; default is 0 (off)
        cbow <int>
            Use the continuous bag of words model; default is 1 (use 0 for
skip-gram model)
save_vocab <file>
The vocabulary will be saved to <file>
read_vocab <file>
The vocabulary will be read from <file>, not constructed from the
training data
verbose
Print output from training
"""
command = ["word2vec"]
args = [
"-train",
"-output",
"-size",
"-window",
"-sample",
"-hs",
"-negative",
"-threads",
"-iter",
"-min-count",
"-alpha",
"-debug",
"-binary",
"-cbow",
]
values = [
train,
output,
size,
window,
sample,
hs,
negative,
threads,
iter_,
min_count,
alpha,
debug,
binary,
cbow,
]
for arg, value in zip(args, values):
command.append(arg)
command.append(str(value))
if save_vocab is not None:
command.append("-save-vocab")
command.append(str(save_vocab))
if read_vocab is not None:
command.append("-read-vocab")
command.append(str(read_vocab))
run_cmd(command, verbose=verbose) | word2vec execution
Parameters for training:
train <file>
Use text data from <file> to train the model
output <file>
Use <file> to save the resulting word vectors / word clusters
size <int>
Set size of word vectors; default is 100
window <int>
Set max skip length between words; default is 5
sample <float>
Set threshold for occurrence of words. Those that appear with
higher frequency in the training data will be randomly
down-sampled; default is 0 (off), useful value is 1e-5
hs <int>
Use Hierarchical Softmax; default is 1 (0 = not used)
negative <int>
Number of negative examples; default is 0, common values are 5 - 10
(0 = not used)
threads <int>
Use <int> threads (default 1)
min_count <int>
This will discard words that appear less than <int> times; default
is 5
alpha <float>
Set the starting learning rate; default is 0.025
debug <int>
Set the debug mode (default = 2 = more info during training)
binary <int>
            Save the resulting vectors in binary mode; default is 0 (off)
        cbow <int>
            Use the continuous bag of words model; default is 1 (use 0 for
skip-gram model)
save_vocab <file>
The vocabulary will be saved to <file>
read_vocab <file>
The vocabulary will be read from <file>, not constructed from the
training data
verbose
Print output from training | Below is the the instruction that describes the task:
### Input:
word2vec execution
Parameters for training:
train <file>
Use text data from <file> to train the model
output <file>
Use <file> to save the resulting word vectors / word clusters
size <int>
Set size of word vectors; default is 100
window <int>
Set max skip length between words; default is 5
sample <float>
Set threshold for occurrence of words. Those that appear with
higher frequency in the training data will be randomly
down-sampled; default is 0 (off), useful value is 1e-5
hs <int>
Use Hierarchical Softmax; default is 1 (0 = not used)
negative <int>
Number of negative examples; default is 0, common values are 5 - 10
(0 = not used)
threads <int>
Use <int> threads (default 1)
min_count <int>
This will discard words that appear less than <int> times; default
is 5
alpha <float>
Set the starting learning rate; default is 0.025
debug <int>
Set the debug mode (default = 2 = more info during training)
binary <int>
        Save the resulting vectors in binary mode; default is 0 (off)
    cbow <int>
        Use the continuous bag of words model; default is 1 (use 0 for
skip-gram model)
save_vocab <file>
The vocabulary will be saved to <file>
read_vocab <file>
The vocabulary will be read from <file>, not constructed from the
training data
verbose
Print output from training
### Response:
def word2vec(
train,
output,
size=100,
window=5,
sample="1e-3",
hs=0,
negative=5,
threads=12,
iter_=5,
min_count=5,
alpha=0.025,
debug=2,
binary=1,
cbow=1,
save_vocab=None,
read_vocab=None,
verbose=False,
):
"""
word2vec execution
Parameters for training:
train <file>
Use text data from <file> to train the model
output <file>
Use <file> to save the resulting word vectors / word clusters
size <int>
Set size of word vectors; default is 100
window <int>
Set max skip length between words; default is 5
sample <float>
Set threshold for occurrence of words. Those that appear with
higher frequency in the training data will be randomly
down-sampled; default is 0 (off), useful value is 1e-5
hs <int>
Use Hierarchical Softmax; default is 1 (0 = not used)
negative <int>
Number of negative examples; default is 0, common values are 5 - 10
(0 = not used)
threads <int>
Use <int> threads (default 1)
min_count <int>
This will discard words that appear less than <int> times; default
is 5
alpha <float>
Set the starting learning rate; default is 0.025
debug <int>
Set the debug mode (default = 2 = more info during training)
binary <int>
            Save the resulting vectors in binary mode; default is 0 (off)
        cbow <int>
            Use the continuous bag of words model; default is 1 (use 0 for
skip-gram model)
save_vocab <file>
The vocabulary will be saved to <file>
read_vocab <file>
The vocabulary will be read from <file>, not constructed from the
training data
verbose
Print output from training
"""
command = ["word2vec"]
args = [
"-train",
"-output",
"-size",
"-window",
"-sample",
"-hs",
"-negative",
"-threads",
"-iter",
"-min-count",
"-alpha",
"-debug",
"-binary",
"-cbow",
]
values = [
train,
output,
size,
window,
sample,
hs,
negative,
threads,
iter_,
min_count,
alpha,
debug,
binary,
cbow,
]
for arg, value in zip(args, values):
command.append(arg)
command.append(str(value))
if save_vocab is not None:
command.append("-save-vocab")
command.append(str(save_vocab))
if read_vocab is not None:
command.append("-read-vocab")
command.append(str(read_vocab))
run_cmd(command, verbose=verbose) |
def get(self, name, default=None):
'''
Gets the object for "name", or None if there's no such object. If
"default" is provided, return it if no object is found.
'''
session = self.__get_session_from_db()
return session.get(name, default) | Gets the object for "name", or None if there's no such object. If
"default" is provided, return it if no object is found. | Below is the the instruction that describes the task:
### Input:
Gets the object for "name", or None if there's no such object. If
"default" is provided, return it if no object is found.
### Response:
def get(self, name, default=None):
'''
Gets the object for "name", or None if there's no such object. If
"default" is provided, return it if no object is found.
'''
session = self.__get_session_from_db()
return session.get(name, default) |
def get_computation(self,
message: Message,
transaction_context: 'BaseTransactionContext') -> 'BaseComputation':
"""
Return a computation instance for the given `message` and `transaction_context`
"""
if self.computation_class is None:
raise AttributeError("No `computation_class` has been set for this State")
else:
computation = self.computation_class(self, message, transaction_context)
return computation | Return a computation instance for the given `message` and `transaction_context` | Below is the the instruction that describes the task:
### Input:
Return a computation instance for the given `message` and `transaction_context`
### Response:
def get_computation(self,
message: Message,
transaction_context: 'BaseTransactionContext') -> 'BaseComputation':
"""
Return a computation instance for the given `message` and `transaction_context`
"""
if self.computation_class is None:
raise AttributeError("No `computation_class` has been set for this State")
else:
computation = self.computation_class(self, message, transaction_context)
return computation |
def _GetUsernameFromProfilePath(self, path):
"""Retrieves the username from a Windows profile path.
    Trailing path segments are ignored.
Args:
path (str): a Windows path with '\\' as path segment separator.
Returns:
str: basename which is the last path segment.
"""
# Strip trailing key separators.
while path and path[-1] == '\\':
path = path[:-1]
if path:
_, _, path = path.rpartition('\\')
return path | Retrieves the username from a Windows profile path.
    Trailing path segments are ignored.
Args:
path (str): a Windows path with '\\' as path segment separator.
Returns:
str: basename which is the last path segment. | Below is the the instruction that describes the task:
### Input:
Retrieves the username from a Windows profile path.
    Trailing path segments are ignored.
Args:
path (str): a Windows path with '\\' as path segment separator.
Returns:
str: basename which is the last path segment.
### Response:
def _GetUsernameFromProfilePath(self, path):
"""Retrieves the username from a Windows profile path.
        Trailing path segments are ignored.
Args:
path (str): a Windows path with '\\' as path segment separator.
Returns:
str: basename which is the last path segment.
"""
# Strip trailing key separators.
while path and path[-1] == '\\':
path = path[:-1]
if path:
_, _, path = path.rpartition('\\')
return path |
def is_depsignal_handler(class_, signal_name, cb, *, defer=False):
"""
Return true if `cb` has been decorated with :func:`depsignal` for the given
signal, class and connection mode.
"""
try:
handlers = get_magic_attr(cb)
except AttributeError:
return False
return _depsignal_spec(class_, signal_name, cb, defer) in handlers | Return true if `cb` has been decorated with :func:`depsignal` for the given
signal, class and connection mode. | Below is the the instruction that describes the task:
### Input:
Return true if `cb` has been decorated with :func:`depsignal` for the given
signal, class and connection mode.
### Response:
def is_depsignal_handler(class_, signal_name, cb, *, defer=False):
"""
Return true if `cb` has been decorated with :func:`depsignal` for the given
signal, class and connection mode.
"""
try:
handlers = get_magic_attr(cb)
except AttributeError:
return False
return _depsignal_spec(class_, signal_name, cb, defer) in handlers |
def _handle_next_task(self):
"""
We have to catch three ways a task can be "done":
1. normal execution: the task runs/fails and puts a result back on the queue,
2. new dependencies: the task yielded new deps that were not complete and
will be rescheduled and dependencies added,
3. child process dies: we need to catch this separately.
"""
self._idle_since = None
while True:
self._purge_children() # Deal with subprocess failures
try:
task_id, status, expl, missing, new_requirements = (
self._task_result_queue.get(
timeout=self._config.wait_interval))
except Queue.Empty:
return
task = self._scheduled_tasks[task_id]
if not task or task_id not in self._running_tasks:
continue
# Not a running task. Probably already removed.
# Maybe it yielded something?
# external task if run not implemented, retry-able if config option is enabled.
external_task_retryable = _is_external(task) and self._config.retry_external_tasks
if status == FAILED and not external_task_retryable:
self._email_task_failure(task, expl)
new_deps = []
if new_requirements:
new_req = [load_task(module, name, params)
for module, name, params in new_requirements]
for t in new_req:
self.add(t)
new_deps = [t.task_id for t in new_req]
self._add_task(worker=self._id,
task_id=task_id,
status=status,
expl=json.dumps(expl),
resources=task.process_resources(),
runnable=None,
params=task.to_str_params(),
family=task.task_family,
module=task.task_module,
new_deps=new_deps,
assistant=self._assistant,
retry_policy_dict=_get_retry_policy_dict(task))
self._running_tasks.pop(task_id)
# re-add task to reschedule missing dependencies
if missing:
reschedule = True
# keep out of infinite loops by not rescheduling too many times
for task_id in missing:
self.unfulfilled_counts[task_id] += 1
if (self.unfulfilled_counts[task_id] >
self._config.max_reschedules):
reschedule = False
if reschedule:
self.add(task)
self.run_succeeded &= (status == DONE) or (len(new_deps) > 0)
return | We have to catch three ways a task can be "done":
1. normal execution: the task runs/fails and puts a result back on the queue,
2. new dependencies: the task yielded new deps that were not complete and
will be rescheduled and dependencies added,
3. child process dies: we need to catch this separately. | Below is the the instruction that describes the task:
### Input:
We have to catch three ways a task can be "done":
1. normal execution: the task runs/fails and puts a result back on the queue,
2. new dependencies: the task yielded new deps that were not complete and
will be rescheduled and dependencies added,
3. child process dies: we need to catch this separately.
### Response:
def _handle_next_task(self):
"""
We have to catch three ways a task can be "done":
1. normal execution: the task runs/fails and puts a result back on the queue,
2. new dependencies: the task yielded new deps that were not complete and
will be rescheduled and dependencies added,
3. child process dies: we need to catch this separately.
"""
self._idle_since = None
while True:
self._purge_children() # Deal with subprocess failures
try:
task_id, status, expl, missing, new_requirements = (
self._task_result_queue.get(
timeout=self._config.wait_interval))
except Queue.Empty:
return
task = self._scheduled_tasks[task_id]
if not task or task_id not in self._running_tasks:
continue
# Not a running task. Probably already removed.
# Maybe it yielded something?
# external task if run not implemented, retry-able if config option is enabled.
external_task_retryable = _is_external(task) and self._config.retry_external_tasks
if status == FAILED and not external_task_retryable:
self._email_task_failure(task, expl)
new_deps = []
if new_requirements:
new_req = [load_task(module, name, params)
for module, name, params in new_requirements]
for t in new_req:
self.add(t)
new_deps = [t.task_id for t in new_req]
self._add_task(worker=self._id,
task_id=task_id,
status=status,
expl=json.dumps(expl),
resources=task.process_resources(),
runnable=None,
params=task.to_str_params(),
family=task.task_family,
module=task.task_module,
new_deps=new_deps,
assistant=self._assistant,
retry_policy_dict=_get_retry_policy_dict(task))
self._running_tasks.pop(task_id)
# re-add task to reschedule missing dependencies
if missing:
reschedule = True
# keep out of infinite loops by not rescheduling too many times
for task_id in missing:
self.unfulfilled_counts[task_id] += 1
if (self.unfulfilled_counts[task_id] >
self._config.max_reschedules):
reschedule = False
if reschedule:
self.add(task)
self.run_succeeded &= (status == DONE) or (len(new_deps) > 0)
return |
def local_open(url):
"""Read a local path, with special support for directories"""
scheme, server, path, param, query, frag = urlparse(url)
filename = url2pathname(path)
if os.path.isfile(filename):
return urllib2.urlopen(url)
elif path.endswith('/') and os.path.isdir(filename):
files = []
for f in os.listdir(filename):
if f=='index.html':
with open(os.path.join(filename,f),'r') as fp:
body = fp.read()
break
elif os.path.isdir(os.path.join(filename,f)):
f+='/'
files.append("<a href=%r>%s</a>" % (f,f))
else:
body = ("<html><head><title>%s</title>" % url) + \
"</head><body>%s</body></html>" % '\n'.join(files)
status, message = 200, "OK"
else:
status, message, body = 404, "Path not found", "Not found"
headers = {'content-type': 'text/html'}
return HTTPError(url, status, message, headers, StringIO(body)) | Read a local path, with special support for directories | Below is the the instruction that describes the task:
### Input:
Read a local path, with special support for directories
### Response:
def local_open(url):
"""Read a local path, with special support for directories"""
scheme, server, path, param, query, frag = urlparse(url)
filename = url2pathname(path)
if os.path.isfile(filename):
return urllib2.urlopen(url)
elif path.endswith('/') and os.path.isdir(filename):
files = []
for f in os.listdir(filename):
if f=='index.html':
with open(os.path.join(filename,f),'r') as fp:
body = fp.read()
break
elif os.path.isdir(os.path.join(filename,f)):
f+='/'
files.append("<a href=%r>%s</a>" % (f,f))
else:
body = ("<html><head><title>%s</title>" % url) + \
"</head><body>%s</body></html>" % '\n'.join(files)
status, message = 200, "OK"
else:
status, message, body = 404, "Path not found", "Not found"
headers = {'content-type': 'text/html'}
return HTTPError(url, status, message, headers, StringIO(body)) |
def _encode_long(name, value, dummy0, dummy1):
"""Encode a python long (python 2.x)"""
try:
return b"\x12" + name + _PACK_LONG(value)
except struct.error:
raise OverflowError("BSON can only handle up to 8-byte ints") | Encode a python long (python 2.x) | Below is the the instruction that describes the task:
### Input:
Encode a python long (python 2.x)
### Response:
def _encode_long(name, value, dummy0, dummy1):
"""Encode a python long (python 2.x)"""
try:
return b"\x12" + name + _PACK_LONG(value)
except struct.error:
raise OverflowError("BSON can only handle up to 8-byte ints") |
def _update_message(self, sending_cluster):
"""
This is the message-update method.
Parameters
----------
sending_cluster: The resulting messages are lambda_{c-->s} from the given
cluster 'c' to all of its intersection_sets 's'.
Here 's' are the elements of intersection_sets_for_cluster_c.
Reference
---------
Fixing Max-Product: Convergent Message-Passing Algorithms for MAP LP Relaxations
by Amir Globerson and Tommi Jaakkola.
Section 6, Page: 5; Beyond pairwise potentials: Generalized MPLP
Later Modified by Sontag in "Introduction to Dual decomposition for Inference" Pg: 7 & 17
"""
# The new updates will take place for the intersection_sets of this cluster.
# The new updates are:
# \delta_{f \rightarrow i}(x_i) = - \delta_i^{-f} +
# 1/{\| f \|} max_{x_{f-i}}\left[{\theta_f(x_f) + \sum_{i' in f}{\delta_{i'}^{-f}}(x_i')} \right ]
# Step. 1) Calculate {\theta_f(x_f) + \sum_{i' in f}{\delta_{i'}^{-f}}(x_i')}
objective_cluster = self.objective[sending_cluster.cluster_variables]
for current_intersect in sending_cluster.intersection_sets_for_cluster_c:
objective_cluster += self.objective[current_intersect]
updated_results = []
objective = []
for current_intersect in sending_cluster.intersection_sets_for_cluster_c:
# Step. 2) Maximize step.1 result wrt variables present in the cluster but not in the current intersect.
phi = objective_cluster.maximize(list(sending_cluster.cluster_variables - current_intersect),
inplace=False)
# Step. 3) Multiply 1/{\| f \|}
intersection_length = len(sending_cluster.intersection_sets_for_cluster_c)
phi *= (1 / intersection_length)
objective.append(phi)
# Step. 4) Subtract \delta_i^{-f}
# These are the messages not emanating from the sending cluster but going into the current intersect.
# which is = Objective[current_intersect_node] - messages from the cluster to the current intersect node.
updated_results.append(phi + -1 * (self.objective[current_intersect] + -1 * sending_cluster.
message_from_cluster[current_intersect]))
# This loop is primarily for simultaneous updating:
# 1. This cluster's message to each of the intersects.
# 2. The value of the Objective for intersection_nodes.
index = -1
cluster_potential = copy.deepcopy(sending_cluster.cluster_potential)
for current_intersect in sending_cluster.intersection_sets_for_cluster_c:
index += 1
sending_cluster.message_from_cluster[current_intersect] = updated_results[index]
self.objective[current_intersect] = objective[index]
cluster_potential += (-1) * updated_results[index]
# Here we update the Objective for the current factor.
self.objective[sending_cluster.cluster_variables] = cluster_potential | This is the message-update method.
Parameters
----------
sending_cluster: The resulting messages are lambda_{c-->s} from the given
cluster 'c' to all of its intersection_sets 's'.
Here 's' are the elements of intersection_sets_for_cluster_c.
Reference
---------
Fixing Max-Product: Convergent Message-Passing Algorithms for MAP LP Relaxations
by Amir Globerson and Tommi Jaakkola.
Section 6, Page: 5; Beyond pairwise potentials: Generalized MPLP
Later Modified by Sontag in "Introduction to Dual decomposition for Inference" Pg: 7 & 17 | Below is the the instruction that describes the task:
### Input:
This is the message-update method.
Parameters
----------
sending_cluster: The resulting messages are lambda_{c-->s} from the given
cluster 'c' to all of its intersection_sets 's'.
Here 's' are the elements of intersection_sets_for_cluster_c.
Reference
---------
Fixing Max-Product: Convergent Message-Passing Algorithms for MAP LP Relaxations
by Amir Globerson and Tommi Jaakkola.
Section 6, Page: 5; Beyond pairwise potentials: Generalized MPLP
Later Modified by Sontag in "Introduction to Dual decomposition for Inference" Pg: 7 & 17
### Response:
def _update_message(self, sending_cluster):
"""
This is the message-update method.
Parameters
----------
sending_cluster: The resulting messages are lambda_{c-->s} from the given
cluster 'c' to all of its intersection_sets 's'.
Here 's' are the elements of intersection_sets_for_cluster_c.
Reference
---------
Fixing Max-Product: Convergent Message-Passing Algorithms for MAP LP Relaxations
by Amir Globerson and Tommi Jaakkola.
Section 6, Page: 5; Beyond pairwise potentials: Generalized MPLP
Later Modified by Sontag in "Introduction to Dual decomposition for Inference" Pg: 7 & 17
"""
# The new updates will take place for the intersection_sets of this cluster.
# The new updates are:
# \delta_{f \rightarrow i}(x_i) = - \delta_i^{-f} +
# 1/{\| f \|} max_{x_{f-i}}\left[{\theta_f(x_f) + \sum_{i' in f}{\delta_{i'}^{-f}}(x_i')} \right ]
# Step. 1) Calculate {\theta_f(x_f) + \sum_{i' in f}{\delta_{i'}^{-f}}(x_i')}
objective_cluster = self.objective[sending_cluster.cluster_variables]
for current_intersect in sending_cluster.intersection_sets_for_cluster_c:
objective_cluster += self.objective[current_intersect]
updated_results = []
objective = []
for current_intersect in sending_cluster.intersection_sets_for_cluster_c:
# Step. 2) Maximize step.1 result wrt variables present in the cluster but not in the current intersect.
phi = objective_cluster.maximize(list(sending_cluster.cluster_variables - current_intersect),
inplace=False)
# Step. 3) Multiply 1/{\| f \|}
intersection_length = len(sending_cluster.intersection_sets_for_cluster_c)
phi *= (1 / intersection_length)
objective.append(phi)
# Step. 4) Subtract \delta_i^{-f}
# These are the messages not emanating from the sending cluster but going into the current intersect.
# which is = Objective[current_intersect_node] - messages from the cluster to the current intersect node.
updated_results.append(phi + -1 * (self.objective[current_intersect] + -1 * sending_cluster.
message_from_cluster[current_intersect]))
# This loop is primarily for simultaneous updating:
# 1. This cluster's message to each of the intersects.
# 2. The value of the Objective for intersection_nodes.
index = -1
cluster_potential = copy.deepcopy(sending_cluster.cluster_potential)
for current_intersect in sending_cluster.intersection_sets_for_cluster_c:
index += 1
sending_cluster.message_from_cluster[current_intersect] = updated_results[index]
self.objective[current_intersect] = objective[index]
cluster_potential += (-1) * updated_results[index]
# Here we update the Objective for the current factor.
self.objective[sending_cluster.cluster_variables] = cluster_potential |
def distance_to_semi_arc(alon, alat, aazimuth, plons, plats):
"""
    In this method we use a reference system centered on (alon, alat) and with
    the y-axis corresponding to the aazimuth direction to calculate the minimum
    distance from a semiarc that originates at (alon, alat).
Parameters are the same as for :func:`distance_to_arc`.
"""
if type(plons) is float:
plons = numpy.array([plons])
plats = numpy.array([plats])
azimuth_to_target = azimuth(alon, alat, plons, plats)
# Find the indexes of the points in the positive y halfspace
idx = numpy.nonzero(numpy.cos(
numpy.radians((aazimuth-azimuth_to_target))) > 0.0)
# Find the indexes of the points in the negative y halfspace
idx_not = numpy.nonzero(numpy.cos(
numpy.radians((aazimuth-azimuth_to_target))) <= 0.0)
idx_ll_quadr = numpy.nonzero(
(numpy.cos(numpy.radians((aazimuth-azimuth_to_target))) <= 0.0) &
(numpy.sin(numpy.radians((aazimuth-azimuth_to_target))) > 0.0))
# Initialise the array containing the final distances
distance = numpy.zeros_like(plons)
# Compute the distance between the semi-arc with 'aazimuth' direction
# and the set of sites in the positive half-space. The shortest distance to
# the semi-arc in this case can be computed using the function
# :func:`openquake.hazardlib.geo.geodetic.distance_to_arc`.
if len(idx):
distance_to_target = geodetic_distance(alon, alat,
plons[idx], plats[idx])
t_angle = (azimuth_to_target[idx] - aazimuth + 360) % 360
angle = numpy.arccos((numpy.sin(numpy.radians(t_angle)) *
numpy.sin(distance_to_target /
EARTH_RADIUS)))
distance[idx] = (numpy.pi / 2 - angle) * EARTH_RADIUS
# Compute the distance between the reference point and the set of sites
# in the negative half-space. The shortest distance for the semi-arc for
# all the points in the negative semi-space simply corresponds to the
# shortest distance to its origin.
if len(idx_not):
distance[idx_not] = geodetic_distance(alon, alat,
plons[idx_not], plats[idx_not])
distance[idx_ll_quadr] = -1 * distance[idx_ll_quadr]
    return distance | In this method we use a reference system centered on (alon, alat) and with
    the y-axis corresponding to the aazimuth direction to calculate the minimum
    distance from a semiarc that originates at (alon, alat).
Parameters are the same as for :func:`distance_to_arc`. | Below is the the instruction that describes the task:
### Input:
    In this method we use a reference system centered on (alon, alat) and with
    the y-axis corresponding to the aazimuth direction to calculate the minimum
    distance from a semiarc that originates at (alon, alat).
Parameters are the same as for :func:`distance_to_arc`.
### Response:
def distance_to_semi_arc(alon, alat, aazimuth, plons, plats):
"""
    In this method we use a reference system centered on (alon, alat) and with
    the y-axis corresponding to the aazimuth direction to calculate the minimum
    distance from a semiarc that originates at (alon, alat).
Parameters are the same as for :func:`distance_to_arc`.
"""
if type(plons) is float:
plons = numpy.array([plons])
plats = numpy.array([plats])
azimuth_to_target = azimuth(alon, alat, plons, plats)
# Find the indexes of the points in the positive y halfspace
idx = numpy.nonzero(numpy.cos(
numpy.radians((aazimuth-azimuth_to_target))) > 0.0)
# Find the indexes of the points in the negative y halfspace
idx_not = numpy.nonzero(numpy.cos(
numpy.radians((aazimuth-azimuth_to_target))) <= 0.0)
idx_ll_quadr = numpy.nonzero(
(numpy.cos(numpy.radians((aazimuth-azimuth_to_target))) <= 0.0) &
(numpy.sin(numpy.radians((aazimuth-azimuth_to_target))) > 0.0))
# Initialise the array containing the final distances
distance = numpy.zeros_like(plons)
# Compute the distance between the semi-arc with 'aazimuth' direction
# and the set of sites in the positive half-space. The shortest distance to
# the semi-arc in this case can be computed using the function
# :func:`openquake.hazardlib.geo.geodetic.distance_to_arc`.
if len(idx):
distance_to_target = geodetic_distance(alon, alat,
plons[idx], plats[idx])
t_angle = (azimuth_to_target[idx] - aazimuth + 360) % 360
angle = numpy.arccos((numpy.sin(numpy.radians(t_angle)) *
numpy.sin(distance_to_target /
EARTH_RADIUS)))
distance[idx] = (numpy.pi / 2 - angle) * EARTH_RADIUS
# Compute the distance between the reference point and the set of sites
# in the negative half-space. The shortest distance for the semi-arc for
# all the points in the negative semi-space simply corresponds to the
# shortest distance to its origin.
if len(idx_not):
distance[idx_not] = geodetic_distance(alon, alat,
plons[idx_not], plats[idx_not])
distance[idx_ll_quadr] = -1 * distance[idx_ll_quadr]
return distance |
def set_timeout(name, value, power='ac', scheme=None):
'''
Set the sleep timeouts of specific items such as disk, monitor, etc.
Args:
name (str)
The setting to change, can be one of the following:
- ``monitor``
- ``disk``
- ``standby``
- ``hibernate``
value (int):
The amount of time in minutes before the item will timeout
power (str):
Set the value for AC or DC power. Default is ``ac``. Valid options
are:
- ``ac`` (AC Power)
- ``dc`` (Battery)
scheme (str):
The scheme to use, leave as ``None`` to use the current. Default is
``None``. This can be the GUID or the Alias for the Scheme. Known
Aliases are:
- ``SCHEME_BALANCED`` - Balanced
- ``SCHEME_MAX`` - Power saver
- ``SCHEME_MIN`` - High performance
CLI Example:
.. code-block:: yaml
# Set monitor timeout to 30 minutes on Battery
monitor:
powercfg.set_timeout:
- value: 30
- power: dc
# Set disk timeout to 10 minutes on AC Power
disk:
powercfg.set_timeout:
- value: 10
- power: ac
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
# Validate name values
name = name.lower()
if name not in ['monitor', 'disk', 'standby', 'hibernate']:
ret['result'] = False
ret['comment'] = '"{0}" is not a valid setting'.format(name)
log.debug(ret['comment'])
return ret
# Validate power values
power = power.lower()
if power not in ['ac', 'dc']:
ret['result'] = False
ret['comment'] = '"{0}" is not a power type'.format(power)
log.debug(ret['comment'])
return ret
# Get current settings
old = __salt__['powercfg.get_{0}_timeout'.format(name)](scheme=scheme)
# Check current settings
if old[power] == value:
ret['comment'] = '{0} timeout on {1} power is already set to {2}' \
''.format(name.capitalize(), power.upper(), value)
return ret
else:
ret['comment'] = '{0} timeout on {1} power will be set to {2}' \
''.format(name.capitalize(), power.upper(), value)
# Check for test=True
if __opts__['test']:
ret['result'] = None
return ret
# Set the timeout value
__salt__['powercfg.set_{0}_timeout'.format(name)](
timeout=value,
power=power,
scheme=scheme)
# Get the setting after the change
new = __salt__['powercfg.get_{0}_timeout'.format(name)](scheme=scheme)
changes = salt.utils.data.compare_dicts(old, new)
if changes:
ret['changes'] = {name: changes}
ret['comment'] = '{0} timeout on {1} power set to {2}' \
''.format(name.capitalize(), power.upper(), value)
log.debug(ret['comment'])
else:
ret['changes'] = {}
ret['comment'] = 'Failed to set {0} timeout on {1} power to {2}' \
''.format(name, power.upper(), value)
log.debug(ret['comment'])
ret['result'] = False
return ret | Set the sleep timeouts of specific items such as disk, monitor, etc.
Args:
name (str)
The setting to change, can be one of the following:
- ``monitor``
- ``disk``
- ``standby``
- ``hibernate``
value (int):
The amount of time in minutes before the item will timeout
power (str):
Set the value for AC or DC power. Default is ``ac``. Valid options
are:
- ``ac`` (AC Power)
- ``dc`` (Battery)
scheme (str):
The scheme to use, leave as ``None`` to use the current. Default is
``None``. This can be the GUID or the Alias for the Scheme. Known
Aliases are:
- ``SCHEME_BALANCED`` - Balanced
- ``SCHEME_MAX`` - Power saver
- ``SCHEME_MIN`` - High performance
CLI Example:
.. code-block:: yaml
# Set monitor timeout to 30 minutes on Battery
monitor:
powercfg.set_timeout:
- value: 30
- power: dc
# Set disk timeout to 10 minutes on AC Power
disk:
powercfg.set_timeout:
- value: 10
- power: ac | Below is the the instruction that describes the task:
### Input:
Set the sleep timeouts of specific items such as disk, monitor, etc.
Args:
name (str)
The setting to change, can be one of the following:
- ``monitor``
- ``disk``
- ``standby``
- ``hibernate``
value (int):
The amount of time in minutes before the item will timeout
power (str):
Set the value for AC or DC power. Default is ``ac``. Valid options
are:
- ``ac`` (AC Power)
- ``dc`` (Battery)
scheme (str):
The scheme to use, leave as ``None`` to use the current. Default is
``None``. This can be the GUID or the Alias for the Scheme. Known
Aliases are:
- ``SCHEME_BALANCED`` - Balanced
- ``SCHEME_MAX`` - Power saver
- ``SCHEME_MIN`` - High performance
CLI Example:
.. code-block:: yaml
# Set monitor timeout to 30 minutes on Battery
monitor:
powercfg.set_timeout:
- value: 30
- power: dc
# Set disk timeout to 10 minutes on AC Power
disk:
powercfg.set_timeout:
- value: 10
- power: ac
### Response:
def set_timeout(name, value, power='ac', scheme=None):
'''
Set the sleep timeouts of specific items such as disk, monitor, etc.
Args:
name (str)
The setting to change, can be one of the following:
- ``monitor``
- ``disk``
- ``standby``
- ``hibernate``
value (int):
The amount of time in minutes before the item will timeout
power (str):
Set the value for AC or DC power. Default is ``ac``. Valid options
are:
- ``ac`` (AC Power)
- ``dc`` (Battery)
scheme (str):
The scheme to use, leave as ``None`` to use the current. Default is
``None``. This can be the GUID or the Alias for the Scheme. Known
Aliases are:
- ``SCHEME_BALANCED`` - Balanced
- ``SCHEME_MAX`` - Power saver
- ``SCHEME_MIN`` - High performance
CLI Example:
.. code-block:: yaml
# Set monitor timeout to 30 minutes on Battery
monitor:
powercfg.set_timeout:
- value: 30
- power: dc
# Set disk timeout to 10 minutes on AC Power
disk:
powercfg.set_timeout:
- value: 10
- power: ac
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
# Validate name values
name = name.lower()
if name not in ['monitor', 'disk', 'standby', 'hibernate']:
ret['result'] = False
ret['comment'] = '"{0}" is not a valid setting'.format(name)
log.debug(ret['comment'])
return ret
# Validate power values
power = power.lower()
if power not in ['ac', 'dc']:
ret['result'] = False
ret['comment'] = '"{0}" is not a power type'.format(power)
log.debug(ret['comment'])
return ret
# Get current settings
old = __salt__['powercfg.get_{0}_timeout'.format(name)](scheme=scheme)
# Check current settings
if old[power] == value:
ret['comment'] = '{0} timeout on {1} power is already set to {2}' \
''.format(name.capitalize(), power.upper(), value)
return ret
else:
ret['comment'] = '{0} timeout on {1} power will be set to {2}' \
''.format(name.capitalize(), power.upper(), value)
# Check for test=True
if __opts__['test']:
ret['result'] = None
return ret
# Set the timeout value
__salt__['powercfg.set_{0}_timeout'.format(name)](
timeout=value,
power=power,
scheme=scheme)
# Get the setting after the change
new = __salt__['powercfg.get_{0}_timeout'.format(name)](scheme=scheme)
changes = salt.utils.data.compare_dicts(old, new)
if changes:
ret['changes'] = {name: changes}
ret['comment'] = '{0} timeout on {1} power set to {2}' \
''.format(name.capitalize(), power.upper(), value)
log.debug(ret['comment'])
else:
ret['changes'] = {}
ret['comment'] = 'Failed to set {0} timeout on {1} power to {2}' \
''.format(name, power.upper(), value)
log.debug(ret['comment'])
ret['result'] = False
return ret |
def fast_compare(tree1, tree2):
""" This is optimized to compare two AST trees for equality.
It makes several assumptions that are currently true for
AST trees used by rtrip, and it doesn't examine the _attributes.
"""
geta = ast.AST.__getattribute__
work = [(tree1, tree2)]
pop = work.pop
extend = work.extend
# TypeError in cPython, AttributeError in PyPy
exception = TypeError, AttributeError
zipl = zip_longest
type_ = type
list_ = list
while work:
n1, n2 = pop()
try:
f1 = geta(n1, '_fields')
f2 = geta(n2, '_fields')
except exception:
if type_(n1) is list_:
extend(zipl(n1, n2))
continue
if n1 == n2:
continue
return False
else:
f1 = [x for x in f1 if x != 'ctx']
if f1 != [x for x in f2 if x != 'ctx']:
return False
extend((geta(n1, fname), geta(n2, fname)) for fname in f1)
return True | This is optimized to compare two AST trees for equality.
It makes several assumptions that are currently true for
AST trees used by rtrip, and it doesn't examine the _attributes. | Below is the the instruction that describes the task:
### Input:
This is optimized to compare two AST trees for equality.
It makes several assumptions that are currently true for
AST trees used by rtrip, and it doesn't examine the _attributes.
### Response:
def fast_compare(tree1, tree2):
""" This is optimized to compare two AST trees for equality.
It makes several assumptions that are currently true for
AST trees used by rtrip, and it doesn't examine the _attributes.
"""
geta = ast.AST.__getattribute__
work = [(tree1, tree2)]
pop = work.pop
extend = work.extend
# TypeError in cPython, AttributeError in PyPy
exception = TypeError, AttributeError
zipl = zip_longest
type_ = type
list_ = list
while work:
n1, n2 = pop()
try:
f1 = geta(n1, '_fields')
f2 = geta(n2, '_fields')
except exception:
if type_(n1) is list_:
extend(zipl(n1, n2))
continue
if n1 == n2:
continue
return False
else:
f1 = [x for x in f1 if x != 'ctx']
if f1 != [x for x in f2 if x != 'ctx']:
return False
extend((geta(n1, fname), geta(n2, fname)) for fname in f1)
return True |
def _call_cmd_line(self):
"""Run the command line tool."""
try:
logging.info("Calling Popen with: {}".format(self.args))
p = Popen(self.args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except OSError:
raise(RuntimeError("No such command found in PATH"))
# Calling this command with newline as stdin as the
        # iCommands hangs waiting for user input if the password
# has not been set or has timed out.
self.stdout, self.stderr = p.communicate("\n".encode())
self.stdout = self.stdout.decode("utf-8")
self.stderr = self.stderr.decode("utf-8")
self.returncode = p.returncode | Run the command line tool. | Below is the the instruction that describes the task:
### Input:
Run the command line tool.
### Response:
def _call_cmd_line(self):
"""Run the command line tool."""
try:
logging.info("Calling Popen with: {}".format(self.args))
p = Popen(self.args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except OSError:
raise(RuntimeError("No such command found in PATH"))
# Calling this command with newline as stdin as the
        # iCommands hangs waiting for user input if the password
# has not been set or has timed out.
self.stdout, self.stderr = p.communicate("\n".encode())
self.stdout = self.stdout.decode("utf-8")
self.stderr = self.stderr.decode("utf-8")
self.returncode = p.returncode |
def wait_for_operation_to_complete(
has_operation_completed, retries=10, delay_bw_retries=5,
delay_before_attempts=10, failover_exc=exception.IloError,
failover_msg=("Operation did not complete even after multiple "
"attempts."), is_silent_loop_exit=False):
"""Attempts the provided operation for a specified number of times.
If it runs out of attempts, then it raises an exception. On success,
it breaks out of the loop.
:param has_operation_completed: the method to retry and it needs to return
a boolean to indicate success or failure.
:param retries: number of times the operation to be (re)tried, default 10
:param delay_bw_retries: delay in seconds before attempting after
each failure, default 5.
:param delay_before_attempts: delay in seconds before beginning any
operation attempt, default 10.
:param failover_exc: the exception which gets raised in case of failure
upon exhausting all the attempts, default IloError.
:param failover_msg: the msg with which the exception gets raised in case
of failure upon exhausting all the attempts.
:param is_silent_loop_exit: decides if exception has to be raised (in case
of failure upon exhausting all the attempts)
or not, default False (will be raised).
:raises: failover_exc, if failure happens even after all the attempts,
default IloError.
"""
retry_count = retries
# Delay for ``delay_before_attempts`` secs, before beginning any attempt
time.sleep(delay_before_attempts)
while retry_count:
try:
LOG.debug("Calling '%s', retries left: %d",
has_operation_completed.__name__, retry_count)
if has_operation_completed():
break
except exception.IloError:
pass
time.sleep(delay_bw_retries)
retry_count -= 1
else:
LOG.debug("Max retries exceeded with: '%s'",
has_operation_completed.__name__)
if not is_silent_loop_exit:
raise failover_exc(failover_msg) | Attempts the provided operation for a specified number of times.
If it runs out of attempts, then it raises an exception. On success,
it breaks out of the loop.
:param has_operation_completed: the method to retry and it needs to return
a boolean to indicate success or failure.
:param retries: number of times the operation to be (re)tried, default 10
:param delay_bw_retries: delay in seconds before attempting after
each failure, default 5.
:param delay_before_attempts: delay in seconds before beginning any
operation attempt, default 10.
:param failover_exc: the exception which gets raised in case of failure
upon exhausting all the attempts, default IloError.
:param failover_msg: the msg with which the exception gets raised in case
of failure upon exhausting all the attempts.
:param is_silent_loop_exit: decides if exception has to be raised (in case
of failure upon exhausting all the attempts)
or not, default False (will be raised).
:raises: failover_exc, if failure happens even after all the attempts,
default IloError. | Below is the the instruction that describes the task:
### Input:
Attempts the provided operation for a specified number of times.
If it runs out of attempts, then it raises an exception. On success,
it breaks out of the loop.
:param has_operation_completed: the method to retry and it needs to return
a boolean to indicate success or failure.
:param retries: number of times the operation to be (re)tried, default 10
:param delay_bw_retries: delay in seconds before attempting after
each failure, default 5.
:param delay_before_attempts: delay in seconds before beginning any
operation attempt, default 10.
:param failover_exc: the exception which gets raised in case of failure
upon exhausting all the attempts, default IloError.
:param failover_msg: the msg with which the exception gets raised in case
of failure upon exhausting all the attempts.
:param is_silent_loop_exit: decides if exception has to be raised (in case
of failure upon exhausting all the attempts)
or not, default False (will be raised).
:raises: failover_exc, if failure happens even after all the attempts,
default IloError.
### Response:
def wait_for_operation_to_complete(
has_operation_completed, retries=10, delay_bw_retries=5,
delay_before_attempts=10, failover_exc=exception.IloError,
failover_msg=("Operation did not complete even after multiple "
"attempts."), is_silent_loop_exit=False):
"""Attempts the provided operation for a specified number of times.
If it runs out of attempts, then it raises an exception. On success,
it breaks out of the loop.
:param has_operation_completed: the method to retry and it needs to return
a boolean to indicate success or failure.
:param retries: number of times the operation to be (re)tried, default 10
:param delay_bw_retries: delay in seconds before attempting after
each failure, default 5.
:param delay_before_attempts: delay in seconds before beginning any
operation attempt, default 10.
:param failover_exc: the exception which gets raised in case of failure
upon exhausting all the attempts, default IloError.
:param failover_msg: the msg with which the exception gets raised in case
of failure upon exhausting all the attempts.
:param is_silent_loop_exit: decides if exception has to be raised (in case
of failure upon exhausting all the attempts)
or not, default False (will be raised).
:raises: failover_exc, if failure happens even after all the attempts,
default IloError.
"""
retry_count = retries
# Delay for ``delay_before_attempts`` secs, before beginning any attempt
time.sleep(delay_before_attempts)
while retry_count:
try:
LOG.debug("Calling '%s', retries left: %d",
has_operation_completed.__name__, retry_count)
if has_operation_completed():
break
except exception.IloError:
pass
time.sleep(delay_bw_retries)
retry_count -= 1
else:
LOG.debug("Max retries exceeded with: '%s'",
has_operation_completed.__name__)
if not is_silent_loop_exit:
raise failover_exc(failover_msg) |
def metadata(dataset, node, entityids, extended=False, api_key=None):
"""
Request metadata for a given scene in a USGS dataset.
:param dataset:
:param node:
:param entityids:
:param extended:
Send a second request to the metadata url to get extended metadata on the scene.
:param api_key:
"""
api_key = _get_api_key(api_key)
url = '{}/metadata'.format(USGS_API)
payload = {
"jsonRequest": payloads.metadata(dataset, node, entityids, api_key=api_key)
}
r = requests.post(url, payload)
response = r.json()
_check_for_usgs_error(response)
if extended:
metadata_urls = map(_get_metadata_url, response['data'])
results = _async_requests(metadata_urls)
data = map(lambda idx: _get_extended(response['data'][idx], results[idx]), range(len(response['data'])))
return response | Request metadata for a given scene in a USGS dataset.
:param dataset:
:param node:
:param entityids:
:param extended:
Send a second request to the metadata url to get extended metadata on the scene.
:param api_key: | Below is the the instruction that describes the task:
### Input:
Request metadata for a given scene in a USGS dataset.
:param dataset:
:param node:
:param entityids:
:param extended:
Send a second request to the metadata url to get extended metadata on the scene.
:param api_key:
### Response:
def metadata(dataset, node, entityids, extended=False, api_key=None):
"""
Request metadata for a given scene in a USGS dataset.
:param dataset:
:param node:
:param entityids:
:param extended:
Send a second request to the metadata url to get extended metadata on the scene.
:param api_key:
"""
api_key = _get_api_key(api_key)
url = '{}/metadata'.format(USGS_API)
payload = {
"jsonRequest": payloads.metadata(dataset, node, entityids, api_key=api_key)
}
r = requests.post(url, payload)
response = r.json()
_check_for_usgs_error(response)
if extended:
metadata_urls = map(_get_metadata_url, response['data'])
results = _async_requests(metadata_urls)
data = map(lambda idx: _get_extended(response['data'][idx], results[idx]), range(len(response['data'])))
return response |
def WriteOutput(self, output_file, feed_merger,
old_feed_path, new_feed_path, merged_feed_path):
"""Write the HTML output to a file.
Args:
output_file: The file object that the HTML output will be written to.
feed_merger: The FeedMerger instance.
old_feed_path: The path to the old feed file as a string.
new_feed_path: The path to the new feed file as a string
merged_feed_path: The path to the merged feed file as a string. This
may be None if no merged feed was written.
"""
if merged_feed_path is None:
html_merged_feed_path = ''
else:
html_merged_feed_path = '<p>Merged feed created: <code>%s</code></p>' % (
merged_feed_path)
html_header = """<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<title>Feed Merger Results</title>
<style>
body {font-family: Georgia, serif; background-color: white}
.path {color: gray}
div.problem {max-width: 500px}
td,th {background-color: khaki; padding: 2px; font-family:monospace}
td.problem,th.problem {background-color: #dc143c; color: white; padding: 2px;
font-family:monospace}
table {border-spacing: 5px 0px; margin-top: 3px}
h3.issueHeader {padding-left: 1em}
.notice {background-color: yellow}
span.pass {background-color: lightgreen}
span.fail {background-color: yellow}
.pass, .fail {font-size: 16pt; padding: 3px}
ol,.unused {padding-left: 40pt}
.header {background-color: white; font-family: Georgia, serif; padding: 0px}
th.header {text-align: right; font-weight: normal; color: gray}
.footer {font-size: 10pt}
</style>
</head>
<body>
<h1>Feed merger results</h1>
<p>Old feed: <code>%(old_feed_path)s</code></p>
<p>New feed: <code>%(new_feed_path)s</code></p>
%(html_merged_feed_path)s""" % locals()
html_stats = self._GenerateStatsTable(feed_merger)
html_summary = self._GenerateSummary()
html_notices = self._GenerateNotices()
html_errors = self._GenerateSection(transitfeed.TYPE_ERROR)
html_warnings = self._GenerateSection(transitfeed.TYPE_WARNING)
html_footer = """
<div class="footer">
Generated using transitfeed version %s on %s.
</div>
</body>
</html>""" % (transitfeed.__version__,
time.strftime('%B %d, %Y at %I:%M %p %Z'))
output_file.write(transitfeed.EncodeUnicode(html_header))
output_file.write(transitfeed.EncodeUnicode(html_stats))
output_file.write(transitfeed.EncodeUnicode(html_summary))
output_file.write(transitfeed.EncodeUnicode(html_notices))
output_file.write(transitfeed.EncodeUnicode(html_errors))
output_file.write(transitfeed.EncodeUnicode(html_warnings))
output_file.write(transitfeed.EncodeUnicode(html_footer)) | Write the HTML output to a file.
Args:
output_file: The file object that the HTML output will be written to.
feed_merger: The FeedMerger instance.
old_feed_path: The path to the old feed file as a string.
new_feed_path: The path to the new feed file as a string
merged_feed_path: The path to the merged feed file as a string. This
may be None if no merged feed was written. | Below is the the instruction that describes the task:
### Input:
Write the HTML output to a file.
Args:
output_file: The file object that the HTML output will be written to.
feed_merger: The FeedMerger instance.
old_feed_path: The path to the old feed file as a string.
new_feed_path: The path to the new feed file as a string
merged_feed_path: The path to the merged feed file as a string. This
may be None if no merged feed was written.
### Response:
def WriteOutput(self, output_file, feed_merger,
old_feed_path, new_feed_path, merged_feed_path):
"""Write the HTML output to a file.
Args:
output_file: The file object that the HTML output will be written to.
feed_merger: The FeedMerger instance.
old_feed_path: The path to the old feed file as a string.
new_feed_path: The path to the new feed file as a string
merged_feed_path: The path to the merged feed file as a string. This
may be None if no merged feed was written.
"""
if merged_feed_path is None:
html_merged_feed_path = ''
else:
html_merged_feed_path = '<p>Merged feed created: <code>%s</code></p>' % (
merged_feed_path)
html_header = """<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<title>Feed Merger Results</title>
<style>
body {font-family: Georgia, serif; background-color: white}
.path {color: gray}
div.problem {max-width: 500px}
td,th {background-color: khaki; padding: 2px; font-family:monospace}
td.problem,th.problem {background-color: #dc143c; color: white; padding: 2px;
font-family:monospace}
table {border-spacing: 5px 0px; margin-top: 3px}
h3.issueHeader {padding-left: 1em}
.notice {background-color: yellow}
span.pass {background-color: lightgreen}
span.fail {background-color: yellow}
.pass, .fail {font-size: 16pt; padding: 3px}
ol,.unused {padding-left: 40pt}
.header {background-color: white; font-family: Georgia, serif; padding: 0px}
th.header {text-align: right; font-weight: normal; color: gray}
.footer {font-size: 10pt}
</style>
</head>
<body>
<h1>Feed merger results</h1>
<p>Old feed: <code>%(old_feed_path)s</code></p>
<p>New feed: <code>%(new_feed_path)s</code></p>
%(html_merged_feed_path)s""" % locals()
html_stats = self._GenerateStatsTable(feed_merger)
html_summary = self._GenerateSummary()
html_notices = self._GenerateNotices()
html_errors = self._GenerateSection(transitfeed.TYPE_ERROR)
html_warnings = self._GenerateSection(transitfeed.TYPE_WARNING)
html_footer = """
<div class="footer">
Generated using transitfeed version %s on %s.
</div>
</body>
</html>""" % (transitfeed.__version__,
time.strftime('%B %d, %Y at %I:%M %p %Z'))
output_file.write(transitfeed.EncodeUnicode(html_header))
output_file.write(transitfeed.EncodeUnicode(html_stats))
output_file.write(transitfeed.EncodeUnicode(html_summary))
output_file.write(transitfeed.EncodeUnicode(html_notices))
output_file.write(transitfeed.EncodeUnicode(html_errors))
output_file.write(transitfeed.EncodeUnicode(html_warnings))
output_file.write(transitfeed.EncodeUnicode(html_footer)) |
def is_valid_line(self, line):
"""
Validates a given line against the associated "section" (e.g. 'global'
or 'frontend', etc.) of a stanza.
If a line represents a directive that shouldn't be within the stanza
it is rejected. See the `directives.json` file for a condensed look
at valid directives based on section.
"""
adjusted_line = line.strip().lower()
return any([
adjusted_line.startswith(directive)
for directive in directives_by_section[self.section_name]
]) | Validates a given line against the associated "section" (e.g. 'global'
or 'frontend', etc.) of a stanza.
If a line represents a directive that shouldn't be within the stanza
it is rejected. See the `directives.json` file for a condensed look
at valid directives based on section. | Below is the the instruction that describes the task:
### Input:
Validates a given line against the associated "section" (e.g. 'global'
or 'frontend', etc.) of a stanza.
If a line represents a directive that shouldn't be within the stanza
it is rejected. See the `directives.json` file for a condensed look
at valid directives based on section.
### Response:
def is_valid_line(self, line):
"""
Validates a given line against the associated "section" (e.g. 'global'
or 'frontend', etc.) of a stanza.
If a line represents a directive that shouldn't be within the stanza
it is rejected. See the `directives.json` file for a condensed look
at valid directives based on section.
"""
adjusted_line = line.strip().lower()
return any([
adjusted_line.startswith(directive)
for directive in directives_by_section[self.section_name]
]) |
def addMethod(self, m):
"""
Adds a L{Method} to the interface
"""
if m.nargs == -1:
m.nargs = len([a for a in marshal.genCompleteTypes(m.sigIn)])
m.nret = len([a for a in marshal.genCompleteTypes(m.sigOut)])
self.methods[m.name] = m
self._xml = None | Adds a L{Method} to the interface | Below is the the instruction that describes the task:
### Input:
Adds a L{Method} to the interface
### Response:
def addMethod(self, m):
"""
Adds a L{Method} to the interface
"""
if m.nargs == -1:
m.nargs = len([a for a in marshal.genCompleteTypes(m.sigIn)])
m.nret = len([a for a in marshal.genCompleteTypes(m.sigOut)])
self.methods[m.name] = m
self._xml = None |
def _callRestartAgent(self, ev_data: RestartLogData, failTimeout) -> None:
"""
Callback which is called when restart time comes.
Writes restart record to restart log and asks
node control service to perform restart
:param ev_data: restart event data
:param version: version to restart to
"""
logger.info("{}'s restart calling agent for restart".format(self))
self._actionLog.append_started(ev_data)
self._action_start_callback()
self.scheduledAction = None
asyncio.ensure_future(
self._sendUpdateRequest(ev_data, failTimeout)) | Callback which is called when restart time comes.
Writes restart record to restart log and asks
node control service to perform restart
:param ev_data: restart event data
:param version: version to restart to | Below is the the instruction that describes the task:
### Input:
Callback which is called when restart time comes.
Writes restart record to restart log and asks
node control service to perform restart
:param ev_data: restart event data
:param version: version to restart to
### Response:
def _callRestartAgent(self, ev_data: RestartLogData, failTimeout) -> None:
"""
Callback which is called when restart time comes.
Writes restart record to restart log and asks
node control service to perform restart
:param ev_data: restart event data
:param version: version to restart to
"""
logger.info("{}'s restart calling agent for restart".format(self))
self._actionLog.append_started(ev_data)
self._action_start_callback()
self.scheduledAction = None
asyncio.ensure_future(
self._sendUpdateRequest(ev_data, failTimeout)) |
def on_api_socket_reconnected(self):
"""for API socket reconnected"""
# auto subscriber
resub_count = 0
subtype_list = []
code_list = []
resub_dict = copy(self._ctx_subscribe)
subtype_all_cnt = len(resub_dict.keys())
subtype_cur_cnt = 0
ret_code = RET_OK
ret_msg = ''
for subtype in resub_dict.keys():
subtype_cur_cnt += 1
code_set = resub_dict[subtype]
code_list_new = [code for code in code_set]
if len(code_list_new) == 0:
continue
if len(code_list) == 0:
code_list = code_list_new
subtype_list = [subtype]
is_need_sub = False
if code_list == code_list_new:
if subtype not in subtype_list:
subtype_list.append(subtype) # merge subtype requests
else:
ret_code, ret_msg = self._reconnect_subscribe(code_list, subtype_list)
logger.debug("reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}".format(
len(code_list), ret_code, ret_msg, subtype_list, code_list))
if ret_code != RET_OK:
break
resub_count += len(code_list)
code_list = code_list_new
subtype_list = [subtype]
# the loop is about to end
if subtype_cur_cnt == subtype_all_cnt and len(code_list):
ret_code, ret_msg = self._reconnect_subscribe(code_list, subtype_list)
logger.debug("reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}".format(len(code_list), ret_code, ret_msg, subtype_list, code_list))
if ret_code != RET_OK:
break
resub_count += len(code_list)
code_list = []
subtype_list = []
logger.debug("reconnect subscribe all code_count={} ret_code={} ret_msg={}".format(resub_count, ret_code, ret_msg))
# resubscribe failed, reconnect
if ret_code != RET_OK:
logger.error("reconnect subscribe error, close connect and retry!!")
self._status = ContextStatus.Start
self._wait_reconnect()
return ret_code, ret_msg | for API socket reconnected | Below is the the instruction that describes the task:
### Input:
for API socket reconnected
### Response:
def on_api_socket_reconnected(self):
"""for API socket reconnected"""
# auto subscriber
resub_count = 0
subtype_list = []
code_list = []
resub_dict = copy(self._ctx_subscribe)
subtype_all_cnt = len(resub_dict.keys())
subtype_cur_cnt = 0
ret_code = RET_OK
ret_msg = ''
for subtype in resub_dict.keys():
subtype_cur_cnt += 1
code_set = resub_dict[subtype]
code_list_new = [code for code in code_set]
if len(code_list_new) == 0:
continue
if len(code_list) == 0:
code_list = code_list_new
subtype_list = [subtype]
is_need_sub = False
if code_list == code_list_new:
if subtype not in subtype_list:
subtype_list.append(subtype) # merge subtype requests
else:
ret_code, ret_msg = self._reconnect_subscribe(code_list, subtype_list)
logger.debug("reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}".format(
len(code_list), ret_code, ret_msg, subtype_list, code_list))
if ret_code != RET_OK:
break
resub_count += len(code_list)
code_list = code_list_new
subtype_list = [subtype]
# the loop is about to end
if subtype_cur_cnt == subtype_all_cnt and len(code_list):
ret_code, ret_msg = self._reconnect_subscribe(code_list, subtype_list)
logger.debug("reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}".format(len(code_list), ret_code, ret_msg, subtype_list, code_list))
if ret_code != RET_OK:
break
resub_count += len(code_list)
code_list = []
subtype_list = []
logger.debug("reconnect subscribe all code_count={} ret_code={} ret_msg={}".format(resub_count, ret_code, ret_msg))
# resubscribe failed, reconnect
if ret_code != RET_OK:
logger.error("reconnect subscribe error, close connect and retry!!")
self._status = ContextStatus.Start
self._wait_reconnect()
return ret_code, ret_msg |
def _execute(self,
native,
command,
data=None,
returning=True,
mapper=dict):
"""
Executes the inputted command into the current \
connection cursor.
:param command | <str>
data | <dict> || None
:return [{<str> key: <variant>, ..}, ..], <int> count
"""
if data is None:
data = {}
with native.cursor() as cursor:
log.debug('***********************')
log.debug(command % data)
log.debug('***********************')
try:
rowcount = 0
for cmd in command.split(';'):
cmd = cmd.strip()
if cmd:
cursor.execute(cmd.strip(';') + ';', data)
rowcount += cursor.rowcount
# look for a disconnection error
except pymysql.InterfaceError:
raise orb.errors.ConnectionLost()
# look for integrity errors
except (pymysql.IntegrityError, pymysql.OperationalError) as err:
native.rollback()
# look for a duplicate error
if err[0] == 1062:
raise orb.errors.DuplicateEntryFound(err[1])
# look for a reference error
reference_error = re.search('Key .* is still referenced from table ".*"', nstr(err))
if reference_error:
msg = 'Cannot remove this record, it is still being referenced.'
raise orb.errors.CannotDelete(msg)
# unknown error
log.debug(traceback.print_exc())
raise orb.errors.QueryFailed(command, data, nstr(err))
# connection has closed underneath the hood
except pymysql.Error as err:
native.rollback()
log.error(traceback.print_exc())
raise orb.errors.QueryFailed(command, data, nstr(err))
try:
raw = cursor.fetchall()
results = [mapper(record) for record in raw]
except pymysql.ProgrammingError:
results = []
return results, rowcount | Executes the inputted command into the current \
connection cursor.
:param command | <str>
data | <dict> || None
:return [{<str> key: <variant>, ..}, ..], <int> count | Below is the the instruction that describes the task:
### Input:
Executes the inputted command into the current \
connection cursor.
:param command | <str>
data | <dict> || None
:return [{<str> key: <variant>, ..}, ..], <int> count
### Response:
def _execute(self,
native,
command,
data=None,
returning=True,
mapper=dict):
"""
Executes the inputted command into the current \
connection cursor.
:param command | <str>
data | <dict> || None
:return [{<str> key: <variant>, ..}, ..], <int> count
"""
if data is None:
data = {}
with native.cursor() as cursor:
log.debug('***********************')
log.debug(command % data)
log.debug('***********************')
try:
rowcount = 0
for cmd in command.split(';'):
cmd = cmd.strip()
if cmd:
cursor.execute(cmd.strip(';') + ';', data)
rowcount += cursor.rowcount
# look for a disconnection error
except pymysql.InterfaceError:
raise orb.errors.ConnectionLost()
# look for integrity errors
except (pymysql.IntegrityError, pymysql.OperationalError) as err:
native.rollback()
# look for a duplicate error
if err[0] == 1062:
raise orb.errors.DuplicateEntryFound(err[1])
# look for a reference error
reference_error = re.search('Key .* is still referenced from table ".*"', nstr(err))
if reference_error:
msg = 'Cannot remove this record, it is still being referenced.'
raise orb.errors.CannotDelete(msg)
# unknown error
log.debug(traceback.print_exc())
raise orb.errors.QueryFailed(command, data, nstr(err))
# connection has closed underneath the hood
except pymysql.Error as err:
native.rollback()
log.error(traceback.print_exc())
raise orb.errors.QueryFailed(command, data, nstr(err))
try:
raw = cursor.fetchall()
results = [mapper(record) for record in raw]
except pymysql.ProgrammingError:
results = []
return results, rowcount |
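For illustration, the statement-splitting loop above behaves as follows on a multi-statement string (a hedged sketch; the SQL text and pyformat placeholders are made up):

command = "UPDATE t SET x = %(x)s ; DELETE FROM t_log WHERE ts < %(cutoff)s;"
for cmd in command.split(';'):
    cmd = cmd.strip()
    if cmd:
        print(cmd.strip(';') + ';')
# UPDATE t SET x = %(x)s;
# DELETE FROM t_log WHERE ts < %(cutoff)s;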
def zeroing(dev):
""" zeroing last few blocks of device """
# this kills the crab
#
# sgdisk will wipe out the main copy of the GPT partition
# table (sorry), but it doesn't remove the backup copies, and
# subsequent commands will continue to complain and fail when
# they see those. zeroing the last few blocks of the device
# appears to do the trick.
lba_size = 4096
size = 33 * lba_size
with open(dev, 'wb') as f:
f.seek(-size, os.SEEK_END)
f.write(size*b'\0') | zeroing last few blocks of device | Below is the the instruction that describes the task:
### Input:
zeroing last few blocks of device
### Response:
def zeroing(dev):
""" zeroing last few blocks of device """
# this kills the crab
#
# sgdisk will wipe out the main copy of the GPT partition
# table (sorry), but it doesn't remove the backup copies, and
# subsequent commands will continue to complain and fail when
# they see those. zeroing the last few blocks of the device
# appears to do the trick.
lba_size = 4096
size = 33 * lba_size
with open(dev, 'wb') as f:
f.seek(-size, os.SEEK_END)
f.write(size*b'\0') |
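A hedged usage sketch; the device path is a placeholder and the call needs write access to the raw device:

zeroing('/dev/sdX')   # overwrites the last 33 * 4096 = 135168 bytes of /dev/sdX with zeros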
def get(self, frame_to, frame_from=None):
"""
Get the transform from one frame to another, assuming they are connected
in the transform tree.
If the frames are not connected a NetworkXNoPath error will be raised.
Parameters
---------
frame_from: hashable object, usually a string (eg 'world').
If left as None it will be set to self.base_frame
frame_to: hashable object, usually a string (eg 'mesh_0')
Returns
---------
transform: (4,4) homogenous transformation matrix
"""
if frame_from is None:
frame_from = self.base_frame
cache_key = str(frame_from) + ':' + str(frame_to)
cached = self._cache[cache_key]
if cached is not None:
return cached
transform = np.eye(4)
path = self._get_path(frame_from, frame_to)
for i in range(len(path) - 1):
data, direction = self.transforms.get_edge_data_direction(
path[i], path[i + 1])
matrix = data['matrix']
if direction < 0:
matrix = np.linalg.inv(matrix)
transform = np.dot(transform, matrix)
geometry = None
if 'geometry' in self.transforms.node[frame_to]:
geometry = self.transforms.node[frame_to]['geometry']
self._cache[cache_key] = (transform, geometry)
return transform, geometry | Get the transform from one frame to another, assuming they are connected
in the transform tree.
If the frames are not connected a NetworkXNoPath error will be raised.
Parameters
---------
frame_from: hashable object, usually a string (eg 'world').
If left as None it will be set to self.base_frame
frame_to: hashable object, usually a string (eg 'mesh_0')
Returns
---------
transform: (4,4) homogenous transformation matrix | Below is the the instruction that describes the task:
### Input:
Get the transform from one frame to another, assuming they are connected
in the transform tree.
If the frames are not connected a NetworkXNoPath error will be raised.
Parameters
---------
frame_from: hashable object, usually a string (eg 'world').
If left as None it will be set to self.base_frame
frame_to: hashable object, usually a string (eg 'mesh_0')
Returns
---------
transform: (4,4) homogenous transformation matrix
### Response:
def get(self, frame_to, frame_from=None):
"""
Get the transform from one frame to another, assuming they are connected
in the transform tree.
If the frames are not connected a NetworkXNoPath error will be raised.
Parameters
---------
frame_from: hashable object, usually a string (eg 'world').
If left as None it will be set to self.base_frame
frame_to: hashable object, usually a string (eg 'mesh_0')
Returns
---------
transform: (4,4) homogenous transformation matrix
"""
if frame_from is None:
frame_from = self.base_frame
cache_key = str(frame_from) + ':' + str(frame_to)
cached = self._cache[cache_key]
if cached is not None:
return cached
transform = np.eye(4)
path = self._get_path(frame_from, frame_to)
for i in range(len(path) - 1):
data, direction = self.transforms.get_edge_data_direction(
path[i], path[i + 1])
matrix = data['matrix']
if direction < 0:
matrix = np.linalg.inv(matrix)
transform = np.dot(transform, matrix)
geometry = None
if 'geometry' in self.transforms.node[frame_to]:
geometry = self.transforms.node[frame_to]['geometry']
self._cache[cache_key] = (transform, geometry)
return transform, geometry |
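A hedged usage sketch, assuming `graph` is a populated transform tree (for example `trimesh.Scene.graph`); the frame names are placeholders:

matrix, geometry = graph.get('mesh_0')             # transform from the base frame to 'mesh_0'
matrix, geometry = graph.get('mesh_0', 'camera')   # transform from 'camera' to 'mesh_0'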
def docker_pull(image):
'''Pulls an image'''
args = ['docker', 'pull', image]
# If this fails, the default docker stdout/stderr looks good to the user.
ret = call(args)
if ret != 0:
raise DockerError('Failed to pull image "{}"'.format(image)) | Pulls an image | Below is the the instruction that describes the task:
### Input:
Pulls an image
### Response:
def docker_pull(image):
'''Pulls an image'''
args = ['docker', 'pull', image]
# If this fails, the default docker stdout/stderr looks good to the user.
ret = call(args)
if ret != 0:
raise DockerError('Failed to pull image "{}"'.format(image)) |
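A hedged usage sketch; the image tag is a placeholder:

try:
    docker_pull('alpine:3.19')
except DockerError as exc:
    print(exc)   # e.g. Failed to pull image "alpine:3.19"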
def run_cli_options(args):
"""
Quick implementation of Python interpreter's -m, -c and file execution.
The resulting dictionary is imported into global namespace, just in case
someone is using interactive mode.
We try to keep argument order so as to pass them correctly to the subcommands.
"""
if _interactive_mode(args.interactive):
os.environ['PYTHONINSPECT'] = '1'
if in_ipython():
return
exclusive_choices = [[None, args.command], ['-c', args.string], ['-m', args.module]]
for flag_choice in exclusive_choices:
try:
a = sys.argv.index(flag_choice[0] or flag_choice[1])
except ValueError:
a = 1000
flag_choice.append(a)
exclusive_choices.sort(key=lambda v: v[2])
for i, (flag, choice, _) in enumerate(exclusive_choices):
if not choice:
continue
sys.argv = [choice] + sys.argv[sys.argv.index(choice)+1:]
if not flag:
if choice == 'ipython':
launch_ipython(argv=sys.argv[1:])
elif choice == 'notebook':
launch_notebook()
else:
globals().update(runpy.run_path(choice, run_name="__main__"))
elif flag == '-m':
if '--' in sys.argv[1:2] : # -m syntax needs '--' for extra args
sys.argv.pop(1)
globals().update(runpy.run_module(choice, run_name="__main__"))
elif flag == '-c':
exec(choice, globals(), locals()) # workaround
else:
continue
break | Quick implementation of Python interpreter's -m, -c and file execution.
The resulting dictionary is imported into global namespace, just in case
someone is using interactive mode.
We try to keep argument order so as to pass them correctly to the subcommands. | Below is the the instruction that describes the task:
### Input:
Quick implementation of Python interpreter's -m, -c and file execution.
The resulting dictionary is imported into global namespace, just in case
someone is using interactive mode.
We try to keep argument order so as to pass them correctly to the subcommands.
### Response:
def run_cli_options(args):
"""
Quick implementation of Python interpreter's -m, -c and file execution.
The resulting dictionary is imported into global namespace, just in case
someone is using interactive mode.
We try to keep argument order so as to pass them correctly to the subcommands.
"""
if _interactive_mode(args.interactive):
os.environ['PYTHONINSPECT'] = '1'
if in_ipython():
return
exclusive_choices = [[None, args.command], ['-c', args.string], ['-m', args.module]]
for flag_choice in exclusive_choices:
try:
a = sys.argv.index(flag_choice[0] or flag_choice[1])
except ValueError:
a = 1000
flag_choice.append(a)
exclusive_choices.sort(key=lambda v: v[2])
for i, (flag, choice, _) in enumerate(exclusive_choices):
if not choice:
continue
sys.argv = [choice] + sys.argv[sys.argv.index(choice)+1:]
if not flag:
if choice == 'ipython':
launch_ipython(argv=sys.argv[1:])
elif choice == 'notebook':
launch_notebook()
else:
globals().update(runpy.run_path(choice, run_name="__main__"))
elif flag == '-m':
if '--' in sys.argv[1:2] : # -m syntax needs '--' for extra args
sys.argv.pop(1)
globals().update(runpy.run_module(choice, run_name="__main__"))
elif flag == '-c':
exec(choice, globals(), locals()) # workaround
else:
continue
break |
def on_augassign(self, node): # ('target', 'op', 'value')
"""Augmented assign."""
return self.on_assign(ast.Assign(targets=[node.target],
value=ast.BinOp(left=node.target,
op=node.op,
right=node.value))) | Augmented assign. | Below is the the instruction that describes the task:
### Input:
Augmented assign.
### Response:
def on_augassign(self, node): # ('target', 'op', 'value')
"""Augmented assign."""
return self.on_assign(ast.Assign(targets=[node.target],
value=ast.BinOp(left=node.target,
op=node.op,
right=node.value))) |
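The rewrite can be seen with the standard `ast` module alone; this snippet only mirrors the transformation performed above and does not invoke the interpreter class:

import ast

node = ast.parse("total += price").body[0]   # an ast.AugAssign node
rewritten = ast.Assign(targets=[node.target],
                       value=ast.BinOp(left=node.target,
                                       op=node.op,
                                       right=node.value))
# i.e. `total += price` is handled as `total = total + price`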
def cls_build(inst, state):
"""
Apply the setstate protocol to initialize `inst` from `state`.
INPUT:
- ``inst`` -- a raw instance of a class
- ``state`` -- the state to restore; typically a dictionary mapping attribute names to their values
EXAMPLES::
>>> from openmath.convert_pickle import cls_build
>>> class A(object): pass
>>> inst = A.__new__(A)
>>> state = {"foo": 1, "bar": 4}
>>> inst2 = cls_build(inst,state)
>>> inst is inst2
True
>>> inst.foo
1
>>> inst.bar
4
"""
# Copied from Pickler.load_build
setstate = getattr(inst, "__setstate__", None)
if setstate:
setstate(state)
return inst
slotstate = None
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if state:
try:
d = inst.__dict__
try:
for k, v in six.iteritems(state):
d[six.moves.intern(k)] = v
# keys in state don't have to be strings
# don't blow up, but don't go out of our way
except TypeError:
d.update(state)
except RuntimeError:
# XXX In restricted execution, the instance's __dict__
# is not accessible. Use the old way of unpickling
# the instance variables. This is a semantic
# difference when unpickling in restricted
# vs. unrestricted modes.
# Note, however, that cPickle has never tried to do the
# .update() business, and always uses
# PyObject_SetItem(inst.__dict__, key, value) in a
# loop over state.items().
for k, v in state.items():
setattr(inst, k, v)
if slotstate:
for k, v in slotstate.items():
setattr(inst, k, v)
return inst | Apply the setstate protocol to initialize `inst` from `state`.
INPUT:
- ``inst`` -- a raw instance of a class
- ``state`` -- the state to restore; typically a dictionary mapping attribute names to their values
EXAMPLES::
>>> from openmath.convert_pickle import cls_build
>>> class A(object): pass
>>> inst = A.__new__(A)
>>> state = {"foo": 1, "bar": 4}
>>> inst2 = cls_build(inst,state)
>>> inst is inst2
True
>>> inst.foo
1
>>> inst.bar
4 | Below is the the instruction that describes the task:
### Input:
Apply the setstate protocol to initialize `inst` from `state`.
INPUT:
- ``inst`` -- a raw instance of a class
- ``state`` -- the state to restore; typically a dictionary mapping attribute names to their values
EXAMPLES::
>>> from openmath.convert_pickle import cls_build
>>> class A(object): pass
>>> inst = A.__new__(A)
>>> state = {"foo": 1, "bar": 4}
>>> inst2 = cls_build(inst,state)
>>> inst is inst2
True
>>> inst.foo
1
>>> inst.bar
4
### Response:
def cls_build(inst, state):
"""
Apply the setstate protocol to initialize `inst` from `state`.
INPUT:
- ``inst`` -- a raw instance of a class
- ``state`` -- the state to restore; typically a dictionary mapping attribute names to their values
EXAMPLES::
>>> from openmath.convert_pickle import cls_build
>>> class A(object): pass
>>> inst = A.__new__(A)
>>> state = {"foo": 1, "bar": 4}
>>> inst2 = cls_build(inst,state)
>>> inst is inst2
True
>>> inst.foo
1
>>> inst.bar
4
"""
# Copied from Pickler.load_build
setstate = getattr(inst, "__setstate__", None)
if setstate:
setstate(state)
return inst
slotstate = None
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if state:
try:
d = inst.__dict__
try:
for k, v in six.iteritems(state):
d[six.moves.intern(k)] = v
# keys in state don't have to be strings
# don't blow up, but don't go out of our way
except TypeError:
d.update(state)
except RuntimeError:
# XXX In restricted execution, the instance's __dict__
# is not accessible. Use the old way of unpickling
# the instance variables. This is a semantic
# difference when unpickling in restricted
# vs. unrestricted modes.
# Note, however, that cPickle has never tried to do the
# .update() business, and always uses
# PyObject_SetItem(inst.__dict__, key, value) in a
# loop over state.items().
for k, v in state.items():
setattr(inst, k, v)
if slotstate:
for k, v in slotstate.items():
setattr(inst, k, v)
return inst |
def rowsWithin(self, bbox):
'return list of deduped rows within bbox'
ret = {}
for y in range(bbox.ymin, bbox.ymax+1):
for x in range(bbox.xmin, bbox.xmax+1):
for attr, rows in self.pixels[y][x].items():
if attr not in self.hiddenAttrs:
for r in rows:
ret[id(r)] = r
return list(ret.values()) | return list of deduped rows within bbox | Below is the the instruction that describes the task:
### Input:
return list of deduped rows within bbox
### Response:
def rowsWithin(self, bbox):
'return list of deduped rows within bbox'
ret = {}
for y in range(bbox.ymin, bbox.ymax+1):
for x in range(bbox.xmin, bbox.xmax+1):
for attr, rows in self.pixels[y][x].items():
if attr not in self.hiddenAttrs:
for r in rows:
ret[id(r)] = r
return list(ret.values()) |
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'calendar-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials | Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential. | Below is the the instruction that describes the task:
### Input:
Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
### Response:
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'calendar-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials |
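A hedged usage sketch following the Google API quickstart pattern this helper comes from; it assumes `httplib2` and `googleapiclient` are installed alongside `oauth2client`:

import httplib2
from googleapiclient import discovery

credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('calendar', 'v3', http=http)
events = service.events().list(calendarId='primary', maxResults=10).execute()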
def _set_mark(self, v, load=False):
"""
Setter method for mark, mapped from YANG variable /qos/map/dscp_mutation/mark (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_mark is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mark() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("dscp_in_values",mark.mark, yang_name="mark", rest_name="mark", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-in-values', extensions={u'tailf-common': {u'info': u'Map DSCP values to outbound DSCP value', u'cli-suppress-mode': None, u'callpoint': u'dscp_mark_list_mutation', u'cli-incomplete-command': None, u'cli-suppress-no': None}}), is_container='list', yang_name="mark", rest_name="mark", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map DSCP values to outbound DSCP value', u'cli-suppress-mode': None, u'callpoint': u'dscp_mark_list_mutation', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mark must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("dscp_in_values",mark.mark, yang_name="mark", rest_name="mark", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-in-values', extensions={u'tailf-common': {u'info': u'Map DSCP values to outbound DSCP value', u'cli-suppress-mode': None, u'callpoint': u'dscp_mark_list_mutation', u'cli-incomplete-command': None, u'cli-suppress-no': None}}), is_container='list', yang_name="mark", rest_name="mark", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map DSCP values to outbound DSCP value', u'cli-suppress-mode': None, u'callpoint': u'dscp_mark_list_mutation', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)""",
})
self.__mark = t
if hasattr(self, '_set'):
self._set() | Setter method for mark, mapped from YANG variable /qos/map/dscp_mutation/mark (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_mark is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mark() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for mark, mapped from YANG variable /qos/map/dscp_mutation/mark (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_mark is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mark() directly.
### Response:
def _set_mark(self, v, load=False):
"""
Setter method for mark, mapped from YANG variable /qos/map/dscp_mutation/mark (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_mark is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mark() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("dscp_in_values",mark.mark, yang_name="mark", rest_name="mark", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-in-values', extensions={u'tailf-common': {u'info': u'Map DSCP values to outbound DSCP value', u'cli-suppress-mode': None, u'callpoint': u'dscp_mark_list_mutation', u'cli-incomplete-command': None, u'cli-suppress-no': None}}), is_container='list', yang_name="mark", rest_name="mark", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map DSCP values to outbound DSCP value', u'cli-suppress-mode': None, u'callpoint': u'dscp_mark_list_mutation', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mark must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("dscp_in_values",mark.mark, yang_name="mark", rest_name="mark", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-in-values', extensions={u'tailf-common': {u'info': u'Map DSCP values to outbound DSCP value', u'cli-suppress-mode': None, u'callpoint': u'dscp_mark_list_mutation', u'cli-incomplete-command': None, u'cli-suppress-no': None}}), is_container='list', yang_name="mark", rest_name="mark", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map DSCP values to outbound DSCP value', u'cli-suppress-mode': None, u'callpoint': u'dscp_mark_list_mutation', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)""",
})
self.__mark = t
if hasattr(self, '_set'):
self._set() |
def hash_reference_links(text, hashes, markdown_obj):
"""Hashes an <a> link or an <img> link.
This function only converts reference link styles:
[text here][ref id]
![alt text here][ref id]
For inline style links, see hash_inline_links.
Reference ids can be defined anywhere in the Markdown text.
Reference ids can also be omitted, in which case the text in the
first box is used as the reference id:
[ref id][]
This is known as an "implicit link" reference.
"""
def sub(match):
is_img = match.group(1) != ''
content = match.group(2)
ref = match.group(3).strip().lower()
if not ref:
ref = content.strip().lower()
ref = ref.replace('\n', ' ')
if ref not in markdown_obj.references:
link, title = '', ''
else:
link, title = markdown_obj.references[ref]
if title:
title = ' title="{0}"'.format(title)
if is_img:
result = '<img src="{0}" alt="{1}"{2}>'.format(
link, content, title)
else:
result = '<a href="{0}"{2}>{1}</a>'.format(link,
markdown_obj.convert(content).replace('<p>', '').replace('</p>', '').strip(),
title)
hashed = hash_text(result, 'link')
hashes[hashed] = result
return hashed
return re_reference_link.sub(sub, text) | Hashes an <a> link or an <img> link.
This function only converts reference link styles:
[text here][ref id]
![alt text here][ref id]
For inline style links, see hash_inline_links.
Reference ids can be defined anywhere in the Markdown text.
Reference ids can also be omitted, in which case the text in the
first box is used as the reference id:
[ref id][]
This is known as an "implicit link" reference. | Below is the the instruction that describes the task:
### Input:
Hashes an <a> link or an <img> link.
This function only converts reference link styles:
[text here][ref id]
![alt text here][ref id]
For inline style links, see hash_inline_links.
Reference ids can be defined anywhere in the Markdown text.
Reference ids can also be omitted, in which case the text in the
first box is used as the reference id:
[ref id][]
This is known as an "implicit link" reference.
### Response:
def hash_reference_links(text, hashes, markdown_obj):
"""Hashes an <a> link or an <img> link.
This function only converts reference link styles:
[text here][ref id]
![alt text here][ref id]
For inline style links, see hash_inline_links.
Reference ids can be defined anywhere in the Markdown text.
Reference ids can also be omitted, in which case the text in the
first box is used as the reference id:
[ref id][]
This is known as an "implicit link" reference.
"""
def sub(match):
is_img = match.group(1) != ''
content = match.group(2)
ref = match.group(3).strip().lower()
if not ref:
ref = content.strip().lower()
ref = ref.replace('\n', ' ')
if ref not in markdown_obj.references:
link, title = '', ''
else:
link, title = markdown_obj.references[ref]
if title:
title = ' title="{0}"'.format(title)
if is_img:
result = '<img src="{0}" alt="{1}"{2}>'.format(
link, content, title)
else:
result = '<a href="{0}"{2}>{1}</a>'.format(link,
markdown_obj.convert(content).replace('<p>', '').replace('</p>', '').strip(),
title)
hashed = hash_text(result, 'link')
hashes[hashed] = result
return hashed
return re_reference_link.sub(sub, text) |
def nodes_to_check(self, docs):
"""\
returns a list of nodes we want to search
on like paragraphs and tables
"""
nodes_to_check = []
for doc in docs:
for tag in ['p', 'pre', 'td']:
items = self.parser.getElementsByTag(doc, tag=tag)
nodes_to_check += items
return nodes_to_check | \
returns a list of nodes we want to search
on like paragraphs and tables | Below is the the instruction that describes the task:
### Input:
\
returns a list of nodes we want to search
on like paragraphs and tables
### Response:
def nodes_to_check(self, docs):
"""\
returns a list of nodes we want to search
on like paragraphs and tables
"""
nodes_to_check = []
for doc in docs:
for tag in ['p', 'pre', 'td']:
items = self.parser.getElementsByTag(doc, tag=tag)
nodes_to_check += items
return nodes_to_check |
def prepare_method_call(self, method, args):
"""
Wraps a method so that method() will call ``method(*args)`` or ``method(**args)``,
depending on args type
:param method: a callable object (method)
:param args: dict or list with the parameters for the function
:return: a 'patched' callable
"""
if self._method_requires_handler_ref(method):
if isinstance(args, list):
args = [self] + args
elif isinstance(args, dict):
args["handler"] = self
if isinstance(args, list):
to_call = partial(method, *args)
elif isinstance(args, dict):
to_call = partial(method, **args)
else:
raise TypeError(
"args must be list or dict but got {} instead".format(type(args).__name__))
return to_call | Wraps a method so that method() will call ``method(*args)`` or ``method(**args)``,
depending on args type
:param method: a callable object (method)
:param args: dict or list with the parameters for the function
:return: a 'patched' callable | Below is the the instruction that describes the task:
### Input:
Wraps a method so that method() will call ``method(*args)`` or ``method(**args)``,
depending on args type
:param method: a callable object (method)
:param args: dict or list with the parameters for the function
:return: a 'patched' callable
### Response:
def prepare_method_call(self, method, args):
"""
Wraps a method so that method() will call ``method(*args)`` or ``method(**args)``,
depending on args type
:param method: a callable object (method)
:param args: dict or list with the parameters for the function
:return: a 'patched' callable
"""
if self._method_requires_handler_ref(method):
if isinstance(args, list):
args = [self] + args
elif isinstance(args, dict):
args["handler"] = self
if isinstance(args, list):
to_call = partial(method, *args)
elif isinstance(args, dict):
to_call = partial(method, **args)
else:
raise TypeError(
"args must be list or dict but got {} instead".format(type(args).__name__))
return to_call |
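The list/dict branching above reduces to `functools.partial`; a small self-contained illustration (the handler-injection branch is left out):

from functools import partial

def greet(name, punctuation="!"):
    return "Hello, " + name + punctuation

partial(greet, *["Ada"])()                               # 'Hello, Ada!'
partial(greet, **{"name": "Ada", "punctuation": "?"})()  # 'Hello, Ada?'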
def jacobi( a, n ):
"""Jacobi symbol"""
# Based on the Handbook of Applied Cryptography (HAC), algorithm 2.149.
# This function has been tested by comparison with a small
# table printed in HAC, and by extensive use in calculating
# modular square roots.
assert n >= 3
assert n%2 == 1
a = a % n
if a == 0: return 0
if a == 1: return 1
a1, e = a, 0
while a1%2 == 0:
a1, e = a1//2, e+1
if e%2 == 0 or n%8 == 1 or n%8 == 7: s = 1
else: s = -1
if a1 == 1: return s
if n%4 == 3 and a1%4 == 3: s = -s
return s * jacobi( n % a1, a1 ) | Jacobi symbol | Below is the the instruction that describes the task:
### Input:
Jacobi symbol
### Response:
def jacobi( a, n ):
"""Jacobi symbol"""
# Based on the Handbook of Applied Cryptography (HAC), algorithm 2.149.
# This function has been tested by comparison with a small
# table printed in HAC, and by extensive use in calculating
# modular square roots.
assert n >= 3
assert n%2 == 1
a = a % n
if a == 0: return 0
if a == 1: return 1
a1, e = a, 0
while a1%2 == 0:
a1, e = a1//2, e+1
if e%2 == 0 or n%8 == 1 or n%8 == 7: s = 1
else: s = -1
if a1 == 1: return s
if n%4 == 3 and a1%4 == 3: s = -s
return s * jacobi( n % a1, a1 ) |
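A few quick sanity checks; the expected values follow directly from the Legendre/Jacobi definitions:

assert jacobi(2, 15) == 1    # (2/3) * (2/5) = (-1) * (-1)
assert jacobi(2, 5) == -1    # 2 is not a quadratic residue mod 5
assert jacobi(3, 9) == 0     # 3 divides 9, so the symbol is 0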
def get_value_for_datastore(self, model_instance):
"""Gets value for datastore.
Args:
model_instance: instance of the model class.
Returns:
datastore-compatible value.
"""
value = super(JsonProperty, self).get_value_for_datastore(model_instance)
if not value:
return None
json_value = value
if not isinstance(value, dict):
json_value = value.to_json()
if not json_value:
return None
return datastore_types.Text(json.dumps(
json_value, sort_keys=True, cls=JsonEncoder)) | Gets value for datastore.
Args:
model_instance: instance of the model class.
Returns:
datastore-compatible value. | Below is the the instruction that describes the task:
### Input:
Gets value for datastore.
Args:
model_instance: instance of the model class.
Returns:
datastore-compatible value.
### Response:
def get_value_for_datastore(self, model_instance):
"""Gets value for datastore.
Args:
model_instance: instance of the model class.
Returns:
datastore-compatible value.
"""
value = super(JsonProperty, self).get_value_for_datastore(model_instance)
if not value:
return None
json_value = value
if not isinstance(value, dict):
json_value = value.to_json()
if not json_value:
return None
return datastore_types.Text(json.dumps(
json_value, sort_keys=True, cls=JsonEncoder)) |
def get_conn(self):
"""
Returns a cassandra Session object
"""
if self.session and not self.session.is_shutdown:
return self.session
self.session = self.cluster.connect(self.keyspace)
return self.session | Returns a cassandra Session object | Below is the the instruction that describes the task:
### Input:
Returns a cassandra Session object
### Response:
def get_conn(self):
"""
Returns a cassandra Session object
"""
if self.session and not self.session.is_shutdown:
return self.session
self.session = self.cluster.connect(self.keyspace)
return self.session |
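A hedged usage sketch, assuming `hook` is an instance of the Airflow Cassandra hook this method belongs to:

session = hook.get_conn()   # connects, or reuses the cached session, on the configured keyspace
row = session.execute("SELECT release_version FROM system.local").one()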
def get_form_field_dict(self, model_dict):
"""
Takes a model dictionary representation and creates a dictionary
keyed by form field. Each value is a keyed 4 tuple of:
(widget, mode_field_instance, model_field_type, field_key)
"""
return_dict = OrderedDict()
# Workaround: mongoengine doesn't preserve form fields ordering from metaclass __new__
if hasattr(self.model, 'Meta') and hasattr(self.model.Meta, 'form_fields_ordering'):
field_order_list = tuple(form_field for form_field
in self.model.Meta.form_fields_ordering
if form_field in model_dict.iterkeys())
order_dict = OrderedDict.fromkeys(field_order_list)
return_dict = order_dict
for field_key, field_dict in sorted(model_dict.items()):
if not field_key.startswith("_"):
widget = field_dict.get('_widget', None)
if widget is None:
return_dict[field_key] = self.get_form_field_dict(field_dict)
return_dict[field_key].update({'_field_type': field_dict.get('_field_type', None)})
else:
return_dict[field_key] = FieldTuple(widget,
field_dict.get('_document_field', None),
field_dict.get('_field_type', None),
field_dict.get('_key', None))
return return_dict | Takes a model dictionary representation and creates a dictionary
keyed by form field. Each value is a keyed 4 tuple of:
(widget, mode_field_instance, model_field_type, field_key) | Below is the the instruction that describes the task:
### Input:
Takes a model dictionary representation and creates a dictionary
keyed by form field. Each value is a keyed 4 tuple of:
(widget, mode_field_instance, model_field_type, field_key)
### Response:
def get_form_field_dict(self, model_dict):
"""
Takes a model dictionary representation and creates a dictionary
keyed by form field. Each value is a keyed 4 tuple of:
(widget, mode_field_instance, model_field_type, field_key)
"""
return_dict = OrderedDict()
# Workaround: mongoengine doesn't preserve form fields ordering from metaclass __new__
if hasattr(self.model, 'Meta') and hasattr(self.model.Meta, 'form_fields_ordering'):
field_order_list = tuple(form_field for form_field
in self.model.Meta.form_fields_ordering
if form_field in model_dict.iterkeys())
order_dict = OrderedDict.fromkeys(field_order_list)
return_dict = order_dict
for field_key, field_dict in sorted(model_dict.items()):
if not field_key.startswith("_"):
widget = field_dict.get('_widget', None)
if widget is None:
return_dict[field_key] = self.get_form_field_dict(field_dict)
return_dict[field_key].update({'_field_type': field_dict.get('_field_type', None)})
else:
return_dict[field_key] = FieldTuple(widget,
field_dict.get('_document_field', None),
field_dict.get('_field_type', None),
field_dict.get('_key', None))
return return_dict |
def add_my_api_key_to_groups(self, body, **kwargs): # noqa: E501
"""Add API key to a list of groups. # noqa: E501
An endpoint for adding API key to groups. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/api-keys/me/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.add_my_api_key_to_groups(body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param list[str] body: A list of IDs of the groups to be updated. (required)
:return: UpdatedResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.add_my_api_key_to_groups_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.add_my_api_key_to_groups_with_http_info(body, **kwargs) # noqa: E501
return data | Add API key to a list of groups. # noqa: E501
An endpoint for adding API key to groups. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/api-keys/me/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.add_my_api_key_to_groups(body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param list[str] body: A list of IDs of the groups to be updated. (required)
:return: UpdatedResponse
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Add API key to a list of groups. # noqa: E501
An endpoint for adding API key to groups. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/api-keys/me/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.add_my_api_key_to_groups(body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param list[str] body: A list of IDs of the groups to be updated. (required)
:return: UpdatedResponse
If the method is called asynchronously,
returns the request thread.
### Response:
def add_my_api_key_to_groups(self, body, **kwargs): # noqa: E501
"""Add API key to a list of groups. # noqa: E501
An endpoint for adding API key to groups. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/api-keys/me/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.add_my_api_key_to_groups(body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param list[str] body: A list of IDs of the groups to be updated. (required)
:return: UpdatedResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.add_my_api_key_to_groups_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.add_my_api_key_to_groups_with_http_info(body, **kwargs) # noqa: E501
return data |
def read_from_file(path, file_type='text', exception=ScriptWorkerException):
"""Read from ``path``.
Small helper function to read from ``file``.
Args:
path (str): the path to read from.
file_type (str, optional): the type of file. Currently accepts
``text`` or ``binary``. Defaults to ``text``.
exception (Exception, optional): the exception to raise
if unable to read from the file. Defaults to ``ScriptWorkerException``.
Returns:
None: if unable to read from ``path`` and ``exception`` is ``None``
str or bytes: the contents of ``path``
Raises:
Exception: if ``exception`` is set.
"""
FILE_TYPE_MAP = {'text': 'r', 'binary': 'rb'}
if file_type not in FILE_TYPE_MAP:
raise exception("Unknown file_type {} not in {}!".format(file_type, FILE_TYPE_MAP))
try:
with open(path, FILE_TYPE_MAP[file_type]) as fh:
return fh.read()
except (OSError, FileNotFoundError) as exc:
raise exception("Can't read_from_file {}: {}".format(path, str(exc))) | Read from ``path``.
Small helper function to read from ``file``.
Args:
path (str): the path to read from.
file_type (str, optional): the type of file. Currently accepts
``text`` or ``binary``. Defaults to ``text``.
exception (Exception, optional): the exception to raise
if unable to read from the file. Defaults to ``ScriptWorkerException``.
Returns:
None: if unable to read from ``path`` and ``exception`` is ``None``
str or bytes: the contents of ``path``
Raises:
Exception: if ``exception`` is set. | Below is the the instruction that describes the task:
### Input:
Read from ``path``.
Small helper function to read from ``file``.
Args:
path (str): the path to read from.
file_type (str, optional): the type of file. Currently accepts
``text`` or ``binary``. Defaults to ``text``.
exception (Exception, optional): the exception to raise
if unable to read from the file. Defaults to ``ScriptWorkerException``.
Returns:
None: if unable to read from ``path`` and ``exception`` is ``None``
str or bytes: the contents of ``path``
Raises:
Exception: if ``exception`` is set.
### Response:
def read_from_file(path, file_type='text', exception=ScriptWorkerException):
"""Read from ``path``.
Small helper function to read from ``file``.
Args:
path (str): the path to read from.
file_type (str, optional): the type of file. Currently accepts
``text`` or ``binary``. Defaults to ``text``.
exception (Exception, optional): the exception to raise
if unable to read from the file. Defaults to ``ScriptWorkerException``.
Returns:
None: if unable to read from ``path`` and ``exception`` is ``None``
str or bytes: the contents of ``path``
Raises:
Exception: if ``exception`` is set.
"""
FILE_TYPE_MAP = {'text': 'r', 'binary': 'rb'}
if file_type not in FILE_TYPE_MAP:
raise exception("Unknown file_type {} not in {}!".format(file_type, FILE_TYPE_MAP))
try:
with open(path, FILE_TYPE_MAP[file_type]) as fh:
return fh.read()
except (OSError, FileNotFoundError) as exc:
raise exception("Can't read_from_file {}: {}".format(path, str(exc))) |
def method(self, value):
"""
Before assigning the value validate that is in one of the
HTTP methods we implement
"""
keys = self._methods.keys()
if value not in keys:
raise AttributeError("Method value not in " + str(keys))
else:
self._method = value | Before assigning the value validate that is in one of the
HTTP methods we implement | Below is the the instruction that describes the task:
### Input:
Before assigning the value validate that is in one of the
HTTP methods we implement
### Response:
def method(self, value):
"""
Before assigning the value validate that is in one of the
HTTP methods we implement
"""
keys = self._methods.keys()
if value not in keys:
raise AttributeError("Method value not in " + str(keys))
else:
self._method = value |
def createNotification(self, ulOverlayHandle, ulUserValue, type_, pchText, style):
"""
Create a notification and enqueue it to be shown to the user.
An overlay handle is required to create a notification, as otherwise it would be impossible for a user to act on it.
To create a two-line notification, use a line break ('\n') to split the text into two lines.
The pImage argument may be NULL, in which case the specified overlay's icon will be used instead.
"""
fn = self.function_table.createNotification
pImage = NotificationBitmap_t()
pNotificationId = VRNotificationId()
result = fn(ulOverlayHandle, ulUserValue, type_, pchText, style, byref(pImage), byref(pNotificationId))
return result, pImage, pNotificationId | Create a notification and enqueue it to be shown to the user.
An overlay handle is required to create a notification, as otherwise it would be impossible for a user to act on it.
To create a two-line notification, use a line break ('\n') to split the text into two lines.
The pImage argument may be NULL, in which case the specified overlay's icon will be used instead. | Below is the the instruction that describes the task:
### Input:
Create a notification and enqueue it to be shown to the user.
An overlay handle is required to create a notification, as otherwise it would be impossible for a user to act on it.
To create a two-line notification, use a line break ('\n') to split the text into two lines.
The pImage argument may be NULL, in which case the specified overlay's icon will be used instead.
### Response:
def createNotification(self, ulOverlayHandle, ulUserValue, type_, pchText, style):
"""
Create a notification and enqueue it to be shown to the user.
An overlay handle is required to create a notification, as otherwise it would be impossible for a user to act on it.
To create a two-line notification, use a line break ('\n') to split the text into two lines.
The pImage argument may be NULL, in which case the specified overlay's icon will be used instead.
"""
fn = self.function_table.createNotification
pImage = NotificationBitmap_t()
pNotificationId = VRNotificationId()
result = fn(ulOverlayHandle, ulUserValue, type_, pchText, style, byref(pImage), byref(pNotificationId))
return result, pImage, pNotificationId |
def _populate_lp(self, dataset, **kwargs):
"""
Populate columns necessary for an LP dataset
This should not be called directly, but rather via :meth:`Body.populate_observable`
or :meth:`System.populate_observables`
"""
logger.debug("{}._populate_lp(dataset={})".format(self.component, dataset))
profile_rest = kwargs.get('profile_rest', self.lp_profile_rest.get(dataset))
rv_cols = self._populate_rv(dataset, **kwargs)
cols = rv_cols
# rvs = (rv_cols['rvs']*u.solRad/u.d).to(u.m/u.s).value
# cols['dls'] = rv_cols['rvs']*profile_rest/c.c.si.value
return cols | Populate columns necessary for an LP dataset
This should not be called directly, but rather via :meth:`Body.populate_observable`
or :meth:`System.populate_observables` | Below is the the instruction that describes the task:
### Input:
Populate columns necessary for an LP dataset
This should not be called directly, but rather via :meth:`Body.populate_observable`
or :meth:`System.populate_observables`
### Response:
def _populate_lp(self, dataset, **kwargs):
"""
Populate columns necessary for an LP dataset
This should not be called directly, but rather via :meth:`Body.populate_observable`
or :meth:`System.populate_observables`
"""
logger.debug("{}._populate_lp(dataset={})".format(self.component, dataset))
profile_rest = kwargs.get('profile_rest', self.lp_profile_rest.get(dataset))
rv_cols = self._populate_rv(dataset, **kwargs)
cols = rv_cols
# rvs = (rv_cols['rvs']*u.solRad/u.d).to(u.m/u.s).value
# cols['dls'] = rv_cols['rvs']*profile_rest/c.c.si.value
return cols |
def pick_scalar_condition(pred, true_value, false_value, name=None):
"""Convenience function that chooses one of two values based on the predicate.
This utility is equivalent to a version of `tf.where` that accepts only a
scalar predicate and computes its result statically when possible. It may also
be used in place of `tf.cond` when both branches yield a `Tensor` of the same
shape; the operational difference is that `tf.cond` uses control flow to
evaluate only the branch that's needed, while `tf.where` (and thus
this method) may evaluate both branches before the predicate's truth is known.
This means that `tf.cond` is preferred when one of the branches is expensive
to evaluate (like performing a large matmul), while this method is preferred
when both branches are cheap, e.g., constants. In the latter case, we expect
this method to be substantially faster than `tf.cond` on GPU and to give
similar performance on CPU.
Args:
pred: Scalar `bool` `Tensor` predicate.
true_value: `Tensor` to return if `pred` is `True`.
false_value: `Tensor` to return if `pred` is `False`. Must have the same
shape as `true_value`.
name: Python `str` name given to ops managed by this object.
Returns:
result: a `Tensor` (or `Tensor`-convertible Python value) equal to
`true_value` if `pred` evaluates to `True` and `false_value` otherwise.
If the condition can be evaluated statically, the result returned is one
of the input Python values, with no graph side effects.
"""
with tf.name_scope(name or "pick_scalar_condition"):
pred = tf.convert_to_tensor(
value=pred, dtype_hint=tf.bool, name="pred")
true_value = tf.convert_to_tensor(value=true_value, name="true_value")
false_value = tf.convert_to_tensor(value=false_value, name="false_value")
pred_ = tf.get_static_value(pred)
if pred_ is None:
return tf.where(pred, true_value, false_value)
return true_value if pred_ else false_value | Convenience function that chooses one of two values based on the predicate.
This utility is equivalent to a version of `tf.where` that accepts only a
scalar predicate and computes its result statically when possible. It may also
be used in place of `tf.cond` when both branches yield a `Tensor` of the same
shape; the operational difference is that `tf.cond` uses control flow to
evaluate only the branch that's needed, while `tf.where` (and thus
this method) may evaluate both branches before the predicate's truth is known.
This means that `tf.cond` is preferred when one of the branches is expensive
to evaluate (like performing a large matmul), while this method is preferred
when both branches are cheap, e.g., constants. In the latter case, we expect
this method to be substantially faster than `tf.cond` on GPU and to give
similar performance on CPU.
Args:
pred: Scalar `bool` `Tensor` predicate.
true_value: `Tensor` to return if `pred` is `True`.
false_value: `Tensor` to return if `pred` is `False`. Must have the same
shape as `true_value`.
name: Python `str` name given to ops managed by this object.
Returns:
result: a `Tensor` (or `Tensor`-convertible Python value) equal to
`true_value` if `pred` evaluates to `True` and `false_value` otherwise.
If the condition can be evaluated statically, the result returned is one
of the input Python values, with no graph side effects. | Below is the the instruction that describes the task:
### Input:
Convenience function that chooses one of two values based on the predicate.
This utility is equivalent to a version of `tf.where` that accepts only a
scalar predicate and computes its result statically when possible. It may also
be used in place of `tf.cond` when both branches yield a `Tensor` of the same
shape; the operational difference is that `tf.cond` uses control flow to
evaluate only the branch that's needed, while `tf.where` (and thus
this method) may evaluate both branches before the predicate's truth is known.
This means that `tf.cond` is preferred when one of the branches is expensive
to evaluate (like performing a large matmul), while this method is preferred
when both branches are cheap, e.g., constants. In the latter case, we expect
this method to be substantially faster than `tf.cond` on GPU and to give
similar performance on CPU.
Args:
pred: Scalar `bool` `Tensor` predicate.
true_value: `Tensor` to return if `pred` is `True`.
false_value: `Tensor` to return if `pred` is `False`. Must have the same
shape as `true_value`.
name: Python `str` name given to ops managed by this object.
Returns:
result: a `Tensor` (or `Tensor`-convertible Python value) equal to
`true_value` if `pred` evaluates to `True` and `false_value` otherwise.
If the condition can be evaluated statically, the result returned is one
of the input Python values, with no graph side effects.
### Response:
def pick_scalar_condition(pred, true_value, false_value, name=None):
"""Convenience function that chooses one of two values based on the predicate.
This utility is equivalent to a version of `tf.where` that accepts only a
scalar predicate and computes its result statically when possible. It may also
be used in place of `tf.cond` when both branches yield a `Tensor` of the same
shape; the operational difference is that `tf.cond` uses control flow to
evaluate only the branch that's needed, while `tf.where` (and thus
this method) may evaluate both branches before the predicate's truth is known.
This means that `tf.cond` is preferred when one of the branches is expensive
to evaluate (like performing a large matmul), while this method is preferred
when both branches are cheap, e.g., constants. In the latter case, we expect
this method to be substantially faster than `tf.cond` on GPU and to give
similar performance on CPU.
Args:
pred: Scalar `bool` `Tensor` predicate.
true_value: `Tensor` to return if `pred` is `True`.
false_value: `Tensor` to return if `pred` is `False`. Must have the same
shape as `true_value`.
name: Python `str` name given to ops managed by this object.
Returns:
result: a `Tensor` (or `Tensor`-convertible Python value) equal to
`true_value` if `pred` evaluates to `True` and `false_value` otherwise.
If the condition can be evaluated statically, the result returned is one
of the input Python values, with no graph side effects.
"""
with tf.name_scope(name or "pick_scalar_condition"):
pred = tf.convert_to_tensor(
value=pred, dtype_hint=tf.bool, name="pred")
true_value = tf.convert_to_tensor(value=true_value, name="true_value")
false_value = tf.convert_to_tensor(value=false_value, name="false_value")
pred_ = tf.get_static_value(pred)
if pred_ is None:
return tf.where(pred, true_value, false_value)
return true_value if pred_ else false_value |
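A minimal usage sketch for the function above, assuming TensorFlow 2.x; the tensors and predicate values are illustrative only:
import tensorflow as tf
t = tf.constant([1.0, 2.0])
f = tf.constant([3.0, 4.0])
# Statically known predicate: the Python bool folds away and `t` comes back
# directly, with no tf.where op added.
static_result = pick_scalar_condition(True, t, f)
# Predicate only known at runtime: a tf.where op is emitted instead.
runtime_pred = tf.random.uniform([]) > 0.5
runtime_result = pick_scalar_condition(runtime_pred, t, f)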
def get_day_and_year():
"""
Returns tuple (day, year).
Here be dragons!
The correct date is determined with introspection of the call stack, first
finding the filename of the module from which ``aocd`` was imported.
This means your filenames should be something sensible, which identify the
day and year unambiguously. The examples below should all parse correctly,
because they have unique digits in the file path that are recognisable as
AoC years (2015+) or days (1-25).
A filename like ``problem_one.py`` will not work, so don't do that. If you
don't like weird frame hacks, just use the ``aocd.get_data()`` function
directly instead and have a nice day!
"""
pattern_year = r"201[5-9]|202[0-9]"
pattern_day = r"2[0-5]|1[0-9]|[1-9]"
stack = [f[0] for f in traceback.extract_stack()]
for name in stack:
basename = os.path.basename(name)
reasons_to_skip_frame = [
not re.search(pattern_day, basename), # no digits in filename
name == __file__, # here
"importlib" in name, # Python 3 import machinery
"/IPython/" in name, # IPython adds a tonne of stack frames
name.startswith("<"), # crap like <decorator-gen-57>
name.endswith("ython3"), # ipython3 alias
]
if not any(reasons_to_skip_frame):
abspath = os.path.abspath(name)
break
log.debug("skipping frame %s", name)
else:
import __main__
try:
__main__.__file__
except AttributeError:
log.debug("running within REPL")
day = current_day()
year = most_recent_year()
return day, year
else:
log.debug("non-interactive")
raise AocdError("Failed introspection of filename")
years = {int(year) for year in re.findall(pattern_year, abspath)}
if len(years) > 1:
raise AocdError("Failed introspection of year")
year = years.pop() if years else None
basename_no_years = re.sub(pattern_year, "", basename)
try:
[day] = set(re.findall(pattern_day, basename_no_years))
except ValueError:
pass
else:
assert not day.startswith("0"), "regex pattern_day must prevent any leading 0"
day = int(day)
assert 1 <= day <= 25, "regex pattern_day must only match numbers in range 1-25"
log.debug("year=%d day=%d", year, day)
return day, year
log.debug("giving up introspection for %s", abspath)
raise AocdError("Failed introspection of day") | Returns tuple (day, year).
Here be dragons!
The correct date is determined with introspection of the call stack, first
finding the filename of the module from which ``aocd`` was imported.
This means your filenames should be something sensible, which identify the
day and year unambiguously. The examples below should all parse correctly,
because they have unique digits in the file path that are recognisable as
AoC years (2015+) or days (1-25).
A filename like ``problem_one.py`` will not work, so don't do that. If you
don't like weird frame hacks, just use the ``aocd.get_data()`` function
directly instead and have a nice day! | Below is the the instruction that describes the task:
### Input:
Returns tuple (day, year).
Here be dragons!
The correct date is determined with introspection of the call stack, first
finding the filename of the module from which ``aocd`` was imported.
This means your filenames should be something sensible, which identify the
day and year unambiguously. The examples below should all parse correctly,
because they have unique digits in the file path that are recognisable as
AoC years (2015+) or days (1-25).
A filename like ``problem_one.py`` will not work, so don't do that. If you
don't like weird frame hacks, just use the ``aocd.get_data()`` function
directly instead and have a nice day!
### Response:
def get_day_and_year():
"""
Returns tuple (day, year).
Here be dragons!
The correct date is determined with introspection of the call stack, first
finding the filename of the module from which ``aocd`` was imported.
This means your filenames should be something sensible, which identify the
day and year unambiguously. The examples below should all parse correctly,
because they have unique digits in the file path that are recognisable as
AoC years (2015+) or days (1-25).
A filename like ``problem_one.py`` will not work, so don't do that. If you
don't like weird frame hacks, just use the ``aocd.get_data()`` function
directly instead and have a nice day!
"""
pattern_year = r"201[5-9]|202[0-9]"
pattern_day = r"2[0-5]|1[0-9]|[1-9]"
stack = [f[0] for f in traceback.extract_stack()]
for name in stack:
basename = os.path.basename(name)
reasons_to_skip_frame = [
not re.search(pattern_day, basename), # no digits in filename
name == __file__, # here
"importlib" in name, # Python 3 import machinery
"/IPython/" in name, # IPython adds a tonne of stack frames
name.startswith("<"), # crap like <decorator-gen-57>
name.endswith("ython3"), # ipython3 alias
]
if not any(reasons_to_skip_frame):
abspath = os.path.abspath(name)
break
log.debug("skipping frame %s", name)
else:
import __main__
try:
__main__.__file__
except AttributeError:
log.debug("running within REPL")
day = current_day()
year = most_recent_year()
return day, year
else:
log.debug("non-interactive")
raise AocdError("Failed introspection of filename")
years = {int(year) for year in re.findall(pattern_year, abspath)}
if len(years) > 1:
raise AocdError("Failed introspection of year")
year = years.pop() if years else None
basename_no_years = re.sub(pattern_year, "", basename)
try:
[day] = set(re.findall(pattern_day, basename_no_years))
except ValueError:
pass
else:
assert not day.startswith("0"), "regex pattern_day must prevent any leading 0"
day = int(day)
assert 1 <= day <= 25, "regex pattern_day must only match numbers in range 1-25"
log.debug("year=%d day=%d", year, day)
return day, year
log.debug("giving up introspection for %s", abspath)
raise AocdError("Failed introspection of day") |
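The filename introspection above reduces to two regular expressions; this self-contained sketch applies them to a made-up path to show what gets extracted:
import os
import re
pattern_year = r"201[5-9]|202[0-9]"
pattern_day = r"2[0-5]|1[0-9]|[1-9]"
abspath = "/home/user/advent-of-code/2018/q03.py"   # hypothetical solution file
basename = os.path.basename(abspath)
years = {int(y) for y in re.findall(pattern_year, abspath)}
basename_no_years = re.sub(pattern_year, "", basename)
days = set(re.findall(pattern_day, basename_no_years))
print(years, days)  # {2018} {'3'}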
def search(cls,
query_string,
options=None,
enable_facet_discovery=False,
return_facets=None,
facet_options=None,
facet_refinements=None,
deadline=None,
**kwargs):
"""
Searches the index. Conveniently searches only for documents that belong to instances of this class.
:param query_string: The query to match against documents in the index. See search.Query() for details.
:param options: A QueryOptions describing post-processing of search results.
        :param enable_facet_discovery: discover top relevant facets for this search query and return them.
:param return_facets: An iterable of FacetRequest or basestring as facet name to
return specific facet with the result.
:param facet_options: A FacetOption describing processing of facets.
:param facet_refinements: An iterable of FacetRefinement objects or refinement
token strings used to filter out search results based on a facet value.
refinements for different facets will be conjunction and refinements for
the same facet will be disjunction.
:param deadline: Deadline for RPC call in seconds; if None use the default.
        :param kwargs: Additional keyword arguments passed through to the underlying index search call.
:return: A SearchResults containing a list of documents matched, number returned
and number matched by the query.
:raises: QueryError: If the query string is not parseable.
TypeError: If any of the parameters have invalid types, or an unknown
attribute is passed.
ValueError: If any of the parameters have invalid values (e.g., a
negative deadline).
"""
search_class = cls.search_get_class_names()[-1]
query_string += ' ' + 'class_name:%s' % (search_class,)
q = search.Query(
query_string=query_string,
options=options,
enable_facet_discovery=enable_facet_discovery,
return_facets=return_facets,
facet_options=facet_options,
facet_refinements=facet_refinements
)
index = cls.search_get_index()
return index.search(q, deadline=deadline, **kwargs) | Searches the index. Conveniently searches only for documents that belong to instances of this class.
:param query_string: The query to match against documents in the index. See search.Query() for details.
:param options: A QueryOptions describing post-processing of search results.
    :param enable_facet_discovery: discover top relevant facets for this search query and return them.
:param return_facets: An iterable of FacetRequest or basestring as facet name to
return specific facet with the result.
:param facet_options: A FacetOption describing processing of facets.
:param facet_refinements: An iterable of FacetRefinement objects or refinement
token strings used to filter out search results based on a facet value.
refinements for different facets will be conjunction and refinements for
the same facet will be disjunction.
:param deadline: Deadline for RPC call in seconds; if None use the default.
    :param kwargs: Additional keyword arguments passed through to the underlying index search call.
:return: A SearchResults containing a list of documents matched, number returned
and number matched by the query.
:raises: QueryError: If the query string is not parseable.
TypeError: If any of the parameters have invalid types, or an unknown
attribute is passed.
ValueError: If any of the parameters have invalid values (e.g., a
negative deadline). | Below is the the instruction that describes the task:
### Input:
Searches the index. Conveniently searches only for documents that belong to instances of this class.
:param query_string: The query to match against documents in the index. See search.Query() for details.
:param options: A QueryOptions describing post-processing of search results.
    :param enable_facet_discovery: discover top relevant facets for this search query and return them.
:param return_facets: An iterable of FacetRequest or basestring as facet name to
return specific facet with the result.
:param facet_options: A FacetOption describing processing of facets.
:param facet_refinements: An iterable of FacetRefinement objects or refinement
token strings used to filter out search results based on a facet value.
refinements for different facets will be conjunction and refinements for
the same facet will be disjunction.
:param deadline: Deadline for RPC call in seconds; if None use the default.
    :param kwargs: Additional keyword arguments passed through to the underlying index search call.
:return: A SearchResults containing a list of documents matched, number returned
and number matched by the query.
:raises: QueryError: If the query string is not parseable.
TypeError: If any of the parameters have invalid types, or an unknown
attribute is passed.
ValueError: If any of the parameters have invalid values (e.g., a
negative deadline).
### Response:
def search(cls,
query_string,
options=None,
enable_facet_discovery=False,
return_facets=None,
facet_options=None,
facet_refinements=None,
deadline=None,
**kwargs):
"""
Searches the index. Conveniently searches only for documents that belong to instances of this class.
:param query_string: The query to match against documents in the index. See search.Query() for details.
:param options: A QueryOptions describing post-processing of search results.
        :param enable_facet_discovery: discover top relevant facets for this search query and return them.
:param return_facets: An iterable of FacetRequest or basestring as facet name to
return specific facet with the result.
:param facet_options: A FacetOption describing processing of facets.
:param facet_refinements: An iterable of FacetRefinement objects or refinement
token strings used to filter out search results based on a facet value.
refinements for different facets will be conjunction and refinements for
the same facet will be disjunction.
:param deadline: Deadline for RPC call in seconds; if None use the default.
        :param kwargs: Additional keyword arguments passed through to the underlying index search call.
:return: A SearchResults containing a list of documents matched, number returned
and number matched by the query.
:raises: QueryError: If the query string is not parseable.
TypeError: If any of the parameters have invalid types, or an unknown
attribute is passed.
ValueError: If any of the parameters have invalid values (e.g., a
negative deadline).
"""
search_class = cls.search_get_class_names()[-1]
query_string += ' ' + 'class_name:%s' % (search_class,)
q = search.Query(
query_string=query_string,
options=options,
enable_facet_discovery=enable_facet_discovery,
return_facets=return_facets,
facet_options=facet_options,
facet_refinements=facet_refinements
)
index = cls.search_get_index()
return index.search(q, deadline=deadline, **kwargs) |
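A hypothetical call site for this classmethod; `Product` stands in for any model using this search support, and the query string, facet name and limit are examples only:
from google.appengine.api import search
results = Product.search(
    'price < 20',
    options=search.QueryOptions(limit=10),
    return_facets=['category'],
)
for doc in results.results:
    print(doc.doc_id)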
def _fetch_file(url, file_name, resume=True,
hash_=None, timeout=10., progressbar=True, verbose=True):
"""Load requested file, downloading it if needed or requested.
Parameters
----------
url: string
The url of file to be downloaded.
file_name: string
Name, along with the path, of where downloaded file will be saved.
resume: bool, optional
If true, try to resume partially downloaded files.
hash_ : str | None
The hash of the file to check. If None, no checking is
performed.
timeout : float
The URL open timeout.
verbose : bool
Whether to print download status.
"""
# Adapted from NISL and MNE-python:
# https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
# https://martinos.org/mne
if hash_ is not None and (not isinstance(hash_, string_types) or
len(hash_) != 32):
raise ValueError('Bad hash value given, should be a 32-character '
'string:\n%s' % (hash_,))
temp_file_name = file_name + ".part"
try:
if 'dropbox.com' in url:
# Use requests to handle cookies.
# XXX In the future, we should probably use requests everywhere.
# Unless we want to minimize dependencies.
try:
import requests
except ModuleNotFoundError:
raise ValueError('To download Dropbox links, you need to '
'install the `requests` module.')
resp = requests.get(url)
chunk_size = 8192 # 2 ** 13
with open(temp_file_name, 'wb') as ff:
for chunk in resp.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive new chunks
ff.write(chunk)
else:
# Check file size and displaying it alongside the download url
u = urllib.request.urlopen(url, timeout=timeout)
u.close()
# this is necessary to follow any redirects
url = u.geturl()
u = urllib.request.urlopen(url, timeout=timeout)
try:
file_size = int(u.headers.get('Content-Length', '1').strip())
finally:
u.close()
del u
if verbose:
tqdm.write('Downloading data from %s (%s)\n'
% (url, sizeof_fmt(file_size)))
# Triage resume
if not os.path.exists(temp_file_name):
resume = False
if resume:
with open(temp_file_name, 'rb', buffering=0) as local_file:
local_file.seek(0, 2)
initial_size = local_file.tell()
del local_file
else:
initial_size = 0
# This should never happen if our functions work properly
if initial_size > file_size:
raise RuntimeError('Local file (%s) is larger than remote '
'file (%s), cannot resume download'
% (sizeof_fmt(initial_size),
sizeof_fmt(file_size)))
scheme = urllib.parse.urlparse(url).scheme
fun = _get_http if scheme in ('http', 'https') else _get_ftp
fun(url, temp_file_name, initial_size, file_size, verbose,
progressbar, ncols=80)
# check md5sum
if hash_ is not None:
if verbose:
tqdm.write('Verifying download hash.')
md5 = md5sum(temp_file_name)
if hash_ != md5:
raise RuntimeError('Hash mismatch for downloaded file %s, '
'expected %s but got %s'
% (temp_file_name, hash_, md5))
shutil.move(temp_file_name, file_name)
except Exception as ee:
raise RuntimeError('Error while fetching file %s.'
' Dataset fetching aborted.\nError: %s' % (url, ee)) | Load requested file, downloading it if needed or requested.
Parameters
----------
url: string
The url of file to be downloaded.
file_name: string
Name, along with the path, of where downloaded file will be saved.
resume: bool, optional
If true, try to resume partially downloaded files.
hash_ : str | None
The hash of the file to check. If None, no checking is
performed.
timeout : float
The URL open timeout.
verbose : bool
Whether to print download status. | Below is the the instruction that describes the task:
### Input:
Load requested file, downloading it if needed or requested.
Parameters
----------
url: string
The url of file to be downloaded.
file_name: string
Name, along with the path, of where downloaded file will be saved.
resume: bool, optional
If true, try to resume partially downloaded files.
hash_ : str | None
The hash of the file to check. If None, no checking is
performed.
timeout : float
The URL open timeout.
verbose : bool
Whether to print download status.
### Response:
def _fetch_file(url, file_name, resume=True,
hash_=None, timeout=10., progressbar=True, verbose=True):
"""Load requested file, downloading it if needed or requested.
Parameters
----------
url: string
The url of file to be downloaded.
file_name: string
Name, along with the path, of where downloaded file will be saved.
resume: bool, optional
If true, try to resume partially downloaded files.
hash_ : str | None
The hash of the file to check. If None, no checking is
performed.
timeout : float
The URL open timeout.
verbose : bool
Whether to print download status.
"""
# Adapted from NISL and MNE-python:
# https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
# https://martinos.org/mne
if hash_ is not None and (not isinstance(hash_, string_types) or
len(hash_) != 32):
raise ValueError('Bad hash value given, should be a 32-character '
'string:\n%s' % (hash_,))
temp_file_name = file_name + ".part"
try:
if 'dropbox.com' in url:
# Use requests to handle cookies.
# XXX In the future, we should probably use requests everywhere.
# Unless we want to minimize dependencies.
try:
import requests
except ModuleNotFoundError:
raise ValueError('To download Dropbox links, you need to '
'install the `requests` module.')
resp = requests.get(url)
chunk_size = 8192 # 2 ** 13
with open(temp_file_name, 'wb') as ff:
for chunk in resp.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive new chunks
ff.write(chunk)
else:
# Check file size and displaying it alongside the download url
u = urllib.request.urlopen(url, timeout=timeout)
u.close()
# this is necessary to follow any redirects
url = u.geturl()
u = urllib.request.urlopen(url, timeout=timeout)
try:
file_size = int(u.headers.get('Content-Length', '1').strip())
finally:
u.close()
del u
if verbose:
tqdm.write('Downloading data from %s (%s)\n'
% (url, sizeof_fmt(file_size)))
# Triage resume
if not os.path.exists(temp_file_name):
resume = False
if resume:
with open(temp_file_name, 'rb', buffering=0) as local_file:
local_file.seek(0, 2)
initial_size = local_file.tell()
del local_file
else:
initial_size = 0
# This should never happen if our functions work properly
if initial_size > file_size:
raise RuntimeError('Local file (%s) is larger than remote '
'file (%s), cannot resume download'
% (sizeof_fmt(initial_size),
sizeof_fmt(file_size)))
scheme = urllib.parse.urlparse(url).scheme
fun = _get_http if scheme in ('http', 'https') else _get_ftp
fun(url, temp_file_name, initial_size, file_size, verbose,
progressbar, ncols=80)
# check md5sum
if hash_ is not None:
if verbose:
tqdm.write('Verifying download hash.')
md5 = md5sum(temp_file_name)
if hash_ != md5:
raise RuntimeError('Hash mismatch for downloaded file %s, '
'expected %s but got %s'
% (temp_file_name, hash_, md5))
shutil.move(temp_file_name, file_name)
except Exception as ee:
raise RuntimeError('Error while fetching file %s.'
' Dataset fetching aborted.\nError: %s' % (url, ee)) |
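An illustrative call; the URL, destination path and md5 below are placeholders rather than real values:
_fetch_file(
    url="https://example.com/datasets/archive.tar.gz",
    file_name="/tmp/archive.tar.gz",
    resume=True,                                  # pick up an existing .part file
    hash_="d41d8cd98f00b204e9800998ecf8427e",     # expected md5, 32 hex characters
    timeout=10.,
    verbose=True,
)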
def list_locked(**kwargs):
'''
Query the package database those packages which are
locked against reinstallation, modification or deletion.
    Returns a list of package names with version strings
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked
jail
List locked packages within the specified jail
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked jail=<jail name or id>
chroot
List locked packages within the specified chroot (ignored if ``jail`` is
specified)
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked chroot=/path/to/chroot
root
List locked packages within the specified root (ignored if ``jail`` is
specified)
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked root=/path/to/chroot
'''
return ['{0}-{1}'.format(pkgname, version(pkgname, **kwargs))
for pkgname in _lockcmd('lock', name=None, **kwargs)] | Query the package database those packages which are
locked against reinstallation, modification or deletion.
Returns a list of package names with version strings
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked
jail
List locked packages within the specified jail
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked jail=<jail name or id>
chroot
List locked packages within the specified chroot (ignored if ``jail`` is
specified)
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked chroot=/path/to/chroot
root
List locked packages within the specified root (ignored if ``jail`` is
specified)
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked root=/path/to/chroot | Below is the the instruction that describes the task:
### Input:
Query the package database those packages which are
locked against reinstallation, modification or deletion.
Returns a list of package names with version strings
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked
jail
List locked packages within the specified jail
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked jail=<jail name or id>
chroot
List locked packages within the specified chroot (ignored if ``jail`` is
specified)
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked chroot=/path/to/chroot
root
List locked packages within the specified root (ignored if ``jail`` is
specified)
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked root=/path/to/chroot
### Response:
def list_locked(**kwargs):
'''
Query the package database those packages which are
locked against reinstallation, modification or deletion.
    Returns a list of package names with version strings
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked
jail
List locked packages within the specified jail
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked jail=<jail name or id>
chroot
List locked packages within the specified chroot (ignored if ``jail`` is
specified)
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked chroot=/path/to/chroot
root
List locked packages within the specified root (ignored if ``jail`` is
specified)
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked root=/path/to/chroot
'''
return ['{0}-{1}'.format(pkgname, version(pkgname, **kwargs))
for pkgname in _lockcmd('lock', name=None, **kwargs)] |
def create(self, repo_name, scm='git', private=True, **kwargs):
""" Creates a new repository on own Bitbucket account and return it."""
url = self.bitbucket.url('CREATE_REPO')
return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, name=repo_name, scm=scm, is_private=private, **kwargs) | Creates a new repository on own Bitbucket account and return it. | Below is the the instruction that describes the task:
### Input:
Creates a new repository on own Bitbucket account and return it.
### Response:
def create(self, repo_name, scm='git', private=True, **kwargs):
""" Creates a new repository on own Bitbucket account and return it."""
url = self.bitbucket.url('CREATE_REPO')
return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, name=repo_name, scm=scm, is_private=private, **kwargs) |
def _init_usrgos(self, goids):
"""Return user GO IDs which have GO Terms."""
usrgos = set()
goids_missing = set()
_go2obj = self.gosubdag.go2obj
for goid in goids:
if goid in _go2obj:
usrgos.add(goid)
else:
goids_missing.add(goid)
if goids_missing:
print("MISSING GO IDs: {GOs}".format(GOs=goids_missing))
print("{N} of {M} GO IDs ARE MISSING".format(N=len(goids_missing), M=len(goids)))
return usrgos | Return user GO IDs which have GO Terms. | Below is the the instruction that describes the task:
### Input:
Return user GO IDs which have GO Terms.
### Response:
def _init_usrgos(self, goids):
"""Return user GO IDs which have GO Terms."""
usrgos = set()
goids_missing = set()
_go2obj = self.gosubdag.go2obj
for goid in goids:
if goid in _go2obj:
usrgos.add(goid)
else:
goids_missing.add(goid)
if goids_missing:
print("MISSING GO IDs: {GOs}".format(GOs=goids_missing))
print("{N} of {M} GO IDs ARE MISSING".format(N=len(goids_missing), M=len(goids)))
return usrgos |
def finite_pixels(self):
""" Return an array of the finite pixels.
Returns
-------
:obj:`numpy.ndarray`
Nx2 array of the finite pixels
"""
finite_px = np.where(np.isfinite(self.data))
finite_px = np.c_[finite_px[0], finite_px[1]]
return finite_px | Return an array of the finite pixels.
Returns
-------
:obj:`numpy.ndarray`
Nx2 array of the finite pixels | Below is the the instruction that describes the task:
### Input:
Return an array of the finite pixels.
Returns
-------
:obj:`numpy.ndarray`
Nx2 array of the finite pixels
### Response:
def finite_pixels(self):
""" Return an array of the finite pixels.
Returns
-------
:obj:`numpy.ndarray`
Nx2 array of the finite pixels
"""
finite_px = np.where(np.isfinite(self.data))
finite_px = np.c_[finite_px[0], finite_px[1]]
return finite_px |
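The same computation on a bare numpy array, as a self-contained illustration in which `data` stands in for self.data:
import numpy as np
data = np.array([[1.0, np.nan],
                 [np.inf, 4.0]])
finite_px = np.where(np.isfinite(data))
finite_px = np.c_[finite_px[0], finite_px[1]]
print(finite_px)  # [[0 0]
                  #  [1 1]]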
def get_strip_metadata(self, catID):
'''Retrieves the strip catalog metadata given a cat ID.
Args:
catID (str): The source catalog ID from the platform catalog.
Returns:
            metadata (dict): A metadata dictionary.
TODO: have this return a class object with interesting information exposed.
'''
self.logger.debug('Retrieving strip catalog metadata')
url = '%(base_url)s/record/%(catID)s?includeRelationships=false' % {
'base_url': self.base_url, 'catID': catID
}
r = self.gbdx_connection.get(url)
if r.status_code == 200:
return r.json()['properties']
elif r.status_code == 404:
self.logger.debug('Strip not found: %s' % catID)
r.raise_for_status()
else:
self.logger.debug('There was a problem retrieving catid: %s' % catID)
r.raise_for_status() | Retrieves the strip catalog metadata given a cat ID.
Args:
catID (str): The source catalog ID from the platform catalog.
Returns:
        metadata (dict): A metadata dictionary.
TODO: have this return a class object with interesting information exposed. | Below is the the instruction that describes the task:
### Input:
Retrieves the strip catalog metadata given a cat ID.
Args:
catID (str): The source catalog ID from the platform catalog.
Returns:
        metadata (dict): A metadata dictionary.
TODO: have this return a class object with interesting information exposed.
### Response:
def get_strip_metadata(self, catID):
'''Retrieves the strip catalog metadata given a cat ID.
Args:
catID (str): The source catalog ID from the platform catalog.
Returns:
            metadata (dict): A metadata dictionary.
TODO: have this return a class object with interesting information exposed.
'''
self.logger.debug('Retrieving strip catalog metadata')
url = '%(base_url)s/record/%(catID)s?includeRelationships=false' % {
'base_url': self.base_url, 'catID': catID
}
r = self.gbdx_connection.get(url)
if r.status_code == 200:
return r.json()['properties']
elif r.status_code == 404:
self.logger.debug('Strip not found: %s' % catID)
r.raise_for_status()
else:
self.logger.debug('There was a problem retrieving catid: %s' % catID)
r.raise_for_status() |
def max_pathlen(self):
"""The maximum pathlen for any intermediate CAs signed by this CA.
This value is either ``None``, if this and all parent CAs don't have a ``pathlen`` attribute, or an
``int`` if any parent CA has the attribute.
"""
pathlen = self.pathlen
if self.parent is None:
return pathlen
max_parent = self.parent.max_pathlen
if max_parent is None:
return pathlen
elif pathlen is None:
return max_parent - 1
else:
return min(self.pathlen, max_parent - 1) | The maximum pathlen for any intermediate CAs signed by this CA.
This value is either ``None``, if this and all parent CAs don't have a ``pathlen`` attribute, or an
``int`` if any parent CA has the attribute. | Below is the the instruction that describes the task:
### Input:
The maximum pathlen for any intermediate CAs signed by this CA.
This value is either ``None``, if this and all parent CAs don't have a ``pathlen`` attribute, or an
``int`` if any parent CA has the attribute.
### Response:
def max_pathlen(self):
"""The maximum pathlen for any intermediate CAs signed by this CA.
This value is either ``None``, if this and all parent CAs don't have a ``pathlen`` attribute, or an
``int`` if any parent CA has the attribute.
"""
pathlen = self.pathlen
if self.parent is None:
return pathlen
max_parent = self.parent.max_pathlen
if max_parent is None:
return pathlen
elif pathlen is None:
return max_parent - 1
else:
return min(self.pathlen, max_parent - 1) |
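A worked example of the recursion using a minimal stand-in class (the same property logic, not the original model):
class StubCA:
    def __init__(self, pathlen=None, parent=None):
        self.pathlen = pathlen
        self.parent = parent
    @property
    def max_pathlen(self):
        if self.parent is None:
            return self.pathlen
        max_parent = self.parent.max_pathlen
        if max_parent is None:
            return self.pathlen
        if self.pathlen is None:
            return max_parent - 1
        return min(self.pathlen, max_parent - 1)
root = StubCA(pathlen=2)            # root allows two levels of intermediates
child = StubCA(parent=root)         # intermediate with no pathlen of its own
print(root.max_pathlen, child.max_pathlen)  # 2 1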
def add(self, rid, data, raise_on_error=True):
"""Write cache data to the data store.
Args:
rid (str): The record identifier.
data (dict): The record data.
            raise_on_error (bool): If True and not r.ok this method will raise a RuntimeError.
Returns:
object : Python request response.
"""
cache_data = {'cache-date': self._dt_to_epoch(datetime.now()), 'cache-data': data}
return self.ds.post(rid, cache_data, raise_on_error) | Write cache data to the data store.
Args:
rid (str): The record identifier.
data (dict): The record data.
            raise_on_error (bool): If True and not r.ok this method will raise a RuntimeError.
Returns:
object : Python request response. | Below is the the instruction that describes the task:
### Input:
Write cache data to the data store.
Args:
rid (str): The record identifier.
data (dict): The record data.
    raise_on_error (bool): If True and not r.ok this method will raise a RuntimeError.
Returns:
object : Python request response.
### Response:
def add(self, rid, data, raise_on_error=True):
"""Write cache data to the data store.
Args:
rid (str): The record identifier.
data (dict): The record data.
            raise_on_error (bool): If True and not r.ok this method will raise a RuntimeError.
Returns:
object : Python request response.
"""
cache_data = {'cache-date': self._dt_to_epoch(datetime.now()), 'cache-data': data}
return self.ds.post(rid, cache_data, raise_on_error) |
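A hypothetical call, assuming `cache` is an instance of this class wired to a data store:
response = cache.add(
    rid='indicator-123',                               # made-up record id
    data={'type': 'Address', 'value': '10.0.0.1'},     # made-up record payload
    raise_on_error=True,
)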
def GET_close_server(self) -> None:
"""Stop and close the *HydPy* server."""
def _close_server():
self.server.shutdown()
self.server.server_close()
shutter = threading.Thread(target=_close_server)
        shutter.daemon = True
shutter.start() | Stop and close the *HydPy* server. | Below is the the instruction that describes the task:
### Input:
Stop and close the *HydPy* server.
### Response:
def GET_close_server(self) -> None:
"""Stop and close the *HydPy* server."""
def _close_server():
self.server.shutdown()
self.server.server_close()
shutter = threading.Thread(target=_close_server)
        shutter.daemon = True
shutter.start() |
def info(vm, info_type='all', key='uuid'):
'''
Lookup info on running kvm
vm : string
vm to be targeted
info_type : string [all|block|blockstats|chardev|cpus|kvm|pci|spice|version|vnc]
info type to return
key : string [uuid|alias|hostname]
value type of 'vm' parameter
CLI Example:
.. code-block:: bash
salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543
salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543 vnc
salt '*' vmadm.info nacl key=alias
salt '*' vmadm.info nacl vnc key=alias
'''
ret = {}
if info_type not in ['all', 'block', 'blockstats', 'chardev', 'cpus', 'kvm', 'pci', 'spice', 'version', 'vnc']:
ret['Error'] = 'Requested info_type is not available'
return ret
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret
vm = lookup('{0}={1}'.format(key, vm), one=True)
if 'Error' in vm:
return vm
# vmadm info <uuid> [type,...]
cmd = 'vmadm info {uuid} {type}'.format(
uuid=vm,
type=info_type
)
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
if retcode != 0:
ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
return ret
return salt.utils.json.loads(res['stdout']) | Lookup info on running kvm
vm : string
vm to be targeted
info_type : string [all|block|blockstats|chardev|cpus|kvm|pci|spice|version|vnc]
info type to return
key : string [uuid|alias|hostname]
value type of 'vm' parameter
CLI Example:
.. code-block:: bash
salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543
salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543 vnc
salt '*' vmadm.info nacl key=alias
salt '*' vmadm.info nacl vnc key=alias | Below is the the instruction that describes the task:
### Input:
Lookup info on running kvm
vm : string
vm to be targeted
info_type : string [all|block|blockstats|chardev|cpus|kvm|pci|spice|version|vnc]
info type to return
key : string [uuid|alias|hostname]
value type of 'vm' parameter
CLI Example:
.. code-block:: bash
salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543
salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543 vnc
salt '*' vmadm.info nacl key=alias
salt '*' vmadm.info nacl vnc key=alias
### Response:
def info(vm, info_type='all', key='uuid'):
'''
Lookup info on running kvm
vm : string
vm to be targeted
info_type : string [all|block|blockstats|chardev|cpus|kvm|pci|spice|version|vnc]
info type to return
key : string [uuid|alias|hostname]
value type of 'vm' parameter
CLI Example:
.. code-block:: bash
salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543
salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543 vnc
salt '*' vmadm.info nacl key=alias
salt '*' vmadm.info nacl vnc key=alias
'''
ret = {}
if info_type not in ['all', 'block', 'blockstats', 'chardev', 'cpus', 'kvm', 'pci', 'spice', 'version', 'vnc']:
ret['Error'] = 'Requested info_type is not available'
return ret
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret
vm = lookup('{0}={1}'.format(key, vm), one=True)
if 'Error' in vm:
return vm
# vmadm info <uuid> [type,...]
cmd = 'vmadm info {uuid} {type}'.format(
uuid=vm,
type=info_type
)
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
if retcode != 0:
ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
return ret
return salt.utils.json.loads(res['stdout']) |
def get_ts_stats_significance(self, x, ts, stat_ts_func, null_ts_func, B=1000, permute_fast=False, label_ts=''):
""" Returns the statistics, pvalues and the actual number of bootstrap
samples. """
stats_ts, pvals, nums = ts_stats_significance(
ts, stat_ts_func, null_ts_func, B=B, permute_fast=permute_fast)
return stats_ts, pvals, nums | Returns the statistics, pvalues and the actual number of bootstrap
samples. | Below is the the instruction that describes the task:
### Input:
Returns the statistics, pvalues and the actual number of bootstrap
samples.
### Response:
def get_ts_stats_significance(self, x, ts, stat_ts_func, null_ts_func, B=1000, permute_fast=False, label_ts=''):
""" Returns the statistics, pvalues and the actual number of bootstrap
samples. """
stats_ts, pvals, nums = ts_stats_significance(
ts, stat_ts_func, null_ts_func, B=B, permute_fast=permute_fast)
return stats_ts, pvals, nums |
def synchelp(f):
'''
The synchelp decorator allows the transparent execution of
a coroutine using the global loop from a thread other than
    the event loop. In both use cases, the actual work is done
by the global event loop.
Examples:
Use as a decorator::
@s_glob.synchelp
async def stuff(x, y):
await dostuff()
Calling the stuff function as regular async code using the standard await syntax::
valu = await stuff(x, y)
Calling the stuff function as regular sync code outside of the event loop thread::
valu = stuff(x, y)
'''
def wrap(*args, **kwargs):
coro = f(*args, **kwargs)
if not iAmLoop():
return sync(coro)
return coro
return wrap | The synchelp decorator allows the transparent execution of
a coroutine using the global loop from a thread other than
the event loop. In both use cases, the actual work is done
by the global event loop.
Examples:
Use as a decorator::
@s_glob.synchelp
async def stuff(x, y):
await dostuff()
Calling the stuff function as regular async code using the standard await syntax::
valu = await stuff(x, y)
Calling the stuff function as regular sync code outside of the event loop thread::
valu = stuff(x, y) | Below is the the instruction that describes the task:
### Input:
The synchelp decorator allows the transparent execution of
a coroutine using the global loop from a thread other than
the event loop. In both use cases, the actual work is done
by the global event loop.
Examples:
Use as a decorator::
@s_glob.synchelp
async def stuff(x, y):
await dostuff()
Calling the stuff function as regular async code using the standard await syntax::
valu = await stuff(x, y)
Calling the stuff function as regular sync code outside of the event loop thread::
valu = stuff(x, y)
### Response:
def synchelp(f):
'''
The synchelp decorator allows the transparent execution of
a coroutine using the global loop from a thread other than
    the event loop. In both use cases, the actual work is done
by the global event loop.
Examples:
Use as a decorator::
@s_glob.synchelp
async def stuff(x, y):
await dostuff()
Calling the stuff function as regular async code using the standard await syntax::
valu = await stuff(x, y)
Calling the stuff function as regular sync code outside of the event loop thread::
valu = stuff(x, y)
'''
def wrap(*args, **kwargs):
coro = f(*args, **kwargs)
if not iAmLoop():
return sync(coro)
return coro
return wrap |
def on_message(self, fragment):
''' Process an individual wire protocol fragment.
The websocket RFC specifies opcodes for distinguishing text frames
from binary frames. Tornado passes us either a text or binary string
depending on that opcode, we have to look at the type of the fragment
to see what we got.
Args:
fragment (unicode or bytes) : wire fragment to process
'''
# We shouldn't throw exceptions from on_message because the caller is
# just Tornado and it doesn't know what to do with them other than
# report them as an unhandled Future
try:
message = yield self._receive(fragment)
except Exception as e:
# If you go look at self._receive, it's catching the
# expected error types... here we have something weird.
log.error("Unhandled exception receiving a message: %r: %r", e, fragment, exc_info=True)
self._internal_error("server failed to parse a message")
try:
if message:
if _message_test_port is not None:
_message_test_port.received.append(message)
work = yield self._handle(message)
if work:
yield self._schedule(work)
except Exception as e:
log.error("Handler or its work threw an exception: %r: %r", e, message, exc_info=True)
self._internal_error("server failed to handle a message")
raise gen.Return(None) | Process an individual wire protocol fragment.
The websocket RFC specifies opcodes for distinguishing text frames
from binary frames. Tornado passes us either a text or binary string
depending on that opcode, we have to look at the type of the fragment
to see what we got.
Args:
fragment (unicode or bytes) : wire fragment to process | Below is the the instruction that describes the task:
### Input:
Process an individual wire protocol fragment.
The websocket RFC specifies opcodes for distinguishing text frames
from binary frames. Tornado passes us either a text or binary string
depending on that opcode, we have to look at the type of the fragment
to see what we got.
Args:
fragment (unicode or bytes) : wire fragment to process
### Response:
def on_message(self, fragment):
''' Process an individual wire protocol fragment.
The websocket RFC specifies opcodes for distinguishing text frames
from binary frames. Tornado passes us either a text or binary string
depending on that opcode, we have to look at the type of the fragment
to see what we got.
Args:
fragment (unicode or bytes) : wire fragment to process
'''
# We shouldn't throw exceptions from on_message because the caller is
# just Tornado and it doesn't know what to do with them other than
# report them as an unhandled Future
try:
message = yield self._receive(fragment)
except Exception as e:
# If you go look at self._receive, it's catching the
# expected error types... here we have something weird.
log.error("Unhandled exception receiving a message: %r: %r", e, fragment, exc_info=True)
self._internal_error("server failed to parse a message")
try:
if message:
if _message_test_port is not None:
_message_test_port.received.append(message)
work = yield self._handle(message)
if work:
yield self._schedule(work)
except Exception as e:
log.error("Handler or its work threw an exception: %r: %r", e, message, exc_info=True)
self._internal_error("server failed to handle a message")
raise gen.Return(None) |
def weld_combine_scalars(scalars, weld_type):
"""Combine column-wise aggregations (so resulting scalars) into a single array.
Parameters
----------
scalars : tuple of WeldObjects
WeldObjects to combine.
weld_type : WeldType
The Weld type of the result. Currently expecting scalars to be of the same type.
Returns
-------
WeldObject
Representation of this computation.
"""
weld_obj = create_empty_weld_object()
obj_ids = (get_weld_obj_id(weld_obj, scalar) for scalar in scalars)
merges = '\n'.join(('let res = merge(res, {});'.format(obj_id) for obj_id in obj_ids))
weld_template = """let res = appender[{type}];
{merges}
result(res)
"""
weld_obj.weld_code = weld_template.format(type=weld_type,
merges=merges)
return weld_obj | Combine column-wise aggregations (so resulting scalars) into a single array.
Parameters
----------
scalars : tuple of WeldObjects
WeldObjects to combine.
weld_type : WeldType
The Weld type of the result. Currently expecting scalars to be of the same type.
Returns
-------
WeldObject
Representation of this computation. | Below is the the instruction that describes the task:
### Input:
Combine column-wise aggregations (so resulting scalars) into a single array.
Parameters
----------
scalars : tuple of WeldObjects
WeldObjects to combine.
weld_type : WeldType
The Weld type of the result. Currently expecting scalars to be of the same type.
Returns
-------
WeldObject
Representation of this computation.
### Response:
def weld_combine_scalars(scalars, weld_type):
"""Combine column-wise aggregations (so resulting scalars) into a single array.
Parameters
----------
scalars : tuple of WeldObjects
WeldObjects to combine.
weld_type : WeldType
The Weld type of the result. Currently expecting scalars to be of the same type.
Returns
-------
WeldObject
Representation of this computation.
"""
weld_obj = create_empty_weld_object()
obj_ids = (get_weld_obj_id(weld_obj, scalar) for scalar in scalars)
merges = '\n'.join(('let res = merge(res, {});'.format(obj_id) for obj_id in obj_ids))
weld_template = """let res = appender[{type}];
{merges}
result(res)
"""
weld_obj.weld_code = weld_template.format(type=weld_type,
merges=merges)
return weld_obj |
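To make the generated program concrete, here is the template filled in for two i64 scalars, with placeholder ids standing in for the real WeldObject ids:
weld_template = """let res = appender[{type}];
{merges}
result(res)
"""
merges = '\n'.join('let res = merge(res, {});'.format(obj_id)
                   for obj_id in ('obj100', 'obj101'))
print(weld_template.format(type='i64', merges=merges))
# let res = appender[i64];
# let res = merge(res, obj100);
# let res = merge(res, obj101);
# result(res)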
def send_response(self, transaction):
"""
updates the cache with the response if there was a cache miss
:param transaction:
:return:
"""
if transaction.cacheHit is False:
"""
handling response based on the code
"""
logger.debug("handling response")
self._handle_response(transaction)
return transaction | updates the cache with the response if there was a cache miss
:param transaction:
:return: | Below is the the instruction that describes the task:
### Input:
updates the cache with the response if there was a cache miss
:param transaction:
:return:
### Response:
def send_response(self, transaction):
"""
updates the cache with the response if there was a cache miss
:param transaction:
:return:
"""
if transaction.cacheHit is False:
"""
handling response based on the code
"""
logger.debug("handling response")
self._handle_response(transaction)
return transaction |
def arg(self, state, index, stack_base=None):
"""
Returns a bitvector expression representing the nth argument of a function.
`stack_base` is an optional pointer to the top of the stack at the function start. If it is not
specified, use the current stack pointer.
WARNING: this assumes that none of the arguments are floating-point and they're all single-word-sized, unless
you've customized this CC.
"""
session = self.arg_session
if self.args is None:
arg_loc = [session.next_arg(False) for _ in range(index + 1)][-1]
else:
arg_loc = self.args[index]
return arg_loc.get_value(state, stack_base=stack_base) | Returns a bitvector expression representing the nth argument of a function.
`stack_base` is an optional pointer to the top of the stack at the function start. If it is not
specified, use the current stack pointer.
WARNING: this assumes that none of the arguments are floating-point and they're all single-word-sized, unless
you've customized this CC. | Below is the the instruction that describes the task:
### Input:
Returns a bitvector expression representing the nth argument of a function.
`stack_base` is an optional pointer to the top of the stack at the function start. If it is not
specified, use the current stack pointer.
WARNING: this assumes that none of the arguments are floating-point and they're all single-word-sized, unless
you've customized this CC.
### Response:
def arg(self, state, index, stack_base=None):
"""
Returns a bitvector expression representing the nth argument of a function.
`stack_base` is an optional pointer to the top of the stack at the function start. If it is not
specified, use the current stack pointer.
WARNING: this assumes that none of the arguments are floating-point and they're all single-word-sized, unless
you've customized this CC.
"""
session = self.arg_session
if self.args is None:
arg_loc = [session.next_arg(False) for _ in range(index + 1)][-1]
else:
arg_loc = self.args[index]
return arg_loc.get_value(state, stack_base=stack_base) |
def UpsertStoredProcedure(self, collection_link, sproc, options=None):
"""Upserts a stored procedure in a collection.
:param str collection_link:
The link to the document collection.
:param str sproc:
:param dict options:
The request options for the request.
:return:
The upserted Stored Procedure.
:rtype:
dict
"""
if options is None:
options = {}
collection_id, path, sproc = self._GetContainerIdWithPathForSproc(collection_link, sproc)
return self.Upsert(sproc,
path,
'sprocs',
collection_id,
None,
options) | Upserts a stored procedure in a collection.
:param str collection_link:
The link to the document collection.
:param str sproc:
:param dict options:
The request options for the request.
:return:
The upserted Stored Procedure.
:rtype:
dict | Below is the the instruction that describes the task:
### Input:
Upserts a stored procedure in a collection.
:param str collection_link:
The link to the document collection.
:param str sproc:
:param dict options:
The request options for the request.
:return:
The upserted Stored Procedure.
:rtype:
dict
### Response:
def UpsertStoredProcedure(self, collection_link, sproc, options=None):
"""Upserts a stored procedure in a collection.
:param str collection_link:
The link to the document collection.
:param str sproc:
:param dict options:
The request options for the request.
:return:
The upserted Stored Procedure.
:rtype:
dict
"""
if options is None:
options = {}
collection_id, path, sproc = self._GetContainerIdWithPathForSproc(collection_link, sproc)
return self.Upsert(sproc,
path,
'sprocs',
collection_id,
None,
options) |
def GetOobResult(self, param, user_ip, gitkit_token=None):
"""Gets out-of-band code for ResetPassword/ChangeEmail request.
Args:
param: dict of HTTP POST params
user_ip: string, end user's IP address
gitkit_token: string, the gitkit token if user logged in
Returns:
A dict of {
email: user email who initializes the request
new_email: the requested new email, for ChangeEmail action only
        oob_link: the generated link to be sent to user's email
oob_code: the one time out-of-band code
action: OobAction
response_body: the http body to be returned to Gitkit widget
}
"""
if 'action' in param:
try:
if param['action'] == GitkitClient.RESET_PASSWORD_ACTION:
request = self._PasswordResetRequest(param, user_ip)
oob_code, oob_link = self._BuildOobLink(request,
param['action'])
return {
'action': GitkitClient.RESET_PASSWORD_ACTION,
'email': param['email'],
'oob_link': oob_link,
'oob_code': oob_code,
'response_body': simplejson.dumps({'success': True})
}
elif param['action'] == GitkitClient.CHANGE_EMAIL_ACTION:
if not gitkit_token:
return self._FailureOobResponse('login is required')
request = self._ChangeEmailRequest(param, user_ip, gitkit_token)
oob_code, oob_link = self._BuildOobLink(request,
param['action'])
return {
'action': GitkitClient.CHANGE_EMAIL_ACTION,
'email': param['oldEmail'],
'new_email': param['newEmail'],
'oob_link': oob_link,
'oob_code': oob_code,
'response_body': simplejson.dumps({'success': True})
}
except errors.GitkitClientError as error:
return self._FailureOobResponse(error.value)
return self._FailureOobResponse('unknown request type') | Gets out-of-band code for ResetPassword/ChangeEmail request.
Args:
param: dict of HTTP POST params
user_ip: string, end user's IP address
gitkit_token: string, the gitkit token if user logged in
Returns:
A dict of {
email: user email who initializes the request
new_email: the requested new email, for ChangeEmail action only
    oob_link: the generated link to be sent to user's email
oob_code: the one time out-of-band code
action: OobAction
response_body: the http body to be returned to Gitkit widget
} | Below is the the instruction that describes the task:
### Input:
Gets out-of-band code for ResetPassword/ChangeEmail request.
Args:
param: dict of HTTP POST params
user_ip: string, end user's IP address
gitkit_token: string, the gitkit token if user logged in
Returns:
A dict of {
email: user email who initializes the request
new_email: the requested new email, for ChangeEmail action only
    oob_link: the generated link to be sent to user's email
oob_code: the one time out-of-band code
action: OobAction
response_body: the http body to be returned to Gitkit widget
}
### Response:
def GetOobResult(self, param, user_ip, gitkit_token=None):
"""Gets out-of-band code for ResetPassword/ChangeEmail request.
Args:
param: dict of HTTP POST params
user_ip: string, end user's IP address
gitkit_token: string, the gitkit token if user logged in
Returns:
A dict of {
email: user email who initializes the request
new_email: the requested new email, for ChangeEmail action only
        oob_link: the generated link to be sent to user's email
oob_code: the one time out-of-band code
action: OobAction
response_body: the http body to be returned to Gitkit widget
}
"""
if 'action' in param:
try:
if param['action'] == GitkitClient.RESET_PASSWORD_ACTION:
request = self._PasswordResetRequest(param, user_ip)
oob_code, oob_link = self._BuildOobLink(request,
param['action'])
return {
'action': GitkitClient.RESET_PASSWORD_ACTION,
'email': param['email'],
'oob_link': oob_link,
'oob_code': oob_code,
'response_body': simplejson.dumps({'success': True})
}
elif param['action'] == GitkitClient.CHANGE_EMAIL_ACTION:
if not gitkit_token:
return self._FailureOobResponse('login is required')
request = self._ChangeEmailRequest(param, user_ip, gitkit_token)
oob_code, oob_link = self._BuildOobLink(request,
param['action'])
return {
'action': GitkitClient.CHANGE_EMAIL_ACTION,
'email': param['oldEmail'],
'new_email': param['newEmail'],
'oob_link': oob_link,
'oob_code': oob_code,
'response_body': simplejson.dumps({'success': True})
}
except errors.GitkitClientError as error:
return self._FailureOobResponse(error.value)
return self._FailureOobResponse('unknown request type') |
def reset_rammbock(self):
"""Closes all connections, deletes all servers, clients, and protocols.
You should call this method before exiting your test run. This will
close all the connections and the ports will therefore be available for
reuse faster.
"""
for client in self._clients:
client.close()
for server in self._servers:
server.close()
self._init_caches() | Closes all connections, deletes all servers, clients, and protocols.
You should call this method before exiting your test run. This will
close all the connections and the ports will therefore be available for
reuse faster. | Below is the the instruction that describes the task:
### Input:
Closes all connections, deletes all servers, clients, and protocols.
You should call this method before exiting your test run. This will
close all the connections and the ports will therefore be available for
reuse faster.
### Response:
def reset_rammbock(self):
"""Closes all connections, deletes all servers, clients, and protocols.
You should call this method before exiting your test run. This will
close all the connections and the ports will therefore be available for
reuse faster.
"""
for client in self._clients:
client.close()
for server in self._servers:
server.close()
self._init_caches() |
def _run_vardict_paired(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Detect variants with Vardict.
This is used for paired tumor / normal samples.
"""
config = items[0]["config"]
if out_file is None:
out_file = "%s-paired-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
vrs = bedutils.population_variant_regions(items)
target = shared.subset_variant_regions(vrs, region,
out_file, items=items, do_merge=True)
paired = vcfutils.get_paired_bams(align_bams, items)
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_out_file, config,
samples=[x for x in [paired.tumor_name, paired.normal_name] if x])
else:
if not paired.normal_bam:
ann_file = _run_vardict_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return ann_file
vardict = get_vardict_command(items[0])
vcfstreamsort = config_utils.get_program("vcfstreamsort", config)
compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
freq = float(utils.get_in(config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
# merge bed file regions as amplicon VarDict is only supported in single sample mode
opts, var2vcf_opts = _vardict_options_from_config(items, config, out_file, target)
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
remove_dup = vcfutils.remove_dup_cl()
if any("vardict_somatic_filter" in tz.get_in(("config", "algorithm", "tools_off"), data, [])
for data in items):
somatic_filter = ""
freq_filter = ""
else:
var2vcf_opts += " -M " # this makes VarDict soft filter non-differential variants
somatic_filter = ("| sed 's/\\\\.*Somatic\\\\/Somatic/' "
"| sed 's/REJECT,Description=\".*\">/REJECT,Description=\"Not Somatic via VarDict\">/' "
"""| %s -c 'from bcbio.variation import freebayes; """
"""freebayes.call_somatic("%s", "%s")' """
% (sys.executable, paired.tumor_name, paired.normal_name))
freq_filter = ("| bcftools filter -m '+' -s 'REJECT' -e 'STATUS !~ \".*Somatic\"' 2> /dev/null "
"| %s -x 'bcbio.variation.vardict.add_db_germline_flag(x)' "
"| %s "
"| %s -x 'bcbio.variation.vardict.depth_freq_filter(x, %s, \"%s\")'" %
(os.path.join(os.path.dirname(sys.executable), "py"),
_lowfreq_linear_filter(0, True),
os.path.join(os.path.dirname(sys.executable), "py"),
0, bam.aligner_from_header(paired.tumor_bam)))
jvm_opts = _get_jvm_opts(items[0], tx_out_file)
py_cl = os.path.join(utils.get_bcbio_bin(), "py")
setup = ("%s && unset JAVA_HOME &&" % utils.get_R_exports())
contig_cl = vcfutils.add_contig_to_header_cl(ref_file, tx_out_file)
cmd = ("{setup}{jvm_opts}{vardict} -G {ref_file} "
"-N {paired.tumor_name} -b \"{paired.tumor_bam}|{paired.normal_bam}\" {opts} "
"| awk 'NF>=48' | testsomatic.R "
"| var2vcf_paired.pl -P 0.9 -m 4.25 {var2vcf_opts} "
"-N \"{paired.tumor_name}|{paired.normal_name}\" "
"| {contig_cl} {freq_filter} "
"| bcftools filter -i 'QUAL >= 0' "
"{somatic_filter} | {fix_ambig_ref} | {fix_ambig_alt} | {remove_dup} | {vcfstreamsort} "
"{compress_cmd} > {tx_out_file}")
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
return out_file | Detect variants with Vardict.
    This is used for paired tumor / normal samples. | Below is the instruction that describes the task:
### Input:
Detect variants with Vardict.
This is used for paired tumor / normal samples.
### Response:
def _run_vardict_paired(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Detect variants with Vardict.
This is used for paired tumor / normal samples.
"""
config = items[0]["config"]
if out_file is None:
out_file = "%s-paired-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
vrs = bedutils.population_variant_regions(items)
target = shared.subset_variant_regions(vrs, region,
out_file, items=items, do_merge=True)
paired = vcfutils.get_paired_bams(align_bams, items)
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_out_file, config,
samples=[x for x in [paired.tumor_name, paired.normal_name] if x])
else:
if not paired.normal_bam:
ann_file = _run_vardict_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return ann_file
vardict = get_vardict_command(items[0])
vcfstreamsort = config_utils.get_program("vcfstreamsort", config)
compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
freq = float(utils.get_in(config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
# merge bed file regions as amplicon VarDict is only supported in single sample mode
opts, var2vcf_opts = _vardict_options_from_config(items, config, out_file, target)
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
remove_dup = vcfutils.remove_dup_cl()
if any("vardict_somatic_filter" in tz.get_in(("config", "algorithm", "tools_off"), data, [])
for data in items):
somatic_filter = ""
freq_filter = ""
else:
var2vcf_opts += " -M " # this makes VarDict soft filter non-differential variants
somatic_filter = ("| sed 's/\\\\.*Somatic\\\\/Somatic/' "
"| sed 's/REJECT,Description=\".*\">/REJECT,Description=\"Not Somatic via VarDict\">/' "
"""| %s -c 'from bcbio.variation import freebayes; """
"""freebayes.call_somatic("%s", "%s")' """
% (sys.executable, paired.tumor_name, paired.normal_name))
freq_filter = ("| bcftools filter -m '+' -s 'REJECT' -e 'STATUS !~ \".*Somatic\"' 2> /dev/null "
"| %s -x 'bcbio.variation.vardict.add_db_germline_flag(x)' "
"| %s "
"| %s -x 'bcbio.variation.vardict.depth_freq_filter(x, %s, \"%s\")'" %
(os.path.join(os.path.dirname(sys.executable), "py"),
_lowfreq_linear_filter(0, True),
os.path.join(os.path.dirname(sys.executable), "py"),
0, bam.aligner_from_header(paired.tumor_bam)))
jvm_opts = _get_jvm_opts(items[0], tx_out_file)
py_cl = os.path.join(utils.get_bcbio_bin(), "py")
setup = ("%s && unset JAVA_HOME &&" % utils.get_R_exports())
contig_cl = vcfutils.add_contig_to_header_cl(ref_file, tx_out_file)
cmd = ("{setup}{jvm_opts}{vardict} -G {ref_file} "
"-N {paired.tumor_name} -b \"{paired.tumor_bam}|{paired.normal_bam}\" {opts} "
"| awk 'NF>=48' | testsomatic.R "
"| var2vcf_paired.pl -P 0.9 -m 4.25 {var2vcf_opts} "
"-N \"{paired.tumor_name}|{paired.normal_name}\" "
"| {contig_cl} {freq_filter} "
"| bcftools filter -i 'QUAL >= 0' "
"{somatic_filter} | {fix_ambig_ref} | {fix_ambig_alt} | {remove_dup} | {vcfstreamsort} "
"{compress_cmd} > {tx_out_file}")
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
return out_file |
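
Two of the smaller decisions above lend themselves to a quick, self-contained illustration: converting the configured minimum allele fraction (a percentage) into the fractional threshold, and adding a bgzip step only for gzipped output names. The helpers below are hypothetical simplifications, not bcbio's utils.get_in or config handling.

def allele_fraction_threshold(config, default_pct=10):
    # min_allele_fraction is stored as a percentage, e.g. 5 -> 0.05.
    pct = config.get("algorithm", {}).get("min_allele_fraction", default_pct)
    return float(pct) / 100.0

def compression_step(out_file):
    # Only pipe through bgzip when the target file name asks for gzip output.
    return "| bgzip -c" if out_file.endswith("gz") else ""

print(allele_fraction_threshold({"algorithm": {"min_allele_fraction": 5}}))  # 0.05
print(compression_step("sample-paired-variants.vcf.gz"))                     # | bgzip -c
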
def locked_execute(self, sql, parameters = None, cursorClass = DictCursor, quiet = False):
'''We are lock-happy here but SQL performance is not currently an issue daemon-side.'''
        return self.execute(sql, parameters, cursorClass, quiet = quiet, locked = True) | We are lock-happy here but SQL performance is not currently an issue daemon-side. | Below is the instruction that describes the task:
### Input:
We are lock-happy here but SQL performance is not currently an issue daemon-side.
### Response:
def locked_execute(self, sql, parameters = None, cursorClass = DictCursor, quiet = False):
'''We are lock-happy here but SQL performance is not currently an issue daemon-side.'''
return self.execute(sql, parameters, cursorClass, quiet = quiet, locked = True) |
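
The idea behind locked_execute can be sketched with a plain threading.Lock around a SQLite connection; this is a hypothetical analogue, not the class's real execute() machinery, and it deliberately trades throughput for safety, as the docstring suggests.

import threading
import sqlite3

class LockedDB(object):
    def __init__(self, path=":memory:"):
        self._conn = sqlite3.connect(path, check_same_thread=False)
        self._lock = threading.Lock()

    def locked_execute(self, sql, parameters=()):
        # Serialize every statement; SQL performance is not the concern here.
        with self._lock:
            cur = self._conn.execute(sql, parameters)
            return cur.fetchall()

db = LockedDB()
db.locked_execute("CREATE TABLE t (x INTEGER)")
db.locked_execute("INSERT INTO t VALUES (?)", (1,))
print(db.locked_execute("SELECT x FROM t"))  # [(1,)]
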
def _handle_args(self, cmd, args):
"""
We need to support deprecated behaviour for now which makes this
quite complicated
Current behaviour:
- install: Installs a new server, existing server causes an error
- install --upgrade: Installs or upgrades a server
- install --managedb: Automatically initialise or upgrade the db
Deprecated:
- install --upgradedb --initdb: Replaced by install --managedb
- install --upgradedb: upgrade the db, must exist
- install --initdb: initialise the db
- upgrade: Upgrades a server, must already exist
- upgrade --upgradedb: Automatically upgrade the db
returns:
- Modified args object, flag to indicate new/existing/auto install
"""
if cmd == 'install':
if args.upgrade:
# Current behaviour: install or upgrade
if args.initdb or args.upgradedb:
raise Stop(10, (
'Deprecated --initdb --upgradedb flags '
'are incompatible with --upgrade'))
newinstall = None
else:
# Current behaviour: Server must not exist
newinstall = True
if args.managedb:
# Current behaviour
if args.initdb or args.upgradedb:
raise Stop(10, (
'Deprecated --initdb --upgradedb flags '
'are incompatible with --managedb'))
args.initdb = True
args.upgradedb = True
else:
if args.initdb or args.upgradedb:
log.warn('--initdb and --upgradedb are deprecated, '
'use --managedb')
elif cmd == 'upgrade':
# Deprecated behaviour
log.warn(
'"omero upgrade" is deprecated, use "omego install --upgrade"')
cmd = 'install'
args.upgrade = True
# Deprecated behaviour: Server must exist
newinstall = False
else:
raise Exception('Unexpected command: %s' % cmd)
return args, newinstall | We need to support deprecated behaviour for now which makes this
quite complicated
Current behaviour:
- install: Installs a new server, existing server causes an error
- install --upgrade: Installs or upgrades a server
- install --managedb: Automatically initialise or upgrade the db
Deprecated:
- install --upgradedb --initdb: Replaced by install --managedb
- install --upgradedb: upgrade the db, must exist
- install --initdb: initialise the db
- upgrade: Upgrades a server, must already exist
- upgrade --upgradedb: Automatically upgrade the db
returns:
        - Modified args object, flag to indicate new/existing/auto install | Below is the instruction that describes the task:
### Input:
We need to support deprecated behaviour for now which makes this
quite complicated
Current behaviour:
- install: Installs a new server, existing server causes an error
- install --upgrade: Installs or upgrades a server
- install --managedb: Automatically initialise or upgrade the db
Deprecated:
- install --upgradedb --initdb: Replaced by install --managedb
- install --upgradedb: upgrade the db, must exist
- install --initdb: initialise the db
- upgrade: Upgrades a server, must already exist
- upgrade --upgradedb: Automatically upgrade the db
returns:
- Modified args object, flag to indicate new/existing/auto install
### Response:
def _handle_args(self, cmd, args):
"""
We need to support deprecated behaviour for now which makes this
quite complicated
Current behaviour:
- install: Installs a new server, existing server causes an error
- install --upgrade: Installs or upgrades a server
- install --managedb: Automatically initialise or upgrade the db
Deprecated:
- install --upgradedb --initdb: Replaced by install --managedb
- install --upgradedb: upgrade the db, must exist
- install --initdb: initialise the db
- upgrade: Upgrades a server, must already exist
- upgrade --upgradedb: Automatically upgrade the db
returns:
- Modified args object, flag to indicate new/existing/auto install
"""
if cmd == 'install':
if args.upgrade:
# Current behaviour: install or upgrade
if args.initdb or args.upgradedb:
raise Stop(10, (
'Deprecated --initdb --upgradedb flags '
'are incompatible with --upgrade'))
newinstall = None
else:
# Current behaviour: Server must not exist
newinstall = True
if args.managedb:
# Current behaviour
if args.initdb or args.upgradedb:
raise Stop(10, (
'Deprecated --initdb --upgradedb flags '
'are incompatible with --managedb'))
args.initdb = True
args.upgradedb = True
else:
if args.initdb or args.upgradedb:
log.warn('--initdb and --upgradedb are deprecated, '
'use --managedb')
elif cmd == 'upgrade':
# Deprecated behaviour
log.warn(
'"omero upgrade" is deprecated, use "omego install --upgrade"')
cmd = 'install'
args.upgrade = True
# Deprecated behaviour: Server must exist
newinstall = False
else:
raise Exception('Unexpected command: %s' % cmd)
return args, newinstall |
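
The decision table in the docstring reduces to a small function over (command, flags); the condensed version below is a hypothetical restatement for illustration and omits the error paths for the deprecated --initdb/--upgradedb combinations.

from argparse import Namespace

def newinstall_flag(cmd, args):
    if cmd == 'upgrade':          # deprecated alias for "install --upgrade"
        return False              # server must already exist
    if args.upgrade:
        return None               # install or upgrade, whichever applies
    return True                   # fresh install; an existing server is an error

print(newinstall_flag('install', Namespace(upgrade=False)))  # True
print(newinstall_flag('install', Namespace(upgrade=True)))   # None
print(newinstall_flag('upgrade', Namespace(upgrade=False)))  # False
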
def log_message(self, format, *args):
"""
overrides the ``log_message`` method from the wsgiref server so that
normal logging works with whatever configuration the application has
been set to.
Levels are inferred from the HTTP status code, 4XX codes are treated as
warnings, 5XX as errors and everything else as INFO level.
"""
code = args[1][0]
levels = {
'4': 'warning',
'5': 'error'
}
log_handler = getattr(logger, levels.get(code, 'info'))
log_handler(format % args) | overrides the ``log_message`` method from the wsgiref server so that
normal logging works with whatever configuration the application has
been set to.
Levels are inferred from the HTTP status code, 4XX codes are treated as
warnings, 5XX as errors and everything else as INFO level. | Below is the instruction that describes the task:
### Input:
overrides the ``log_message`` method from the wsgiref server so that
normal logging works with whatever configuration the application has
been set to.
Levels are inferred from the HTTP status code, 4XX codes are treated as
warnings, 5XX as errors and everything else as INFO level.
### Response:
def log_message(self, format, *args):
"""
overrides the ``log_message`` method from the wsgiref server so that
normal logging works with whatever configuration the application has
been set to.
Levels are inferred from the HTTP status code, 4XX codes are treated as
warnings, 5XX as errors and everything else as INFO level.
"""
code = args[1][0]
levels = {
'4': 'warning',
'5': 'error'
}
log_handler = getattr(logger, levels.get(code, 'info'))
log_handler(format % args) |
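
The status-code-to-level mapping is easy to exercise on its own; the snippet below is a self-contained sketch (the logger name and format are made up) showing 2xx/3xx landing at INFO, 4xx at WARNING and 5xx at ERROR.

import logging

logging.basicConfig(level=logging.INFO, format="%(levelname)s %(message)s")
logger = logging.getLogger("wsgi.access")

def log_request(status, request_line):
    # Pick the logger method from the first digit of the HTTP status code.
    levels = {'4': 'warning', '5': 'error'}
    handler = getattr(logger, levels.get(status[0], 'info'))
    handler('"%s" %s', request_line, status)

log_request('200', 'GET / HTTP/1.1')         # INFO
log_request('404', 'GET /missing HTTP/1.1')  # WARNING
log_request('500', 'POST /api HTTP/1.1')     # ERROR
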
def get_transaction_result(
self,
transaction: BaseOrSpoofTransaction,
at_header: BlockHeader) -> bytes:
"""
Return the result of running the given transaction.
This is referred to as a `call()` in web3.
"""
with self.get_vm(at_header).state_in_temp_block() as state:
computation = state.costless_execute_transaction(transaction)
computation.raise_if_error()
return computation.output | Return the result of running the given transaction.
        This is referred to as a `call()` in web3. | Below is the instruction that describes the task:
### Input:
Return the result of running the given transaction.
This is referred to as a `call()` in web3.
### Response:
def get_transaction_result(
self,
transaction: BaseOrSpoofTransaction,
at_header: BlockHeader) -> bytes:
"""
Return the result of running the given transaction.
This is referred to as a `call()` in web3.
"""
with self.get_vm(at_header).state_in_temp_block() as state:
computation = state.costless_execute_transaction(transaction)
computation.raise_if_error()
return computation.output |
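
For readers more familiar with web3.py, the rough client-side analogue of this costless execution is eth_call; the snippet below assumes a node reachable at the given RPC URL and uses placeholder address/calldata values, so treat it as a sketch rather than part of this chain API.

from web3 import Web3

w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:8545"))  # assumed local node
call_tx = {
    "to": "0x000000000000000000000000000000000000dEaD",  # placeholder address
    "data": "0x",                                        # placeholder calldata
}
# eth_call runs the transaction against a block without mining it,
# much like executing it in a temporary, costless block above.
result = w3.eth.call(call_tx, "latest")
print(result.hex())
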
def _ScanFileSystem(self, scan_node, base_path_specs):
"""Scans a file system scan node for file systems.
Args:
scan_node (SourceScanNode): file system scan node.
base_path_specs (list[PathSpec]): file system base path specifications.
Raises:
SourceScannerError: if the scan node is invalid.
"""
if not scan_node or not scan_node.path_spec:
raise errors.SourceScannerError(
'Invalid or missing file system scan node.')
base_path_specs.append(scan_node.path_spec) | Scans a file system scan node for file systems.
Args:
scan_node (SourceScanNode): file system scan node.
base_path_specs (list[PathSpec]): file system base path specifications.
Raises:
      SourceScannerError: if the scan node is invalid. | Below is the instruction that describes the task:
### Input:
Scans a file system scan node for file systems.
Args:
scan_node (SourceScanNode): file system scan node.
base_path_specs (list[PathSpec]): file system base path specifications.
Raises:
SourceScannerError: if the scan node is invalid.
### Response:
def _ScanFileSystem(self, scan_node, base_path_specs):
"""Scans a file system scan node for file systems.
Args:
scan_node (SourceScanNode): file system scan node.
base_path_specs (list[PathSpec]): file system base path specifications.
Raises:
SourceScannerError: if the scan node is invalid.
"""
if not scan_node or not scan_node.path_spec:
raise errors.SourceScannerError(
'Invalid or missing file system scan node.')
base_path_specs.append(scan_node.path_spec) |
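
The behaviour here is simply validate-then-collect; the stand-ins below (ScanNodeError, FakeScanNode) are illustrative only and are not dfVFS classes.

class ScanNodeError(Exception):
    pass

class FakeScanNode(object):
    def __init__(self, path_spec):
        self.path_spec = path_spec

def collect_file_system(scan_node, base_path_specs):
    # Reject empty nodes, otherwise record the node's path specification.
    if not scan_node or not scan_node.path_spec:
        raise ScanNodeError('Invalid or missing file system scan node.')
    base_path_specs.append(scan_node.path_spec)

specs = []
collect_file_system(FakeScanNode('/fake/path/spec'), specs)
print(specs)  # ['/fake/path/spec']
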
def decode(data):
"""
Handles decoding of the CSV `data`.
Args:
data (str): Data which will be decoded.
Returns:
dict: Dictionary with decoded data.
"""
# try to guess dialect of the csv file
dialect = None
try:
dialect = csv.Sniffer().sniff(data)
except Exception:
pass
# parse data with csv parser
handler = None
try:
data = data.splitlines() # used later
handler = csv.reader(data, dialect)
except Exception, e:
raise MetaParsingException("Can't parse your CSV data: %s" % e.message)
# make sure, that data are meaningful
decoded = []
for cnt, line in enumerate(handler):
usable_data = filter(lambda x: x.strip(), line)
if not usable_data:
continue
if len(usable_data) != 2:
raise MetaParsingException(
"Bad number of elements - line %d:\n\t%s\n" % (cnt, data[cnt])
)
# remove trailing spaces, decode to utf-8
usable_data = map(lambda x: x.strip().decode("utf-8"), usable_data)
# remove quotes if the csv.Sniffer failed to decode right `dialect`
usable_data = map(lambda x: _remove_quotes(x), usable_data)
decoded.append(usable_data)
# apply another checks to data
decoded = validator.check_structure(decoded)
return decoded | Handles decoding of the CSV `data`.
Args:
data (str): Data which will be decoded.
Returns:
        dict: Dictionary with decoded data. | Below is the instruction that describes the task:
### Input:
Handles decoding of the CSV `data`.
Args:
data (str): Data which will be decoded.
Returns:
dict: Dictionary with decoded data.
### Response:
def decode(data):
"""
Handles decoding of the CSV `data`.
Args:
data (str): Data which will be decoded.
Returns:
dict: Dictionary with decoded data.
"""
# try to guess dialect of the csv file
dialect = None
try:
dialect = csv.Sniffer().sniff(data)
except Exception:
pass
# parse data with csv parser
handler = None
try:
data = data.splitlines() # used later
handler = csv.reader(data, dialect)
except Exception, e:
raise MetaParsingException("Can't parse your CSV data: %s" % e.message)
# make sure, that data are meaningful
decoded = []
for cnt, line in enumerate(handler):
usable_data = filter(lambda x: x.strip(), line)
if not usable_data:
continue
if len(usable_data) != 2:
raise MetaParsingException(
"Bad number of elements - line %d:\n\t%s\n" % (cnt, data[cnt])
)
# remove trailing spaces, decode to utf-8
usable_data = map(lambda x: x.strip().decode("utf-8"), usable_data)
# remove quotes if the csv.Sniffer failed to decode right `dialect`
usable_data = map(lambda x: _remove_quotes(x), usable_data)
decoded.append(usable_data)
# apply another checks to data
decoded = validator.check_structure(decoded)
return decoded |
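
A Python 3 flavoured sketch of the same sniff-then-parse approach is below; it keeps the dialect guessing and the two-column check but drops the project-specific validator and exception types, so it is an approximation rather than the module's real decode().

import csv

def decode_pairs(data):
    try:
        dialect = csv.Sniffer().sniff(data)
    except csv.Error:
        dialect = csv.excel  # fall back to the default comma dialect
    decoded = []
    for cnt, line in enumerate(csv.reader(data.splitlines(), dialect)):
        usable = [field.strip() for field in line if field.strip()]
        if not usable:
            continue
        if len(usable) != 2:
            raise ValueError("Bad number of elements - line %d" % cnt)
        decoded.append(usable)
    return decoded

print(decode_pairs("isbn,978-80-85979-89-6\ntitle,Example"))
# [['isbn', '978-80-85979-89-6'], ['title', 'Example']]
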
def dump(self):
"""
Dump the output to json.
"""
report_as_json_string = utils.dict_to_json(self.report)
if self.out_file:
utils.string_to_file(self.out_file, report_as_json_string)
else:
            print report_as_json_string | Dump the output to json. | Below is the instruction that describes the task:
### Input:
Dump the output to json.
### Response:
def dump(self):
"""
Dump the output to json.
"""
report_as_json_string = utils.dict_to_json(self.report)
if self.out_file:
utils.string_to_file(self.out_file, report_as_json_string)
else:
print report_as_json_string |
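
The dump-or-print behaviour can be mirrored with the standard json module; the helper below is a self-contained approximation and does not use the project's utils wrappers.

import json

def dump_report(report, out_file=None):
    payload = json.dumps(report, indent=2)
    if out_file:
        with open(out_file, "w") as handle:
            handle.write(payload)
    else:
        print(payload)

dump_report({"status": "ok", "items": 3})  # prints the report as JSON
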
def get_siblings_score(self, top_node):
"""\
we could have long articles that have tons of paragraphs
so if we tried to calculate the base score against
the total text score of those paragraphs it would be unfair.
So we need to normalize the score based on the average scoring
of the paragraphs within the top node.
For example if our total score of 10 paragraphs was 1000
but each had an average value of 100 then 100 should be our base.
"""
base = 100000
paragraphs_number = 0
paragraphs_score = 0
nodes_to_check = self.parser.getElementsByTag(top_node, tag='p')
for node in nodes_to_check:
text_node = self.parser.getText(node)
word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(text_node)
high_link_density = self.is_highlink_density(node)
if word_stats.get_stopword_count() > 2 and not high_link_density:
paragraphs_number += 1
paragraphs_score += word_stats.get_stopword_count()
if paragraphs_number > 0:
base = paragraphs_score / paragraphs_number
return base | \
we could have long articles that have tons of paragraphs
so if we tried to calculate the base score against
the total text score of those paragraphs it would be unfair.
So we need to normalize the score based on the average scoring
of the paragraphs within the top node.
For example if our total score of 10 paragraphs was 1000
        but each had an average value of 100 then 100 should be our base. | Below is the instruction that describes the task:
### Input:
\
we could have long articles that have tons of paragraphs
so if we tried to calculate the base score against
the total text score of those paragraphs it would be unfair.
So we need to normalize the score based on the average scoring
of the paragraphs within the top node.
For example if our total score of 10 paragraphs was 1000
but each had an average value of 100 then 100 should be our base.
### Response:
def get_siblings_score(self, top_node):
"""\
we could have long articles that have tons of paragraphs
so if we tried to calculate the base score against
the total text score of those paragraphs it would be unfair.
So we need to normalize the score based on the average scoring
of the paragraphs within the top node.
For example if our total score of 10 paragraphs was 1000
but each had an average value of 100 then 100 should be our base.
"""
base = 100000
paragraphs_number = 0
paragraphs_score = 0
nodes_to_check = self.parser.getElementsByTag(top_node, tag='p')
for node in nodes_to_check:
text_node = self.parser.getText(node)
word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(text_node)
high_link_density = self.is_highlink_density(node)
if word_stats.get_stopword_count() > 2 and not high_link_density:
paragraphs_number += 1
paragraphs_score += word_stats.get_stopword_count()
if paragraphs_number > 0:
base = paragraphs_score / paragraphs_number
return base |
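
The normalization the docstring describes is just an average; the toy numbers below reproduce its own example (10 paragraphs totalling 1000 give a base of 100) and are not tied to the real stopword counter.

paragraph_scores = [100] * 10          # pretend stopword counts per <p> node
qualifying = [s for s in paragraph_scores if s > 2]

base = 100000                          # fallback used when nothing qualifies
if qualifying:
    base = sum(qualifying) / len(qualifying)

print(base)  # 100.0
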
def _next_slide_partname(self):
"""
Return |PackURI| instance containing the partname for a slide to be
appended to this slide collection, e.g. ``/ppt/slides/slide9.xml``
for a slide collection containing 8 slides.
"""
sldIdLst = self._element.get_or_add_sldIdLst()
partname_str = '/ppt/slides/slide%d.xml' % (len(sldIdLst)+1)
return PackURI(partname_str) | Return |PackURI| instance containing the partname for a slide to be
appended to this slide collection, e.g. ``/ppt/slides/slide9.xml``
        for a slide collection containing 8 slides. | Below is the instruction that describes the task:
### Input:
Return |PackURI| instance containing the partname for a slide to be
appended to this slide collection, e.g. ``/ppt/slides/slide9.xml``
for a slide collection containing 8 slides.
### Response:
def _next_slide_partname(self):
"""
Return |PackURI| instance containing the partname for a slide to be
appended to this slide collection, e.g. ``/ppt/slides/slide9.xml``
for a slide collection containing 8 slides.
"""
sldIdLst = self._element.get_or_add_sldIdLst()
partname_str = '/ppt/slides/slide%d.xml' % (len(sldIdLst)+1)
return PackURI(partname_str) |
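
The numbering rule is simply the current slide count plus one; the one-liner below illustrates it without python-pptx's PackURI wrapper.

def next_slide_partname(existing_slide_count):
    # A deck with 8 slides gets /ppt/slides/slide9.xml for the new slide.
    return '/ppt/slides/slide%d.xml' % (existing_slide_count + 1)

print(next_slide_partname(8))  # /ppt/slides/slide9.xml
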