code (strings, 75 to 104k chars) | docstring (strings, 1 to 46.9k chars) | text (strings, 164 to 112k chars)
---|---|---|
def _cell_scalar(self, name=None):
"""
Returns the cell scalars of a vtk object
Parameters
----------
name : str
Name of cell scalars to retrieve.
Returns
-------
scalars : np.ndarray
Numpy array of scalars
"""
if name is None:
# use active scalar array
field, name = self.active_scalar_info
if field != CELL_DATA_FIELD:
raise RuntimeError('Must specify an array to fetch.')
vtkarr = self.GetCellData().GetArray(name)
if vtkarr is None:
raise AssertionError('({}) is not a cell scalar'.format(name))
# numpy does not support bit array data types
if isinstance(vtkarr, vtk.vtkBitArray):
vtkarr = vtk_bit_array_to_char(vtkarr)
if name not in self._cell_bool_array_names:
self._cell_bool_array_names.append(name)
array = vtk_to_numpy(vtkarr)
if array.dtype == np.uint8 and name in self._cell_bool_array_names:
array = array.view(np.bool)
return array | Returns the cell scalars of a vtk object
Parameters
----------
name : str
Name of cell scalars to retrieve.
Returns
-------
scalars : np.ndarray
Numpy array of scalars | Below is the instruction that describes the task:
### Input:
Returns the cell scalars of a vtk object
Parameters
----------
name : str
Name of cell scalars to retrieve.
Returns
-------
scalars : np.ndarray
Numpy array of scalars
### Response:
def _cell_scalar(self, name=None):
"""
Returns the cell scalars of a vtk object
Parameters
----------
name : str
Name of cell scalars to retrieve.
Returns
-------
scalars : np.ndarray
Numpy array of scalars
"""
if name is None:
# use active scalar array
field, name = self.active_scalar_info
if field != CELL_DATA_FIELD:
raise RuntimeError('Must specify an array to fetch.')
vtkarr = self.GetCellData().GetArray(name)
if vtkarr is None:
raise AssertionError('({}) is not a cell scalar'.format(name))
# numpy does not support bit array data types
if isinstance(vtkarr, vtk.vtkBitArray):
vtkarr = vtk_bit_array_to_char(vtkarr)
if name not in self._cell_bool_array_names:
self._cell_bool_array_names.append(name)
array = vtk_to_numpy(vtkarr)
if array.dtype == np.uint8 and name in self._cell_bool_array_names:
array = array.view(np.bool)
return array |
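One detail worth spelling out in the snippet above is the uint8-to-bool conversion: numpy has no bit dtype, so the VTK bit array is first expanded to one byte per cell and the resulting uint8 buffer is reinterpreted as booleans with `view`. A minimal numpy-only sketch of just that step (values made up, no VTK involved):

```python
import numpy as np

# Stand-in for vtk_to_numpy() applied to a converted vtkBitArray:
# one uint8 per cell, each 0 or 1.
raw = np.array([0, 1, 1, 0, 1], dtype=np.uint8)

# Reinterpret the same 1-byte-per-element buffer as booleans (no copy is made).
flags = raw.view(bool)
print(flags)  # [False  True  True False  True]
```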
def _normalize_select_no_context(select, schema=None):
"""
SAME NORMALIZE, BUT NO SOURCE OF COLUMNS
"""
if not _Column:
_late_import()
if is_text(select):
select = Data(value=select)
else:
select = wrap(select)
output = select.copy()
if not select.value:
output.name = coalesce(select.name, select.aggregate)
if output.name:
output.value = jx_expression(".", schema=schema)
else:
return Null
elif is_text(select.value):
if select.value.endswith(".*"):
name = select.value[:-2].lstrip(".")
output.name = coalesce(select.name, name)
output.value = LeavesOp(Variable(name), prefix=coalesce(select.prefix, name))
else:
if select.value == ".":
output.name = coalesce(select.name, select.aggregate, ".")
output.value = jx_expression(select.value, schema=schema)
elif select.value == "*":
output.name = coalesce(select.name, select.aggregate, ".")
output.value = LeavesOp(Variable("."))
else:
output.name = coalesce(select.name, select.value.lstrip("."), select.aggregate)
output.value = jx_expression(select.value, schema=schema)
elif is_number(output.value):
if not output.name:
output.name = text_type(output.value)
output.value = jx_expression(select.value, schema=schema)
else:
output.value = jx_expression(select.value, schema=schema)
if not output.name:
Log.error("expecting select to have a name: {{select}}", select= select)
if output.name.endswith(".*"):
Log.error("{{name|quote}} is invalid select", name=output.name)
output.aggregate = coalesce(canonical_aggregates[select.aggregate].name, select.aggregate, "none")
output.default = coalesce(select.default, canonical_aggregates[output.aggregate].default)
return output | SAME NORMALIZE, BUT NO SOURCE OF COLUMNS | Below is the instruction that describes the task:
### Input:
SAME NORMALIZE, BUT NO SOURCE OF COLUMNS
### Response:
def _normalize_select_no_context(select, schema=None):
"""
SAME NORMALIZE, BUT NO SOURCE OF COLUMNS
"""
if not _Column:
_late_import()
if is_text(select):
select = Data(value=select)
else:
select = wrap(select)
output = select.copy()
if not select.value:
output.name = coalesce(select.name, select.aggregate)
if output.name:
output.value = jx_expression(".", schema=schema)
else:
return Null
elif is_text(select.value):
if select.value.endswith(".*"):
name = select.value[:-2].lstrip(".")
output.name = coalesce(select.name, name)
output.value = LeavesOp(Variable(name), prefix=coalesce(select.prefix, name))
else:
if select.value == ".":
output.name = coalesce(select.name, select.aggregate, ".")
output.value = jx_expression(select.value, schema=schema)
elif select.value == "*":
output.name = coalesce(select.name, select.aggregate, ".")
output.value = LeavesOp(Variable("."))
else:
output.name = coalesce(select.name, select.value.lstrip("."), select.aggregate)
output.value = jx_expression(select.value, schema=schema)
elif is_number(output.value):
if not output.name:
output.name = text_type(output.value)
output.value = jx_expression(select.value, schema=schema)
else:
output.value = jx_expression(select.value, schema=schema)
if not output.name:
Log.error("expecting select to have a name: {{select}}", select= select)
if output.name.endswith(".*"):
Log.error("{{name|quote}} is invalid select", name=output.name)
output.aggregate = coalesce(canonical_aggregates[select.aggregate].name, select.aggregate, "none")
output.default = coalesce(select.default, canonical_aggregates[output.aggregate].default)
return output |
def initialize(self, config, context):
"""We initialize the window duration and slide interval
"""
if SlidingWindowBolt.WINDOW_DURATION_SECS in config:
self.window_duration = int(config[SlidingWindowBolt.WINDOW_DURATION_SECS])
else:
self.logger.fatal("Window Duration has to be specified in the config")
if SlidingWindowBolt.WINDOW_SLIDEINTERVAL_SECS in config:
self.slide_interval = int(config[SlidingWindowBolt.WINDOW_SLIDEINTERVAL_SECS])
else:
self.slide_interval = self.window_duration
if self.slide_interval > self.window_duration:
self.logger.fatal("Slide Interval should be <= Window Duration")
# By modifying the config, we are able to setup the tick timer
config[api_constants.TOPOLOGY_TICK_TUPLE_FREQ_SECS] = str(self.slide_interval)
self.current_tuples = deque()
if hasattr(self, 'saved_state'):
if 'tuples' in self.saved_state:
self.current_tuples = self.saved_state['tuples'] | We initialize the window duration and slide interval | Below is the instruction that describes the task:
### Input:
We initialize the window duration and slide interval
### Response:
def initialize(self, config, context):
"""We initialize the window duration and slide interval
"""
if SlidingWindowBolt.WINDOW_DURATION_SECS in config:
self.window_duration = int(config[SlidingWindowBolt.WINDOW_DURATION_SECS])
else:
self.logger.fatal("Window Duration has to be specified in the config")
if SlidingWindowBolt.WINDOW_SLIDEINTERVAL_SECS in config:
self.slide_interval = int(config[SlidingWindowBolt.WINDOW_SLIDEINTERVAL_SECS])
else:
self.slide_interval = self.window_duration
if self.slide_interval > self.window_duration:
self.logger.fatal("Slide Interval should be <= Window Duration")
# By modifying the config, we are able to setup the tick timer
config[api_constants.TOPOLOGY_TICK_TUPLE_FREQ_SECS] = str(self.slide_interval)
self.current_tuples = deque()
if hasattr(self, 'saved_state'):
if 'tuples' in self.saved_state:
self.current_tuples = self.saved_state['tuples'] |
def _encrypt_xor(a, b, aes):
""" Returns encrypt(a ^ b). """
a = unhexlify("%0.32x" % (int((a), 16) ^ int(hexlify(b), 16)))
return aes.encrypt(a) | Returns encrypt(a ^ b). | Below is the instruction that describes the task:
### Input:
Returns encrypt(a ^ b).
### Response:
def _encrypt_xor(a, b, aes):
""" Returns encrypt(a ^ b). """
a = unhexlify("%0.32x" % (int((a), 16) ^ int(hexlify(b), 16)))
return aes.encrypt(a) |
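A hedged usage sketch for the helper above: it only needs `a` as a hex string, `b` as bytes, and an `aes` object exposing `encrypt` on 16-byte blocks. PyCryptodome's AES in ECB mode and the zero key are assumptions made purely for illustration:

```python
from binascii import hexlify, unhexlify
from Crypto.Cipher import AES  # assumption: pycryptodome provides the cipher object


def _encrypt_xor(a, b, aes):
    """ Returns encrypt(a ^ b). """
    a = unhexlify("%0.32x" % (int((a), 16) ^ int(hexlify(b), 16)))
    return aes.encrypt(a)


aes = AES.new(b"\x00" * 16, AES.MODE_ECB)        # 128-bit zero key, illustration only
ciphertext = _encrypt_xor("ff" * 16, b"\x01" * 16, aes)
print(hexlify(ciphertext))                       # one 16-byte ciphertext block
```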
def add_event(self, name, time, chan):
"""Action: add a single event."""
self.annot.add_event(name, time, chan=chan)
self.update_annotations() | Action: add a single event. | Below is the instruction that describes the task:
### Input:
Action: add a single event.
### Response:
def add_event(self, name, time, chan):
"""Action: add a single event."""
self.annot.add_event(name, time, chan=chan)
self.update_annotations() |
def push(self):
"""
Push the changes back to the remote(s) after fetching
"""
print('pushing...')
push_kwargs = {}
push_args = []
if self.settings['push.tags']:
push_kwargs['push'] = True
if self.settings['push.all']:
push_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git push` will fail
return
push_args.append(self.remotes)
try:
self.git.push(*push_args, **push_kwargs)
self.pushed = True
except GitError as error:
error.message = "`git push` failed"
raise error | Push the changes back to the remote(s) after fetching | Below is the instruction that describes the task:
### Input:
Push the changes back to the remote(s) after fetching
### Response:
def push(self):
"""
Push the changes back to the remote(s) after fetching
"""
print('pushing...')
push_kwargs = {}
push_args = []
if self.settings['push.tags']:
push_kwargs['push'] = True
if self.settings['push.all']:
push_kwargs['all'] = True
else:
if '.' in self.remotes:
self.remotes.remove('.')
if not self.remotes:
# Only local target branches,
# `git push` will fail
return
push_args.append(self.remotes)
try:
self.git.push(*push_args, **push_kwargs)
self.pushed = True
except GitError as error:
error.message = "`git push` failed"
raise error |
def add_flow(self, flow):
"""
Add an :class:`Flow` flow to the scheduler.
"""
if hasattr(self, "_flow"):
raise self.Error("Only one flow can be added to the scheduler.")
# Check if we are already using a scheduler to run this flow
flow.check_pid_file()
flow.set_spectator_mode(False)
# Build dirs and files (if not yet done)
flow.build()
with open(flow.pid_file, "wt") as fh:
fh.write(str(self.pid))
self._pid_file = flow.pid_file
self._flow = flow | Add an :class:`Flow` flow to the scheduler. | Below is the instruction that describes the task:
### Input:
Add an :class:`Flow` flow to the scheduler.
### Response:
def add_flow(self, flow):
"""
Add an :class:`Flow` flow to the scheduler.
"""
if hasattr(self, "_flow"):
raise self.Error("Only one flow can be added to the scheduler.")
# Check if we are already using a scheduler to run this flow
flow.check_pid_file()
flow.set_spectator_mode(False)
# Build dirs and files (if not yet done)
flow.build()
with open(flow.pid_file, "wt") as fh:
fh.write(str(self.pid))
self._pid_file = flow.pid_file
self._flow = flow |
def share_project(project_id, usernames, read_only, share,**kwargs):
"""
Share an entire project with a list of users, identified by
their usernames.
The read_only flag ('Y' or 'N') must be set
to 'Y' to allow write access or sharing.
The share flag ('Y' or 'N') must be set to 'Y' to allow the
project to be shared with other users
"""
user_id = kwargs.get('user_id')
proj_i = _get_project(project_id)
#Is the sharing user allowed to share this project?
proj_i.check_share_permission(int(user_id))
user_id = int(user_id)
for owner in proj_i.owners:
if user_id == owner.user_id:
break
else:
raise HydraError("Permission Denied. Cannot share project.")
if read_only == 'Y':
write = 'N'
share = 'N'
else:
write = 'Y'
if proj_i.created_by != user_id and share == 'Y':
raise HydraError("Cannot share the 'sharing' ability as user %s is not"
" the owner of project %s"%
(user_id, project_id))
for username in usernames:
user_i = _get_user(username)
proj_i.set_owner(user_i.id, write=write, share=share)
for net_i in proj_i.networks:
net_i.set_owner(user_i.id, write=write, share=share)
db.DBSession.flush() | Share an entire project with a list of users, identified by
their usernames.
The read_only flag ('Y' or 'N') must be set
to 'Y' to allow write access or sharing.
The share flag ('Y' or 'N') must be set to 'Y' to allow the
project to be shared with other users | Below is the instruction that describes the task:
### Input:
Share an entire project with a list of users, identified by
their usernames.
The read_only flag ('Y' or 'N') must be set
to 'Y' to allow write access or sharing.
The share flag ('Y' or 'N') must be set to 'Y' to allow the
project to be shared with other users
### Response:
def share_project(project_id, usernames, read_only, share,**kwargs):
"""
Share an entire project with a list of users, identified by
their usernames.
The read_only flag ('Y' or 'N') must be set
to 'Y' to allow write access or sharing.
The share flag ('Y' or 'N') must be set to 'Y' to allow the
project to be shared with other users
"""
user_id = kwargs.get('user_id')
proj_i = _get_project(project_id)
#Is the sharing user allowed to share this project?
proj_i.check_share_permission(int(user_id))
user_id = int(user_id)
for owner in proj_i.owners:
if user_id == owner.user_id:
break
else:
raise HydraError("Permission Denied. Cannot share project.")
if read_only == 'Y':
write = 'N'
share = 'N'
else:
write = 'Y'
if proj_i.created_by != user_id and share == 'Y':
raise HydraError("Cannot share the 'sharing' ability as user %s is not"
" the owner of project %s"%
(user_id, project_id))
for username in usernames:
user_i = _get_user(username)
proj_i.set_owner(user_i.id, write=write, share=share)
for net_i in proj_i.networks:
net_i.set_owner(user_i.id, write=write, share=share)
db.DBSession.flush() |
def update_experiment(self, id, key, value):
'''Update experiment'''
if id not in self.experiments:
return False
self.experiments[id][key] = value
self.write_file()
return True | Update experiment | Below is the instruction that describes the task:
### Input:
Update experiment
### Response:
def update_experiment(self, id, key, value):
'''Update experiment'''
if id not in self.experiments:
return False
self.experiments[id][key] = value
self.write_file()
return True |
def _update_proxy(self, change):
""" An observer which sends state change to the proxy.
"""
if change['name'] in ['row', 'column']:
super(AbstractWidgetItem, self)._update_proxy(change)
else:
self.proxy.data_changed(change) | An observer which sends state change to the proxy. | Below is the instruction that describes the task:
### Input:
An observer which sends state change to the proxy.
### Response:
def _update_proxy(self, change):
""" An observer which sends state change to the proxy.
"""
if change['name'] in ['row', 'column']:
super(AbstractWidgetItem, self)._update_proxy(change)
else:
self.proxy.data_changed(change) |
def async_atomic(on_exception=None, raise_exception=True, **kwargs):
'''
first argument will be a conn object
:param func:
:return:
'''
if not raise_exception and not on_exception:
async def default_on_exception(exc):
resp_dict = {}
resp_dict['status'] = type(exc)
resp_dict['message'] = str(exc)
return resp_dict
on_exception = default_on_exception
elif raise_exception and not on_exception:
async def raise_exception(exp_args):
raise exp_args
on_exception = raise_exception
_db_adapter = get_db_adapter()
def decorator(func):
@functools.wraps(func)
async def wrapped(self, *args, **kwargs):
conn = None
for i in itertools.chain(args, kwargs.values()):
if type(i) is Connection:
conn = i
break
if not conn:
pool = await _db_adapter.get_pool()
async with pool.acquire() as conn:
try:
async with conn.transaction():
kwargs['conn'] = conn
return await func(self, *args, **kwargs)
except Exception as e:
return await on_exception(e)
else:
try:
async with conn.transaction():
kwargs['conn'] = conn
return await func(self, *args, **kwargs)
except Exception as e:
return await on_exception(e)
return wrapped
return decorator | first argument will be a conn object
:param func:
:return: | Below is the instruction that describes the task:
### Input:
first argument will be a conn object
:param func:
:return:
### Response:
def async_atomic(on_exception=None, raise_exception=True, **kwargs):
'''
first argument will be a conn object
:param func:
:return:
'''
if not raise_exception and not on_exception:
async def default_on_exception(exc):
resp_dict = {}
resp_dict['status'] = type(exc)
resp_dict['message'] = str(exc)
return resp_dict
on_exception = default_on_exception
elif raise_exception and not on_exception:
async def raise_exception(exp_args):
raise exp_args
on_exception = raise_exception
_db_adapter = get_db_adapter()
def decorator(func):
@functools.wraps(func)
async def wrapped(self, *args, **kwargs):
conn = None
for i in itertools.chain(args, kwargs.values()):
if type(i) is Connection:
conn = i
break
if not conn:
pool = await _db_adapter.get_pool()
async with pool.acquire() as conn:
try:
async with conn.transaction():
kwargs['conn'] = conn
return await func(self, *args, **kwargs)
except Exception as e:
return await on_exception(e)
else:
try:
async with conn.transaction():
kwargs['conn'] = conn
return await func(self, *args, **kwargs)
except Exception as e:
return await on_exception(e)
return wrapped
return decorator |
def mode(data):
"""Compute an intelligent value for the mode
The most common value in experimental data is not very useful if there
are a lot of digits after the comma. This method approaches this
issue by rounding to a bin size that is determined by the
Freedman–Diaconis rule.
Parameters
----------
data: 1d ndarray
The data for which the mode should be computed.
Returns
-------
mode: float
The mode computed with the Freedman-Diaconis rule.
"""
# size
n = data.shape[0]
# interquartile range
iqr = np.percentile(data, 75)-np.percentile(data, 25)
# Freedman–Diaconis
bin_size = 2 * iqr / n**(1/3)
if bin_size == 0:
return np.nan
# Add bin_size/2, because we want the center of the bin and
# not the left corner of the bin.
databin = np.round(data/bin_size)*bin_size + bin_size/2
u, indices = np.unique(databin, return_inverse=True)
mode = u[np.argmax(np.bincount(indices))]
return mode | Compute an intelligent value for the mode
The most common value in experimental data is not very useful if there
are a lot of digits after the comma. This method approaches this
issue by rounding to a bin size that is determined by the
Freedman–Diaconis rule.
Parameters
----------
data: 1d ndarray
The data for which the mode should be computed.
Returns
-------
mode: float
The mode computed with the Freedman-Diaconis rule. | Below is the instruction that describes the task:
### Input:
Compute an intelligent value for the mode
The most common value in experimental data is not very useful if there
are a lot of digits after the comma. This method approaches this
issue by rounding to a bin size that is determined by the
Freedman–Diaconis rule.
Parameters
----------
data: 1d ndarray
The data for which the mode should be computed.
Returns
-------
mode: float
The mode computed with the Freedman-Diaconis rule.
### Response:
def mode(data):
"""Compute an intelligent value for the mode
The most common value in experimental data is not very useful if there
are a lot of digits after the comma. This method approaches this
issue by rounding to a bin size that is determined by the
Freedman–Diaconis rule.
Parameters
----------
data: 1d ndarray
The data for which the mode should be computed.
Returns
-------
mode: float
The mode computed with the Freedman-Diaconis rule.
"""
# size
n = data.shape[0]
# interquartile range
iqr = np.percentile(data, 75)-np.percentile(data, 25)
# Freedman–Diaconis
bin_size = 2 * iqr / n**(1/3)
if bin_size == 0:
return np.nan
# Add bin_size/2, because we want the center of the bin and
# not the left corner of the bin.
databin = np.round(data/bin_size)*bin_size + bin_size/2
u, indices = np.unique(databin, return_inverse=True)
mode = u[np.argmax(np.bincount(indices))]
return mode |
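A quick usage sketch for the Freedman-Diaconis `mode` above (plain numpy, made-up data; assumes the function above is in scope):

```python
import numpy as np

rng = np.random.default_rng(0)
# 10,000 noisy measurements centred near 3.5
data = rng.normal(loc=3.5, scale=0.2, size=10_000)

# The returned value is the centre of the most populated FD-sized bin,
# i.e. roughly 3.5 rather than an over-precise float.
print(mode(data))
```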
def calculate_error(self):
"""Estimate the numerical error based on the fluxes calculated
by the current and the last method.
>>> from hydpy.models.test_v1 import *
>>> parameterstep()
>>> model.numvars.idx_method = 2
>>> results = numpy.asarray(fluxes.fastaccess._q_results)
>>> results[:4] = 0., 3., 4., 0.
>>> model.calculate_error()
>>> from hydpy import round_
>>> round_(model.numvars.error)
1.0
"""
self.numvars.error = 0.
fluxes = self.sequences.fluxes
for flux in fluxes.numerics:
results = getattr(fluxes.fastaccess, '_%s_results' % flux.name)
diff = (results[self.numvars.idx_method] -
results[self.numvars.idx_method-1])
self.numvars.error = max(self.numvars.error,
numpy.max(numpy.abs(diff))) | Estimate the numerical error based on the fluxes calculated
by the current and the last method.
>>> from hydpy.models.test_v1 import *
>>> parameterstep()
>>> model.numvars.idx_method = 2
>>> results = numpy.asarray(fluxes.fastaccess._q_results)
>>> results[:4] = 0., 3., 4., 0.
>>> model.calculate_error()
>>> from hydpy import round_
>>> round_(model.numvars.error)
1.0 | Below is the instruction that describes the task:
### Input:
Estimate the numerical error based on the fluxes calculated
by the current and the last method.
>>> from hydpy.models.test_v1 import *
>>> parameterstep()
>>> model.numvars.idx_method = 2
>>> results = numpy.asarray(fluxes.fastaccess._q_results)
>>> results[:4] = 0., 3., 4., 0.
>>> model.calculate_error()
>>> from hydpy import round_
>>> round_(model.numvars.error)
1.0
### Response:
def calculate_error(self):
"""Estimate the numerical error based on the fluxes calculated
by the current and the last method.
>>> from hydpy.models.test_v1 import *
>>> parameterstep()
>>> model.numvars.idx_method = 2
>>> results = numpy.asarray(fluxes.fastaccess._q_results)
>>> results[:4] = 0., 3., 4., 0.
>>> model.calculate_error()
>>> from hydpy import round_
>>> round_(model.numvars.error)
1.0
"""
self.numvars.error = 0.
fluxes = self.sequences.fluxes
for flux in fluxes.numerics:
results = getattr(fluxes.fastaccess, '_%s_results' % flux.name)
diff = (results[self.numvars.idx_method] -
results[self.numvars.idx_method-1])
self.numvars.error = max(self.numvars.error,
numpy.max(numpy.abs(diff))) |
def clean_already_reported(self, comments, file_name, position,
message):
"""
message is potentially a list of messages to post. This is later
converted into a string.
"""
for comment in comments:
if ((comment['path'] == file_name and
comment['position'] == position and
comment['user']['login'] == self.requester.username)):
return [m for m in message if m not in comment['body']]
return message | message is potentially a list of messages to post. This is later
converted into a string. | Below is the instruction that describes the task:
### Input:
message is potentially a list of messages to post. This is later
converted into a string.
### Response:
def clean_already_reported(self, comments, file_name, position,
message):
"""
message is potentially a list of messages to post. This is later
converted into a string.
"""
for comment in comments:
if ((comment['path'] == file_name and
comment['position'] == position and
comment['user']['login'] == self.requester.username)):
return [m for m in message if m not in comment['body']]
return message |
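To make the de-duplication concrete, here is a hypothetical run of the same logic with hand-written comment data shaped like the GitHub review-comment payload the method expects (`path`, `position`, `user.login`, `body`); the standalone function below mirrors the method body without the class:

```python
def clean_already_reported(comments, file_name, position, message, username='lint-bot'):
    """Drop messages that already appear in a matching review comment."""
    for comment in comments:
        if (comment['path'] == file_name
                and comment['position'] == position
                and comment['user']['login'] == username):
            return [m for m in message if m not in comment['body']]
    return message


comments = [{'path': 'app.py', 'position': 3,
             'user': {'login': 'lint-bot'},
             'body': 'line too long'}]
print(clean_already_reported(comments, 'app.py', 3,
                             ['line too long', 'unused import']))
# ['unused import'] -- the already-posted message is filtered out
```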
def __ssh_gateway_config_dict(gateway):
'''
Return a dictionary with gateway options. The result is used
to provide arguments to __ssh_gateway_arguments method.
'''
extended_kwargs = {}
if gateway:
extended_kwargs['ssh_gateway'] = gateway['ssh_gateway']
extended_kwargs['ssh_gateway_key'] = gateway['ssh_gateway_key']
extended_kwargs['ssh_gateway_user'] = gateway['ssh_gateway_user']
extended_kwargs['ssh_gateway_command'] = gateway['ssh_gateway_command']
return extended_kwargs | Return a dictionary with gateway options. The result is used
to provide arguments to __ssh_gateway_arguments method. | Below is the instruction that describes the task:
### Input:
Return a dictionary with gateway options. The result is used
to provide arguments to __ssh_gateway_arguments method.
### Response:
def __ssh_gateway_config_dict(gateway):
'''
Return a dictionary with gateway options. The result is used
to provide arguments to __ssh_gateway_arguments method.
'''
extended_kwargs = {}
if gateway:
extended_kwargs['ssh_gateway'] = gateway['ssh_gateway']
extended_kwargs['ssh_gateway_key'] = gateway['ssh_gateway_key']
extended_kwargs['ssh_gateway_user'] = gateway['ssh_gateway_user']
extended_kwargs['ssh_gateway_command'] = gateway['ssh_gateway_command']
return extended_kwargs |
def export_losses_by_event(ekey, dstore):
"""
:param ekey: export key, i.e. a pair (datastore key, fmt)
:param dstore: datastore object
"""
oq = dstore['oqparam']
writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
dest = dstore.build_fname('losses_by_event', '', 'csv')
if oq.calculation_mode.startswith('scenario'):
dtlist = [('eid', U64)] + oq.loss_dt_list()
arr = dstore['losses_by_event'].value[['eid', 'loss']]
writer.save(arr.copy().view(dtlist), dest)
elif oq.calculation_mode == 'ebrisk':
tagcol = dstore['assetcol/tagcol']
lbe = dstore['losses_by_event'].value
lbe.sort(order='eid')
dic = dict(tagnames=['event_id', 'loss_type'] + oq.aggregate_by)
for tagname in oq.aggregate_by:
dic[tagname] = getattr(tagcol, tagname)
dic['event_id'] = ['?'] + list(lbe['eid'])
dic['loss_type'] = ('?',) + oq.loss_dt().names
aw = hdf5.ArrayWrapper(lbe['loss'], dic) # shape (E, L, T...)
writer.save(aw.to_table(), dest)
else:
dtlist = [('event_id', U64), ('rup_id', U32), ('year', U32)] + \
oq.loss_dt_list()
eids = dstore['losses_by_event']['eid']
year_of = year_dict(dstore['events']['eid'],
oq.investigation_time, oq.ses_seed)
arr = numpy.zeros(len(dstore['losses_by_event']), dtlist)
arr['event_id'] = eids
arr['rup_id'] = arr['event_id'] / TWO32
arr['year'] = [year_of[eid] for eid in eids]
loss = dstore['losses_by_event']['loss'].T # shape (L, E)
for losses, loss_type in zip(loss, oq.loss_dt().names):
arr[loss_type] = losses
writer.save(arr, dest)
return writer.getsaved() | :param ekey: export key, i.e. a pair (datastore key, fmt)
:param dstore: datastore object | Below is the instruction that describes the task:
### Input:
:param ekey: export key, i.e. a pair (datastore key, fmt)
:param dstore: datastore object
### Response:
def export_losses_by_event(ekey, dstore):
"""
:param ekey: export key, i.e. a pair (datastore key, fmt)
:param dstore: datastore object
"""
oq = dstore['oqparam']
writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
dest = dstore.build_fname('losses_by_event', '', 'csv')
if oq.calculation_mode.startswith('scenario'):
dtlist = [('eid', U64)] + oq.loss_dt_list()
arr = dstore['losses_by_event'].value[['eid', 'loss']]
writer.save(arr.copy().view(dtlist), dest)
elif oq.calculation_mode == 'ebrisk':
tagcol = dstore['assetcol/tagcol']
lbe = dstore['losses_by_event'].value
lbe.sort(order='eid')
dic = dict(tagnames=['event_id', 'loss_type'] + oq.aggregate_by)
for tagname in oq.aggregate_by:
dic[tagname] = getattr(tagcol, tagname)
dic['event_id'] = ['?'] + list(lbe['eid'])
dic['loss_type'] = ('?',) + oq.loss_dt().names
aw = hdf5.ArrayWrapper(lbe['loss'], dic) # shape (E, L, T...)
writer.save(aw.to_table(), dest)
else:
dtlist = [('event_id', U64), ('rup_id', U32), ('year', U32)] + \
oq.loss_dt_list()
eids = dstore['losses_by_event']['eid']
year_of = year_dict(dstore['events']['eid'],
oq.investigation_time, oq.ses_seed)
arr = numpy.zeros(len(dstore['losses_by_event']), dtlist)
arr['event_id'] = eids
arr['rup_id'] = arr['event_id'] / TWO32
arr['year'] = [year_of[eid] for eid in eids]
loss = dstore['losses_by_event']['loss'].T # shape (L, E)
for losses, loss_type in zip(loss, oq.loss_dt().names):
arr[loss_type] = losses
writer.save(arr, dest)
return writer.getsaved() |
def delete_table(self, instance_id, table_id, project_id=None):
"""
Deletes the specified table in Cloud Bigtable.
Raises google.api_core.exceptions.NotFound if the table does not exist.
:type instance_id: str
:param instance_id: The ID of the Cloud Bigtable instance.
:type table_id: str
:param table_id: The ID of the table in Cloud Bigtable.
:type project_id: str
:param project_id: Optional, Google Cloud Platform project ID where the
BigTable exists. If set to None or missing,
the default project_id from the GCP connection is used.
"""
table = self.get_instance(instance_id=instance_id, project_id=project_id).table(table_id=table_id)
table.delete() | Deletes the specified table in Cloud Bigtable.
Raises google.api_core.exceptions.NotFound if the table does not exist.
:type instance_id: str
:param instance_id: The ID of the Cloud Bigtable instance.
:type table_id: str
:param table_id: The ID of the table in Cloud Bigtable.
:type project_id: str
:param project_id: Optional, Google Cloud Platform project ID where the
BigTable exists. If set to None or missing,
the default project_id from the GCP connection is used. | Below is the instruction that describes the task:
### Input:
Deletes the specified table in Cloud Bigtable.
Raises google.api_core.exceptions.NotFound if the table does not exist.
:type instance_id: str
:param instance_id: The ID of the Cloud Bigtable instance.
:type table_id: str
:param table_id: The ID of the table in Cloud Bigtable.
:type project_id: str
:param project_id: Optional, Google Cloud Platform project ID where the
BigTable exists. If set to None or missing,
the default project_id from the GCP connection is used.
### Response:
def delete_table(self, instance_id, table_id, project_id=None):
"""
Deletes the specified table in Cloud Bigtable.
Raises google.api_core.exceptions.NotFound if the table does not exist.
:type instance_id: str
:param instance_id: The ID of the Cloud Bigtable instance.
:type table_id: str
:param table_id: The ID of the table in Cloud Bigtable.
:type project_id: str
:param project_id: Optional, Google Cloud Platform project ID where the
BigTable exists. If set to None or missing,
the default project_id from the GCP connection is used.
"""
table = self.get_instance(instance_id=instance_id, project_id=project_id).table(table_id=table_id)
table.delete() |
def patch_priority_class(self, name, body, **kwargs):
"""
partially update the specified PriorityClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_priority_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PriorityClass (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1PriorityClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_priority_class_with_http_info(name, body, **kwargs)
else:
(data) = self.patch_priority_class_with_http_info(name, body, **kwargs)
return data | partially update the specified PriorityClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_priority_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PriorityClass (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1PriorityClass
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
partially update the specified PriorityClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_priority_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PriorityClass (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1PriorityClass
If the method is called asynchronously,
returns the request thread.
### Response:
def patch_priority_class(self, name, body, **kwargs):
"""
partially update the specified PriorityClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_priority_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PriorityClass (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1PriorityClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_priority_class_with_http_info(name, body, **kwargs)
else:
(data) = self.patch_priority_class_with_http_info(name, body, **kwargs)
return data |
def create(self, to, channel, custom_message=values.unset):
"""
Create a new VerificationInstance
:param unicode to: To phonenumber
:param unicode channel: sms or call
:param unicode custom_message: A custom message for this verification
:returns: Newly created VerificationInstance
:rtype: twilio.rest.preview.acc_security.service.verification.VerificationInstance
"""
data = values.of({'To': to, 'Channel': channel, 'CustomMessage': custom_message, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return VerificationInstance(self._version, payload, service_sid=self._solution['service_sid'], ) | Create a new VerificationInstance
:param unicode to: To phonenumber
:param unicode channel: sms or call
:param unicode custom_message: A custom message for this verification
:returns: Newly created VerificationInstance
:rtype: twilio.rest.preview.acc_security.service.verification.VerificationInstance | Below is the instruction that describes the task:
### Input:
Create a new VerificationInstance
:param unicode to: To phonenumber
:param unicode channel: sms or call
:param unicode custom_message: A custom message for this verification
:returns: Newly created VerificationInstance
:rtype: twilio.rest.preview.acc_security.service.verification.VerificationInstance
### Response:
def create(self, to, channel, custom_message=values.unset):
"""
Create a new VerificationInstance
:param unicode to: To phonenumber
:param unicode channel: sms or call
:param unicode custom_message: A custom message for this verification
:returns: Newly created VerificationInstance
:rtype: twilio.rest.preview.acc_security.service.verification.VerificationInstance
"""
data = values.of({'To': to, 'Channel': channel, 'CustomMessage': custom_message, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return VerificationInstance(self._version, payload, service_sid=self._solution['service_sid'], ) |
def set_default_viewport(self):
"""
Calculates the viewport based on the configured aspect ratio.
Will add black borders and center the viewport if the window
does not match the configured viewport.
If aspect ratio is None the viewport will be scaled
to the entire window size regardless of size.
"""
if self.aspect_ratio:
expected_width = int(self.buffer_height * self.aspect_ratio)
expected_height = int(expected_width / self.aspect_ratio)
if expected_width > self.buffer_width:
expected_width = self.buffer_width
expected_height = int(expected_width / self.aspect_ratio)
blank_space_x = self.buffer_width - expected_width
blank_space_y = self.buffer_height - expected_height
self.ctx.viewport = (
blank_space_x // 2,
blank_space_y // 2,
expected_width,
expected_height,
)
else:
self.ctx.viewport = (0, 0, self.buffer_width, self.buffer_height) | Calculates the viewport based on the configured aspect ratio.
Will add black borders and center the viewport if the window
does not match the configured viewport.
If aspect ratio is None the viewport will be scaled
to the entire window size regardless of size. | Below is the instruction that describes the task:
### Input:
Calculates the viewport based on the configured aspect ratio.
Will add black borders and center the viewport if the window
does not match the configured viewport.
If aspect ratio is None the viewport will be scaled
to the entire window size regardless of size.
### Response:
def set_default_viewport(self):
"""
Calculates the viewport based on the configured aspect ratio.
Will add black borders and center the viewport if the window
does not match the configured viewport.
If aspect ratio is None the viewport will be scaled
to the entire window size regardless of size.
"""
if self.aspect_ratio:
expected_width = int(self.buffer_height * self.aspect_ratio)
expected_height = int(expected_width / self.aspect_ratio)
if expected_width > self.buffer_width:
expected_width = self.buffer_width
expected_height = int(expected_width / self.aspect_ratio)
blank_space_x = self.buffer_width - expected_width
blank_space_y = self.buffer_height - expected_height
self.ctx.viewport = (
blank_space_x // 2,
blank_space_y // 2,
expected_width,
expected_height,
)
else:
self.ctx.viewport = (0, 0, self.buffer_width, self.buffer_height) |
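The letterboxing arithmetic above is easy to check in isolation; a minimal sketch with made-up buffer sizes and no GL context:

```python
def letterbox(buffer_width, buffer_height, aspect_ratio):
    """Return an (x, y, w, h) viewport centred inside the buffer."""
    expected_width = int(buffer_height * aspect_ratio)
    expected_height = int(expected_width / aspect_ratio)
    if expected_width > buffer_width:
        expected_width = buffer_width
        expected_height = int(expected_width / aspect_ratio)
    blank_x = buffer_width - expected_width
    blank_y = buffer_height - expected_height
    return blank_x // 2, blank_y // 2, expected_width, expected_height


# A 16:9 viewport centred in a 1920x1200 window leaves 60 px bars top and bottom.
print(letterbox(1920, 1200, 16 / 9))  # (0, 60, 1920, 1080)
```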
def get_measurements(
self, measurement='Weight', lower_bound=None, upper_bound=None
):
""" Returns measurements of a given name between two dates."""
if upper_bound is None:
upper_bound = datetime.date.today()
if lower_bound is None:
lower_bound = upper_bound - datetime.timedelta(days=30)
# If they entered the dates in the opposite order, let's
# just flip them around for them as a convenience
if lower_bound > upper_bound:
lower_bound, upper_bound = upper_bound, lower_bound
# get the URL for the main check in page
document = self._get_document_for_url(
self._get_url_for_measurements()
)
# gather the IDs for all measurement types
measurement_ids = self._get_measurement_ids(document)
# select the measurement ID based on the input
if measurement in measurement_ids.keys():
measurement_id = measurement_ids[measurement]
else:
raise ValueError(
"Measurement '%s' does not exist." % measurement
)
page = 1
measurements = OrderedDict()
# retrieve entries until finished
while True:
# retrieve the HTML from MyFitnessPal
document = self._get_document_for_url(
self._get_url_for_measurements(page, measurement_id)
)
# parse the HTML for measurement entries and add to dictionary
results = self._get_measurements(document)
measurements.update(results)
# stop if there are no more entries
if len(results) == 0:
break
# continue if the lower bound has not been reached
elif list(results.keys())[-1] > lower_bound:
page += 1
continue
# otherwise stop
else:
break
# remove entries that are not within the dates specified
for date in list(measurements.keys()):
if not upper_bound >= date >= lower_bound:
del measurements[date]
return measurements | Returns measurements of a given name between two dates. | Below is the instruction that describes the task:
### Input:
Returns measurements of a given name between two dates.
### Response:
def get_measurements(
self, measurement='Weight', lower_bound=None, upper_bound=None
):
""" Returns measurements of a given name between two dates."""
if upper_bound is None:
upper_bound = datetime.date.today()
if lower_bound is None:
lower_bound = upper_bound - datetime.timedelta(days=30)
# If they entered the dates in the opposite order, let's
# just flip them around for them as a convenience
if lower_bound > upper_bound:
lower_bound, upper_bound = upper_bound, lower_bound
# get the URL for the main check in page
document = self._get_document_for_url(
self._get_url_for_measurements()
)
# gather the IDs for all measurement types
measurement_ids = self._get_measurement_ids(document)
# select the measurement ID based on the input
if measurement in measurement_ids.keys():
measurement_id = measurement_ids[measurement]
else:
raise ValueError(
"Measurement '%s' does not exist." % measurement
)
page = 1
measurements = OrderedDict()
# retrieve entries until finished
while True:
# retrieve the HTML from MyFitnessPal
document = self._get_document_for_url(
self._get_url_for_measurements(page, measurement_id)
)
# parse the HTML for measurement entries and add to dictionary
results = self._get_measurements(document)
measurements.update(results)
# stop if there are no more entries
if len(results) == 0:
break
# continue if the lower bound has not been reached
elif list(results.keys())[-1] > lower_bound:
page += 1
continue
# otherwise stop
else:
break
# remove entries that are not within the dates specified
for date in list(measurements.keys()):
if not upper_bound >= date >= lower_bound:
del measurements[date]
return measurements |
async def delete_reply_markup(self):
"""
Use this method to delete reply markup of messages sent by the bot or via the bot (for inline bots).
:return: On success, if edited message is sent by the bot, the edited Message is returned,
otherwise True is returned.
:rtype: :obj:`typing.Union[types.Message, base.Boolean]`
"""
return await self.bot.edit_message_reply_markup(chat_id=self.chat.id, message_id=self.message_id) | Use this method to delete reply markup of messages sent by the bot or via the bot (for inline bots).
:return: On success, if edited message is sent by the bot, the edited Message is returned,
otherwise True is returned.
:rtype: :obj:`typing.Union[types.Message, base.Boolean]` | Below is the instruction that describes the task:
### Input:
Use this method to delete reply markup of messages sent by the bot or via the bot (for inline bots).
:return: On success, if edited message is sent by the bot, the edited Message is returned,
otherwise True is returned.
:rtype: :obj:`typing.Union[types.Message, base.Boolean]`
### Response:
async def delete_reply_markup(self):
"""
Use this method to delete reply markup of messages sent by the bot or via the bot (for inline bots).
:return: On success, if edited message is sent by the bot, the edited Message is returned,
otherwise True is returned.
:rtype: :obj:`typing.Union[types.Message, base.Boolean]`
"""
return await self.bot.edit_message_reply_markup(chat_id=self.chat.id, message_id=self.message_id) |
def isnumber(self, string, *args):
"""Is number
args:
string (str): match
returns:
bool
"""
try:
n, u = utility.analyze_number(string)
except SyntaxError:
return False
return True | Is number
args:
string (str): match
returns:
bool | Below is the instruction that describes the task:
### Input:
Is number
args:
string (str): match
returns:
bool
### Response:
def isnumber(self, string, *args):
"""Is number
args:
string (str): match
returns:
bool
"""
try:
n, u = utility.analyze_number(string)
except SyntaxError:
return False
return True |
def MatrixSolveLs(a, rhs, l2_reg):
"""
Matrix least-squares solve op.
"""
r = np.empty(rhs.shape).astype(a.dtype)
for coord in np.ndindex(a.shape[:-2]):
pos = coord + (Ellipsis,)
r[pos] = np.linalg.lstsq(a[pos], rhs[pos])[0]
return r, | Matrix least-squares solve op. | Below is the instruction that describes the task:
### Input:
Matrix least-squares solve op.
### Response:
def MatrixSolveLs(a, rhs, l2_reg):
"""
Matrix least-squares solve op.
"""
r = np.empty(rhs.shape).astype(a.dtype)
for coord in np.ndindex(a.shape[:-2]):
pos = coord + (Ellipsis,)
r[pos] = np.linalg.lstsq(a[pos], rhs[pos])[0]
return r, |
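A usage sketch for the batched solver above, with `MatrixSolveLs` as defined in this entry in scope. Note that the result buffer is allocated with `rhs.shape`, so the helper really only fits square `a` matrices; the sketch sticks to that case, and `l2_reg` is accepted but ignored by the implementation:

```python
import numpy as np

rng = np.random.default_rng(0)
a = rng.normal(size=(4, 3, 3))          # batch of four 3x3 systems
rhs = rng.normal(size=(4, 3, 2))        # matching right-hand sides

x, = MatrixSolveLs(a, rhs, l2_reg=0.0)  # returns a 1-tuple, hence the trailing comma
print(x.shape)                          # (4, 3, 2)
print(np.allclose(a @ x, rhs))          # True for well-conditioned square systems
```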
def load_shellcode(shellcode, arch, start_offset=0, load_address=0):
"""
Load a new project based on a string of raw bytecode.
:param shellcode: The data to load
:param arch: The name of the arch to use, or an archinfo class
:param start_offset: The offset into the data to start analysis (default 0)
:param load_address: The address to place the data in memory (default 0)
"""
return Project(
BytesIO(shellcode),
main_opts={
'backend': 'blob',
'arch': arch,
'entry_point': start_offset,
'base_addr': load_address,
}
) | Load a new project based on a string of raw bytecode.
:param shellcode: The data to load
:param arch: The name of the arch to use, or an archinfo class
:param start_offset: The offset into the data to start analysis (default 0)
:param load_address: The address to place the data in memory (default 0) | Below is the instruction that describes the task:
### Input:
Load a new project based on a string of raw bytecode.
:param shellcode: The data to load
:param arch: The name of the arch to use, or an archinfo class
:param start_offset: The offset into the data to start analysis (default 0)
:param load_address: The address to place the data in memory (default 0)
### Response:
def load_shellcode(shellcode, arch, start_offset=0, load_address=0):
"""
Load a new project based on a string of raw bytecode.
:param shellcode: The data to load
:param arch: The name of the arch to use, or an archinfo class
:param start_offset: The offset into the data to start analysis (default 0)
:param load_address: The address to place the data in memory (default 0)
"""
return Project(
BytesIO(shellcode),
main_opts={
'backend': 'blob',
'arch': arch,
'entry_point': start_offset,
'base_addr': load_address,
}
) |
def start(self):
"""
Creates a SSL connection to the iDigi Server and sends a
ConnectionRequest message.
"""
self.log.info("Starting SSL Session for Monitor %s."
% self.monitor_id)
if self.socket is not None:
raise Exception("Socket already established for %s." % self)
try:
# Create socket, wrap in SSL and connect.
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Validate that the certificate the server uses matches what we expect.
if self.ca_certs is not None:
self.socket = ssl.wrap_socket(self.socket,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=self.ca_certs)
else:
self.socket = ssl.wrap_socket(self.socket)
self.socket.connect((self.client.hostname, PUSH_SECURE_PORT))
self.socket.setblocking(0)
except Exception as exception:
self.socket.close()
self.socket = None
raise exception
self.send_connection_request() | Creates a SSL connection to the iDigi Server and sends a
ConnectionRequest message. | Below is the instruction that describes the task:
### Input:
Creates a SSL connection to the iDigi Server and sends a
ConnectionRequest message.
### Response:
def start(self):
"""
Creates a SSL connection to the iDigi Server and sends a
ConnectionRequest message.
"""
self.log.info("Starting SSL Session for Monitor %s."
% self.monitor_id)
if self.socket is not None:
raise Exception("Socket already established for %s." % self)
try:
# Create socket, wrap in SSL and connect.
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Validate that the certificate the server uses matches what we expect.
if self.ca_certs is not None:
self.socket = ssl.wrap_socket(self.socket,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=self.ca_certs)
else:
self.socket = ssl.wrap_socket(self.socket)
self.socket.connect((self.client.hostname, PUSH_SECURE_PORT))
self.socket.setblocking(0)
except Exception as exception:
self.socket.close()
self.socket = None
raise exception
self.send_connection_request() |
def filter_t(func):
"""
Transformation for Sequence.filter
:param func: filter function
:return: transformation
"""
return Transformation('filter({0})'.format(name(func)),
partial(filter, func),
{ExecutionStrategies.PARALLEL}) | Transformation for Sequence.filter
:param func: filter function
:return: transformation | Below is the instruction that describes the task:
### Input:
Transformation for Sequence.filter
:param func: filter function
:return: transformation
### Response:
def filter_t(func):
"""
Transformation for Sequence.filter
:param func: filter function
:return: transformation
"""
return Transformation('filter({0})'.format(name(func)),
partial(filter, func),
{ExecutionStrategies.PARALLEL}) |
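Stripped of the Sequence/Transformation machinery, the transformation above is just `partial(filter, func)`; a tiny standalone sketch of that core:

```python
from functools import partial

is_even = lambda n: n % 2 == 0
filter_evens = partial(filter, is_even)      # what the transformation applies lazily
print(list(filter_evens([1, 2, 3, 4, 5])))   # [2, 4]
```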
def Client(api_version, *args, **kwargs):
"""Return an neutron client.
@param api_version: only 2.0 is supported now
"""
neutron_client = utils.get_client_class(
API_NAME,
api_version,
API_VERSIONS,
)
return neutron_client(*args, **kwargs) | Return a neutron client.
@param api_version: only 2.0 is supported now | Below is the instruction that describes the task:
### Input:
Return a neutron client.
@param api_version: only 2.0 is supported now
### Response:
def Client(api_version, *args, **kwargs):
"""Return an neutron client.
@param api_version: only 2.0 is supported now
"""
neutron_client = utils.get_client_class(
API_NAME,
api_version,
API_VERSIONS,
)
return neutron_client(*args, **kwargs) |
def get_colour(index):
""" get color number index. """
colours = [
'red', 'blue', 'green', 'pink',
'yellow', 'magenta', 'orange', 'cyan',
]
default_colour = 'purple'
if index < len(colours):
return colours[index]
else:
return default_colour | get color number index. | Below is the instruction that describes the task:
### Input:
get color number index.
### Response:
def get_colour(index):
""" get color number index. """
colours = [
'red', 'blue', 'green', 'pink',
'yellow', 'magenta', 'orange', 'cyan',
]
default_colour = 'purple'
if index < len(colours):
return colours[index]
else:
return default_colour |
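Usage of the palette helper above is straightforward (assuming `get_colour` as defined in this entry is in scope); indexes beyond the eight named colours fall back to the default:

```python
print(get_colour(0))   # 'red'
print(get_colour(7))   # 'cyan'
print(get_colour(42))  # 'purple' (default for out-of-range indexes)
```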
def t_BOOL(self, t):
r'true|false'
t.value = True if t.value == 'true' else False
return t | r'true|false | Below is the instruction that describes the task:
### Input:
r'true|false
### Response:
def t_BOOL(self, t):
r'true|false'
t.value = True if t.value == 'true' else False
return t |
def listdir(p, match='*', exclude='', listtype='file', matchfun=None):
"""
list file(or folder) for this path (NOT recursive)
:param p:
:param match:
:param exclude:
:param listtype: ('file' | 'filepath' |'dir' | 'all')
:param matchfun: match fun (default fnmatch.fnmatch) True/False = matchfun(name, pattern)
:rtype:
"""
if listtype == 'file':
gen = listfile(p)
elif listtype == 'filepath':
gen = listfilepath(p)
elif listtype == 'dir':
gen = listfolder(p)
elif listtype == 'dirpath':
gen = listfolderpath(p)
else: # list file or folder
gen = (entry.name for entry in scandir.scandir(p))
return filter_pattern(gen, match, exclude, matchfun) | list file(or folder) for this path (NOT recursive)
:param p:
:param match:
:param exclude:
:param listtype: ('file' | 'filepath' |'dir' | 'all')
:param matchfun: match fun (default fnmatch.fnmatch) True/False = matchfun(name, pattern)
:rtype: | Below is the instruction that describes the task:
### Input:
list file(or folder) for this path (NOT recursive)
:param p:
:param match:
:param exclude:
:param listtype: ('file' | 'filepath' |'dir' | 'all')
:param matchfun: match fun (default fnmatch.fnmatch) True/False = matchfun(name, pattern)
:rtype:
### Response:
def listdir(p, match='*', exclude='', listtype='file', matchfun=None):
"""
list file(or folder) for this path (NOT recursive)
:param p:
:param match:
:param exclude:
:param listtype: ('file' | 'filepath' |'dir' | 'all')
:param matchfun: match fun (default fnmatch.fnmatch) True/False = matchfun(name, pattern)
:rtype:
"""
if listtype == 'file':
gen = listfile(p)
elif listtype == 'filepath':
gen = listfilepath(p)
elif listtype == 'dir':
gen = listfolder(p)
elif listtype == 'dirpath':
gen = listfolderpath(p)
else: # list file or folder
gen = (entry.name for entry in scandir.scandir(p))
return filter_pattern(gen, match, exclude, matchfun) |
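The helpers `listdir` delegates to (`listfile`, `listfolderpath`, `filter_pattern`, ...) are not shown in this entry; as a rough standard-library-only equivalent of the `'file'` case, a standalone sketch might look like this:

```python
import os
from fnmatch import fnmatch


def list_files(path, match='*', exclude=''):
    """Non-recursive file listing with include/exclude glob patterns."""
    for entry in os.scandir(path):
        if not entry.is_file():
            continue
        if fnmatch(entry.name, match) and not (exclude and fnmatch(entry.name, exclude)):
            yield entry.name


print(sorted(list_files('.', match='*.py')))
```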
def get_listing(self):
'''
This function gets all of the information that we need to either render or
validate the form. It is structured to avoid duplicate DB queries
'''
if not hasattr(self,'listing'):
allEvents = self.get_allEvents()
openEvents = allEvents.filter(registrationOpen=True)
closedEvents = allEvents.filter(registrationOpen=False)
publicEvents = allEvents.instance_of(PublicEvent)
allSeries = allEvents.instance_of(Series)
self.listing = {
'allEvents': allEvents,
'openEvents': openEvents,
'closedEvents': closedEvents,
'publicEvents': publicEvents,
'allSeries': allSeries,
'regOpenEvents': publicEvents.filter(registrationOpen=True).filter(
Q(publicevent__category__isnull=True) | Q(publicevent__category__separateOnRegistrationPage=False)
),
'regClosedEvents': publicEvents.filter(registrationOpen=False).filter(
Q(publicevent__category__isnull=True) | Q(publicevent__category__separateOnRegistrationPage=False)
),
'categorySeparateEvents': publicEvents.filter(
publicevent__category__separateOnRegistrationPage=True
).order_by('publicevent__category'),
'regOpenSeries': allSeries.filter(registrationOpen=True).filter(
Q(series__category__isnull=True) | Q(series__category__separateOnRegistrationPage=False)
),
'regClosedSeries': allSeries.filter(registrationOpen=False).filter(
Q(series__category__isnull=True) | Q(series__category__separateOnRegistrationPage=False)
),
'categorySeparateSeries': allSeries.filter(
series__category__separateOnRegistrationPage=True
).order_by('series__category'),
}
return self.listing | This function gets all of the information that we need to either render or
validate the form. It is structured to avoid duplicate DB queries | Below is the instruction that describes the task:
### Input:
This function gets all of the information that we need to either render or
validate the form. It is structured to avoid duplicate DB queries
### Response:
def get_listing(self):
'''
This function gets all of the information that we need to either render or
validate the form. It is structured to avoid duplicate DB queries
'''
if not hasattr(self,'listing'):
allEvents = self.get_allEvents()
openEvents = allEvents.filter(registrationOpen=True)
closedEvents = allEvents.filter(registrationOpen=False)
publicEvents = allEvents.instance_of(PublicEvent)
allSeries = allEvents.instance_of(Series)
self.listing = {
'allEvents': allEvents,
'openEvents': openEvents,
'closedEvents': closedEvents,
'publicEvents': publicEvents,
'allSeries': allSeries,
'regOpenEvents': publicEvents.filter(registrationOpen=True).filter(
Q(publicevent__category__isnull=True) | Q(publicevent__category__separateOnRegistrationPage=False)
),
'regClosedEvents': publicEvents.filter(registrationOpen=False).filter(
Q(publicevent__category__isnull=True) | Q(publicevent__category__separateOnRegistrationPage=False)
),
'categorySeparateEvents': publicEvents.filter(
publicevent__category__separateOnRegistrationPage=True
).order_by('publicevent__category'),
'regOpenSeries': allSeries.filter(registrationOpen=True).filter(
Q(series__category__isnull=True) | Q(series__category__separateOnRegistrationPage=False)
),
'regClosedSeries': allSeries.filter(registrationOpen=False).filter(
Q(series__category__isnull=True) | Q(series__category__separateOnRegistrationPage=False)
),
'categorySeparateSeries': allSeries.filter(
series__category__separateOnRegistrationPage=True
).order_by('series__category'),
}
return self.listing |
def nextCmd(snmpEngine, authData, transportTarget, contextData,
*varBinds, **options):
"""Performs SNMP GETNEXT query.
Based on passed parameters, prepares SNMP GETNEXT packet
(:RFC:`1905#section-4.2.2`) and schedules its transmission by
:mod:`twisted` I/O framework at a later point of time.
Parameters
----------
snmpEngine : :class:`~pysnmp.hlapi.SnmpEngine`
Class instance representing SNMP engine.
authData : :class:`~pysnmp.hlapi.CommunityData` or :class:`~pysnmp.hlapi.UsmUserData`
Class instance representing SNMP credentials.
transportTarget : :class:`~pysnmp.hlapi.twisted.UdpTransportTarget` or :class:`~pysnmp.hlapi.twisted.Udp6TransportTarget`
Class instance representing transport type along with SNMP peer address.
contextData : :class:`~pysnmp.hlapi.ContextData`
Class instance representing SNMP ContextEngineId and ContextName values.
\*varBinds : :class:`~pysnmp.smi.rfc1902.ObjectType`
One or more class instances representing MIB variables to place
into SNMP request.
Other Parameters
----------------
\*\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `True`.
* `ignoreNonIncreasingOid` - continue iteration even if response
              MIB variables (OIDs) are not greater than request MIB variables.
Be aware that setting it to `True` may cause infinite loop between
SNMP management and agent applications. Default is `False`.
Returns
-------
deferred : :class:`~twisted.internet.defer.Deferred`
Twisted Deferred object representing work-in-progress. User
is expected to attach his own `success` and `error` callback
        functions to the Deferred object through
:meth:`~twisted.internet.defer.Deferred.addCallbacks` method.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Notes
-----
User `success` callback is called with the following tuple as
its first argument:
* errorStatus (str) : True value indicates SNMP PDU error.
* errorIndex (int) : Non-zero value refers to `varBinds[errorIndex-1]`
* varBinds (tuple) :
A sequence of sequences (e.g. 2-D array) of
:py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances
representing a table of MIB variables returned in SNMP response.
      Inner sequences represent table rows and are ordered exactly the same
      as `varBinds` in request. Response to GETNEXT always contains
a single row.
User `error` callback is called with `errorIndication` object wrapped
in :class:`~twisted.python.failure.Failure` object.
Examples
--------
>>> from twisted.internet.task import react
>>> from pysnmp.hlapi.twisted import *
>>>
>>> def success(args):
... (errorStatus, errorIndex, varBindTable) = args
... print(errorStatus, errorIndex, varBindTable)
...
>>> def failure(errorIndication):
... print(errorIndication)
...
>>> def run(reactor):
... d = nextCmd(SnmpEngine(),
... CommunityData('public'),
... UdpTransportTarget(('demo.snmplabs.com', 161)),
... ContextData(),
    ...                 ObjectType(ObjectIdentity('SNMPv2-MIB', 'system')))
... d.addCallback(success).addErrback(failure)
... return d
...
>>> react(run)
(0, 0, [[ObjectType(ObjectIdentity(ObjectName('1.3.6.1.2.1.1.1.0')), DisplayString('SunOS zeus.snmplabs.com 4.1.3_U1 1 sun4m'))]])
"""
def __cbFun(snmpEngine, sendRequestHandle,
errorIndication, errorStatus, errorIndex,
varBindTable, cbCtx):
lookupMib, deferred = cbCtx
if (options.get('ignoreNonIncreasingOid', False) and errorIndication
and isinstance(errorIndication, errind.OidNotIncreasing)):
errorIndication = None
if errorIndication:
deferred.errback(Failure(errorIndication))
else:
try:
varBindTable = [
VB_PROCESSOR.unmakeVarBinds(snmpEngine.cache,
varBindTableRow, lookupMib)
for varBindTableRow in varBindTable
]
except Exception as e:
deferred.errback(Failure(e))
else:
deferred.callback((errorStatus, errorIndex, varBindTable))
addrName, paramsName = LCD.configure(
snmpEngine, authData, transportTarget, contextData.contextName)
varBinds = VB_PROCESSOR.makeVarBinds(snmpEngine.cache, varBinds)
deferred = Deferred()
cmdgen.NextCommandGenerator().sendVarBinds(
snmpEngine, addrName, contextData.contextEngineId,
contextData.contextName, varBinds, __cbFun,
(options.get('lookupMib', True), deferred))
return deferred | Performs SNMP GETNEXT query.
Based on passed parameters, prepares SNMP GETNEXT packet
(:RFC:`1905#section-4.2.2`) and schedules its transmission by
:mod:`twisted` I/O framework at a later point of time.
Parameters
----------
snmpEngine : :class:`~pysnmp.hlapi.SnmpEngine`
Class instance representing SNMP engine.
authData : :class:`~pysnmp.hlapi.CommunityData` or :class:`~pysnmp.hlapi.UsmUserData`
Class instance representing SNMP credentials.
transportTarget : :class:`~pysnmp.hlapi.twisted.UdpTransportTarget` or :class:`~pysnmp.hlapi.twisted.Udp6TransportTarget`
Class instance representing transport type along with SNMP peer address.
contextData : :class:`~pysnmp.hlapi.ContextData`
Class instance representing SNMP ContextEngineId and ContextName values.
\*varBinds : :class:`~pysnmp.smi.rfc1902.ObjectType`
One or more class instances representing MIB variables to place
into SNMP request.
Other Parameters
----------------
\*\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `True`.
* `ignoreNonIncreasingOid` - continue iteration even if response
              MIB variables (OIDs) are not greater than request MIB variables.
Be aware that setting it to `True` may cause infinite loop between
SNMP management and agent applications. Default is `False`.
Returns
-------
deferred : :class:`~twisted.internet.defer.Deferred`
Twisted Deferred object representing work-in-progress. User
is expected to attach his own `success` and `error` callback
        functions to the Deferred object through
:meth:`~twisted.internet.defer.Deferred.addCallbacks` method.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Notes
-----
User `success` callback is called with the following tuple as
its first argument:
* errorStatus (str) : True value indicates SNMP PDU error.
* errorIndex (int) : Non-zero value refers to `varBinds[errorIndex-1]`
* varBinds (tuple) :
A sequence of sequences (e.g. 2-D array) of
:py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances
representing a table of MIB variables returned in SNMP response.
      Inner sequences represent table rows and are ordered exactly the same
      as `varBinds` in request. Response to GETNEXT always contains
a single row.
User `error` callback is called with `errorIndication` object wrapped
in :class:`~twisted.python.failure.Failure` object.
Examples
--------
>>> from twisted.internet.task import react
>>> from pysnmp.hlapi.twisted import *
>>>
>>> def success(args):
... (errorStatus, errorIndex, varBindTable) = args
... print(errorStatus, errorIndex, varBindTable)
...
>>> def failure(errorIndication):
... print(errorIndication)
...
>>> def run(reactor):
... d = nextCmd(SnmpEngine(),
... CommunityData('public'),
... UdpTransportTarget(('demo.snmplabs.com', 161)),
... ContextData(),
    ...                 ObjectType(ObjectIdentity('SNMPv2-MIB', 'system')))
... d.addCallback(success).addErrback(failure)
... return d
...
>>> react(run)
(0, 0, [[ObjectType(ObjectIdentity(ObjectName('1.3.6.1.2.1.1.1.0')), DisplayString('SunOS zeus.snmplabs.com 4.1.3_U1 1 sun4m'))]]) | Below is the the instruction that describes the task:
### Input:
Performs SNMP GETNEXT query.
Based on passed parameters, prepares SNMP GETNEXT packet
(:RFC:`1905#section-4.2.2`) and schedules its transmission by
:mod:`twisted` I/O framework at a later point of time.
Parameters
----------
snmpEngine : :class:`~pysnmp.hlapi.SnmpEngine`
Class instance representing SNMP engine.
authData : :class:`~pysnmp.hlapi.CommunityData` or :class:`~pysnmp.hlapi.UsmUserData`
Class instance representing SNMP credentials.
transportTarget : :class:`~pysnmp.hlapi.twisted.UdpTransportTarget` or :class:`~pysnmp.hlapi.twisted.Udp6TransportTarget`
Class instance representing transport type along with SNMP peer address.
contextData : :class:`~pysnmp.hlapi.ContextData`
Class instance representing SNMP ContextEngineId and ContextName values.
\*varBinds : :class:`~pysnmp.smi.rfc1902.ObjectType`
One or more class instances representing MIB variables to place
into SNMP request.
Other Parameters
----------------
\*\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `True`.
* `ignoreNonIncreasingOid` - continue iteration even if response
          MIB variables (OIDs) are not greater than request MIB variables.
Be aware that setting it to `True` may cause infinite loop between
SNMP management and agent applications. Default is `False`.
Returns
-------
deferred : :class:`~twisted.internet.defer.Deferred`
Twisted Deferred object representing work-in-progress. User
is expected to attach his own `success` and `error` callback
    functions to the Deferred object through
:meth:`~twisted.internet.defer.Deferred.addCallbacks` method.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Notes
-----
User `success` callback is called with the following tuple as
its first argument:
* errorStatus (str) : True value indicates SNMP PDU error.
* errorIndex (int) : Non-zero value refers to `varBinds[errorIndex-1]`
* varBinds (tuple) :
A sequence of sequences (e.g. 2-D array) of
:py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances
representing a table of MIB variables returned in SNMP response.
  Inner sequences represent table rows and are ordered exactly the same
  as `varBinds` in request. Response to GETNEXT always contains
a single row.
User `error` callback is called with `errorIndication` object wrapped
in :class:`~twisted.python.failure.Failure` object.
Examples
--------
>>> from twisted.internet.task import react
>>> from pysnmp.hlapi.twisted import *
>>>
>>> def success(args):
... (errorStatus, errorIndex, varBindTable) = args
... print(errorStatus, errorIndex, varBindTable)
...
>>> def failure(errorIndication):
... print(errorIndication)
...
>>> def run(reactor):
... d = nextCmd(SnmpEngine(),
... CommunityData('public'),
... UdpTransportTarget(('demo.snmplabs.com', 161)),
... ContextData(),
...                 ObjectType(ObjectIdentity('SNMPv2-MIB', 'system')))
... d.addCallback(success).addErrback(failure)
... return d
...
>>> react(run)
(0, 0, [[ObjectType(ObjectIdentity(ObjectName('1.3.6.1.2.1.1.1.0')), DisplayString('SunOS zeus.snmplabs.com 4.1.3_U1 1 sun4m'))]])
### Response:
def nextCmd(snmpEngine, authData, transportTarget, contextData,
*varBinds, **options):
"""Performs SNMP GETNEXT query.
Based on passed parameters, prepares SNMP GETNEXT packet
(:RFC:`1905#section-4.2.2`) and schedules its transmission by
:mod:`twisted` I/O framework at a later point of time.
Parameters
----------
snmpEngine : :class:`~pysnmp.hlapi.SnmpEngine`
Class instance representing SNMP engine.
authData : :class:`~pysnmp.hlapi.CommunityData` or :class:`~pysnmp.hlapi.UsmUserData`
Class instance representing SNMP credentials.
transportTarget : :class:`~pysnmp.hlapi.twisted.UdpTransportTarget` or :class:`~pysnmp.hlapi.twisted.Udp6TransportTarget`
Class instance representing transport type along with SNMP peer address.
contextData : :class:`~pysnmp.hlapi.ContextData`
Class instance representing SNMP ContextEngineId and ContextName values.
\*varBinds : :class:`~pysnmp.smi.rfc1902.ObjectType`
One or more class instances representing MIB variables to place
into SNMP request.
Other Parameters
----------------
\*\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `True`.
* `ignoreNonIncreasingOid` - continue iteration even if response
              MIB variables (OIDs) are not greater than request MIB variables.
Be aware that setting it to `True` may cause infinite loop between
SNMP management and agent applications. Default is `False`.
Returns
-------
deferred : :class:`~twisted.internet.defer.Deferred`
Twisted Deferred object representing work-in-progress. User
is expected to attach his own `success` and `error` callback
        functions to the Deferred object through
:meth:`~twisted.internet.defer.Deferred.addCallbacks` method.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Notes
-----
User `success` callback is called with the following tuple as
its first argument:
* errorStatus (str) : True value indicates SNMP PDU error.
* errorIndex (int) : Non-zero value refers to `varBinds[errorIndex-1]`
* varBinds (tuple) :
A sequence of sequences (e.g. 2-D array) of
:py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances
representing a table of MIB variables returned in SNMP response.
      Inner sequences represent table rows and are ordered exactly the same
      as `varBinds` in request. Response to GETNEXT always contains
a single row.
User `error` callback is called with `errorIndication` object wrapped
in :class:`~twisted.python.failure.Failure` object.
Examples
--------
>>> from twisted.internet.task import react
>>> from pysnmp.hlapi.twisted import *
>>>
>>> def success(args):
... (errorStatus, errorIndex, varBindTable) = args
... print(errorStatus, errorIndex, varBindTable)
...
>>> def failure(errorIndication):
... print(errorIndication)
...
>>> def run(reactor):
... d = nextCmd(SnmpEngine(),
... CommunityData('public'),
... UdpTransportTarget(('demo.snmplabs.com', 161)),
... ContextData(),
    ...                 ObjectType(ObjectIdentity('SNMPv2-MIB', 'system')))
... d.addCallback(success).addErrback(failure)
... return d
...
>>> react(run)
(0, 0, [[ObjectType(ObjectIdentity(ObjectName('1.3.6.1.2.1.1.1.0')), DisplayString('SunOS zeus.snmplabs.com 4.1.3_U1 1 sun4m'))]])
"""
def __cbFun(snmpEngine, sendRequestHandle,
errorIndication, errorStatus, errorIndex,
varBindTable, cbCtx):
lookupMib, deferred = cbCtx
if (options.get('ignoreNonIncreasingOid', False) and errorIndication
and isinstance(errorIndication, errind.OidNotIncreasing)):
errorIndication = None
if errorIndication:
deferred.errback(Failure(errorIndication))
else:
try:
varBindTable = [
VB_PROCESSOR.unmakeVarBinds(snmpEngine.cache,
varBindTableRow, lookupMib)
for varBindTableRow in varBindTable
]
except Exception as e:
deferred.errback(Failure(e))
else:
deferred.callback((errorStatus, errorIndex, varBindTable))
addrName, paramsName = LCD.configure(
snmpEngine, authData, transportTarget, contextData.contextName)
varBinds = VB_PROCESSOR.makeVarBinds(snmpEngine.cache, varBinds)
deferred = Deferred()
cmdgen.NextCommandGenerator().sendVarBinds(
snmpEngine, addrName, contextData.contextEngineId,
contextData.contextName, varBinds, __cbFun,
(options.get('lookupMib', True), deferred))
return deferred |
def lookup_comment_list(self):
"""Lookup list of comments for an issue.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: The pair (ISSUE, COMMENTS) where ISSUE is a dict for the
main issue and COMMENTS is a list of comments on the issue.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Do the work of getting data from github, handling paging,
and so on.
"""
if self.thread_id is None:
return None, None
        # Just pulling a single issue here so pagination shouldn't be a problem
my_req = self.raw_pull(self.thread_id)
if my_req.status_code != 200:
raise GitHubAngry('Bad status code %s because %s' % (
my_req.status_code, my_req.reason))
issue_json = my_req.json()
comments_url = issue_json['comments_url'] + self.url_extras
kwargs = {} if not self.user else {'auth': (self.user, self.token)}
comments_json = []
while comments_url:
logging.debug('Pulling comments URL: %s', comments_url)
c_req = requests.get(comments_url, **kwargs)
my_json = c_req.json()
assert isinstance(my_json, list)
comments_json.extend(my_json)
comments_url = None
if 'link' in c_req.headers: # need to handle pagination.
logging.debug('Paginating in lookup_comment_list')
link = c_req.headers['link'].split(',')
for thing in link:
potential_url, part = thing.split('; ')
if part == 'rel="next"':
comments_url = potential_url.lstrip(' <').rstrip('> ')
return issue_json, comments_json | Lookup list of comments for an issue.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: The pair (ISSUE, COMMENTS) where ISSUE is a dict for the
main issue and COMMENTS is a list of comments on the issue.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Do the work of getting data from github, handling paging,
and so on. | Below is the the instruction that describes the task:
### Input:
Lookup list of comments for an issue.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: The pair (ISSUE, COMMENTS) where ISSUE is a dict for the
main issue and COMMENTS is a list of comments on the issue.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Do the work of getting data from github, handling paging,
and so on.
### Response:
def lookup_comment_list(self):
"""Lookup list of comments for an issue.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: The pair (ISSUE, COMMENTS) where ISSUE is a dict for the
main issue and COMMENTS is a list of comments on the issue.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Do the work of getting data from github, handling paging,
and so on.
"""
if self.thread_id is None:
return None, None
        # Just pulling a single issue here so pagination shouldn't be a problem
my_req = self.raw_pull(self.thread_id)
if my_req.status_code != 200:
raise GitHubAngry('Bad status code %s because %s' % (
my_req.status_code, my_req.reason))
issue_json = my_req.json()
comments_url = issue_json['comments_url'] + self.url_extras
kwargs = {} if not self.user else {'auth': (self.user, self.token)}
comments_json = []
while comments_url:
logging.debug('Pulling comments URL: %s', comments_url)
c_req = requests.get(comments_url, **kwargs)
my_json = c_req.json()
assert isinstance(my_json, list)
comments_json.extend(my_json)
comments_url = None
if 'link' in c_req.headers: # need to handle pagination.
logging.debug('Paginating in lookup_comment_list')
link = c_req.headers['link'].split(',')
for thing in link:
potential_url, part = thing.split('; ')
if part == 'rel="next"':
comments_url = potential_url.lstrip(' <').rstrip('> ')
return issue_json, comments_json |
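
The interesting part of the record above is the Link-header pagination; here is a standalone sketch of that parsing step on a canned header value (the URLs are made up):
link_header = ('<https://api.github.com/repos/octo/demo/issues/1/comments?page=2>; rel="next", '
               '<https://api.github.com/repos/octo/demo/issues/1/comments?page=5>; rel="last"')
comments_url = None
for thing in link_header.split(','):
    potential_url, part = thing.split('; ')
    if part == 'rel="next"':
        comments_url = potential_url.lstrip(' <').rstrip('> ')
print(comments_url)  # https://api.github.com/repos/octo/demo/issues/1/comments?page=2
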
def resample_nn_1d(a, centers):
"""Return one-dimensional nearest-neighbor indexes based on user-specified centers.
Parameters
----------
a : array-like
1-dimensional array of numeric values from which to
extract indexes of nearest-neighbors
centers : array-like
1-dimensional array of numeric values representing a subset of values to approximate
Returns
-------
An array of indexes representing values closest to given array values
"""
ix = []
for center in centers:
index = (np.abs(a - center)).argmin()
if index not in ix:
ix.append(index)
return ix | Return one-dimensional nearest-neighbor indexes based on user-specified centers.
Parameters
----------
a : array-like
1-dimensional array of numeric values from which to
extract indexes of nearest-neighbors
centers : array-like
1-dimensional array of numeric values representing a subset of values to approximate
Returns
-------
An array of indexes representing values closest to given array values | Below is the the instruction that describes the task:
### Input:
Return one-dimensional nearest-neighbor indexes based on user-specified centers.
Parameters
----------
a : array-like
1-dimensional array of numeric values from which to
extract indexes of nearest-neighbors
centers : array-like
1-dimensional array of numeric values representing a subset of values to approximate
Returns
-------
An array of indexes representing values closest to given array values
### Response:
def resample_nn_1d(a, centers):
"""Return one-dimensional nearest-neighbor indexes based on user-specified centers.
Parameters
----------
a : array-like
1-dimensional array of numeric values from which to
extract indexes of nearest-neighbors
centers : array-like
1-dimensional array of numeric values representing a subset of values to approximate
Returns
-------
An array of indexes representing values closest to given array values
"""
ix = []
for center in centers:
index = (np.abs(a - center)).argmin()
if index not in ix:
ix.append(index)
return ix |
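
A quick usage sketch, assuming the resample_nn_1d defined above is in scope; note that duplicate nearest-neighbour hits collapse to a single index:
import numpy as np

a = np.linspace(0.0, 10.0, 101)      # fine grid with 0.1 spacing
centers = [0.07, 2.5, 2.52, 9.99]    # values to approximate
ix = resample_nn_1d(a, centers)
print(ix)      # [1, 25, 100] -- 2.5 and 2.52 map to the same index
print(a[ix])   # nearest grid values: 0.1, 2.5, 10.0
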
def now_playing(self, **kwargs):
"""
Get the list of movies playing in theatres. This list refreshes
every day. The maximum number of items this list will include is 100.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_path('now_playing')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | Get the list of movies playing in theatres. This list refreshes
every day. The maximum number of items this list will include is 100.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
Returns:
A dict representation of the JSON returned from the API. | Below is the the instruction that describes the task:
### Input:
Get the list of movies playing in theatres. This list refreshes
every day. The maximum number of items this list will include is 100.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
Returns:
A dict representation of the JSON returned from the API.
### Response:
def now_playing(self, **kwargs):
"""
Get the list of movies playing in theatres. This list refreshes
every day. The maximum number of items this list will include is 100.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_path('now_playing')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response |
def read_key(suppress=False):
"""
Blocks until a keyboard event happens, then returns that event's name or,
if missing, its scan code.
"""
event = read_event(suppress)
return event.name or event.scan_code | Blocks until a keyboard event happens, then returns that event's name or,
if missing, its scan code. | Below is the the instruction that describes the task:
### Input:
Blocks until a keyboard event happens, then returns that event's name or,
if missing, its scan code.
### Response:
def read_key(suppress=False):
"""
Blocks until a keyboard event happens, then returns that event's name or,
if missing, its scan code.
"""
event = read_event(suppress)
return event.name or event.scan_code |
def backend_from_fname(name):
"""Determine backend module object from a file name."""
ext = splitext(name)[1]
try:
mime = EXTS_TO_MIMETYPES[ext]
except KeyError:
try:
f = open(name, 'rb')
except IOError as e:
            # The file may not exist, we are being asked to determine its type
            # from its name. Other errors are unexpected.
if e.errno != errno.ENOENT:
raise
# We will have to fall back upon the default backend.
msg = "No handler for %r, defaulting to %r" % (ext, DEFAULT_MIME)
if 'FULLTEXT_TESTING' in os.environ:
warn(msg)
else:
LOGGER.debug(msg)
mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
else:
with f:
return backend_from_fobj(f)
else:
mod_name = MIMETYPE_TO_BACKENDS[mime]
mod = import_mod(mod_name)
return mod | Determine backend module object from a file name. | Below is the the instruction that describes the task:
### Input:
Determine backend module object from a file name.
### Response:
def backend_from_fname(name):
"""Determine backend module object from a file name."""
ext = splitext(name)[1]
try:
mime = EXTS_TO_MIMETYPES[ext]
except KeyError:
try:
f = open(name, 'rb')
except IOError as e:
            # The file may not exist, we are being asked to determine its type
            # from its name. Other errors are unexpected.
if e.errno != errno.ENOENT:
raise
# We will have to fall back upon the default backend.
msg = "No handler for %r, defaulting to %r" % (ext, DEFAULT_MIME)
if 'FULLTEXT_TESTING' in os.environ:
warn(msg)
else:
LOGGER.debug(msg)
mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
else:
with f:
return backend_from_fobj(f)
else:
mod_name = MIMETYPE_TO_BACKENDS[mime]
mod = import_mod(mod_name)
return mod |
def get_badge_image_url(pid, ext='svg'):
"""Return the badge for a DOI."""
return url_for('invenio_formatter_badges.badge',
title=pid.pid_type, value=pid.pid_value, ext=ext) | Return the badge for a DOI. | Below is the the instruction that describes the task:
### Input:
Return the badge for a DOI.
### Response:
def get_badge_image_url(pid, ext='svg'):
"""Return the badge for a DOI."""
return url_for('invenio_formatter_badges.badge',
title=pid.pid_type, value=pid.pid_value, ext=ext) |
def design_z_filter_single_pole(filt_str, max_gain_freq):
"""
Finds the coefficients for a simple lowpass/highpass filter.
This function just prints the coefficient values, besides the given
filter equation and its power gain. There's 3 constraints used to find the
coefficients:
1. The G value is defined by the max gain of 1 (0 dB) imposed at a
specific frequency
2. The R value is defined by the 50% power cutoff frequency given in
rad/sample.
3. Filter should be stable (-1 < R < 1)
Parameters
----------
filt_str :
Filter equation as a string using the G, R, w and z values.
max_gain_freq :
A value of zero (DC) or pi (Nyquist) to ensure the max gain as 1 (0 dB).
Note
----
The R value is evaluated only at pi/4 rad/sample to find whether -1 < R < 1,
and the max gain is assumed to be either 0 or pi, using other values might
fail.
"""
print("H(z) = " + filt_str) # Avoids printing as "1/z"
filt = sympify(filt_str, dict(G=G, R=R, w=w, z=z))
print()
# Finds the power magnitude equation for the filter
freq_resp = filt.subs(z, exp(I * w))
frr, fri = freq_resp.as_real_imag()
power_resp = fcompose(expand_complex, cancel, trigsimp)(frr ** 2 + fri ** 2)
pprint(Eq(Symbol("Power"), power_resp))
print()
# Finds the G value given the max gain value of 1 at the DC or Nyquist
# frequency. As exp(I*pi) is -1 and exp(I*0) is 1, we can use freq_resp
# (without "abs") instead of power_resp.
Gsolutions = factor(solve(Eq(freq_resp.subs(w, max_gain_freq), 1), G))
assert len(Gsolutions) == 1
pprint(Eq(G, Gsolutions[0]))
print()
# Finds the unconstrained R values for a given cutoff frequency
power_resp_no_G = power_resp.subs(G, Gsolutions[0])
half_power_eq = Eq(power_resp_no_G, S.Half)
Rsolutions = solve(half_power_eq, R)
# Constraining -1 < R < 1 when w = pi/4 (although the constraint is general)
Rsolutions_stable = [el for el in Rsolutions if -1 < el.subs(w, pi/4) < 1]
assert len(Rsolutions_stable) == 1
# Constraining w to the [0;pi] range, so |sin(w)| = sin(w)
Rsolution = Rsolutions_stable[0].subs(abs(sin(w)), sin(w))
pprint(Eq(R, Rsolution))
# More information about the pole (or -pole)
print("\n ** Alternative way to write R **\n")
if has_sqrt(Rsolution):
x = Symbol("x") # A helper symbol
xval = sum(el for el in Rsolution.args if not has_sqrt(el))
pprint(Eq(x, xval))
print()
pprint(Eq(R, expand(Rsolution.subs(xval, x))))
else:
# That's also what would be found in a bilinear transform with prewarping
pprint(Eq(R, Rsolution.rewrite(tan).cancel())) # Not so nice numerically
# See whether the R denominator can be zeroed
for root in solve(fraction(Rsolution)[1], w):
if 0 <= root <= pi:
power_resp_r = fcompose(expand, cancel)(power_resp_no_G.subs(w, root))
Rsolutions_r = solve(Eq(power_resp_r, S.Half), R)
assert len(Rsolutions_r) == 1
print("\nDenominator is zero for this value of " + pretty(w))
pprint(Eq(w, root))
pprint(Eq(R, Rsolutions_r[0])) | Finds the coefficients for a simple lowpass/highpass filter.
This function just prints the coefficient values, besides the given
filter equation and its power gain. There's 3 constraints used to find the
coefficients:
1. The G value is defined by the max gain of 1 (0 dB) imposed at a
specific frequency
2. The R value is defined by the 50% power cutoff frequency given in
rad/sample.
3. Filter should be stable (-1 < R < 1)
Parameters
----------
filt_str :
Filter equation as a string using the G, R, w and z values.
max_gain_freq :
A value of zero (DC) or pi (Nyquist) to ensure the max gain as 1 (0 dB).
Note
----
The R value is evaluated only at pi/4 rad/sample to find whether -1 < R < 1,
and the max gain is assumed to be either 0 or pi, using other values might
fail. | Below is the the instruction that describes the task:
### Input:
Finds the coefficients for a simple lowpass/highpass filter.
This function just prints the coefficient values, besides the given
filter equation and its power gain. There's 3 constraints used to find the
coefficients:
1. The G value is defined by the max gain of 1 (0 dB) imposed at a
specific frequency
2. The R value is defined by the 50% power cutoff frequency given in
rad/sample.
3. Filter should be stable (-1 < R < 1)
Parameters
----------
filt_str :
Filter equation as a string using the G, R, w and z values.
max_gain_freq :
A value of zero (DC) or pi (Nyquist) to ensure the max gain as 1 (0 dB).
Note
----
The R value is evaluated only at pi/4 rad/sample to find whether -1 < R < 1,
and the max gain is assumed to be either 0 or pi, using other values might
fail.
### Response:
def design_z_filter_single_pole(filt_str, max_gain_freq):
"""
Finds the coefficients for a simple lowpass/highpass filter.
This function just prints the coefficient values, besides the given
filter equation and its power gain. There's 3 constraints used to find the
coefficients:
1. The G value is defined by the max gain of 1 (0 dB) imposed at a
specific frequency
2. The R value is defined by the 50% power cutoff frequency given in
rad/sample.
3. Filter should be stable (-1 < R < 1)
Parameters
----------
filt_str :
Filter equation as a string using the G, R, w and z values.
max_gain_freq :
A value of zero (DC) or pi (Nyquist) to ensure the max gain as 1 (0 dB).
Note
----
The R value is evaluated only at pi/4 rad/sample to find whether -1 < R < 1,
and the max gain is assumed to be either 0 or pi, using other values might
fail.
"""
print("H(z) = " + filt_str) # Avoids printing as "1/z"
filt = sympify(filt_str, dict(G=G, R=R, w=w, z=z))
print()
# Finds the power magnitude equation for the filter
freq_resp = filt.subs(z, exp(I * w))
frr, fri = freq_resp.as_real_imag()
power_resp = fcompose(expand_complex, cancel, trigsimp)(frr ** 2 + fri ** 2)
pprint(Eq(Symbol("Power"), power_resp))
print()
# Finds the G value given the max gain value of 1 at the DC or Nyquist
# frequency. As exp(I*pi) is -1 and exp(I*0) is 1, we can use freq_resp
# (without "abs") instead of power_resp.
Gsolutions = factor(solve(Eq(freq_resp.subs(w, max_gain_freq), 1), G))
assert len(Gsolutions) == 1
pprint(Eq(G, Gsolutions[0]))
print()
# Finds the unconstrained R values for a given cutoff frequency
power_resp_no_G = power_resp.subs(G, Gsolutions[0])
half_power_eq = Eq(power_resp_no_G, S.Half)
Rsolutions = solve(half_power_eq, R)
# Constraining -1 < R < 1 when w = pi/4 (although the constraint is general)
Rsolutions_stable = [el for el in Rsolutions if -1 < el.subs(w, pi/4) < 1]
assert len(Rsolutions_stable) == 1
# Constraining w to the [0;pi] range, so |sin(w)| = sin(w)
Rsolution = Rsolutions_stable[0].subs(abs(sin(w)), sin(w))
pprint(Eq(R, Rsolution))
# More information about the pole (or -pole)
print("\n ** Alternative way to write R **\n")
if has_sqrt(Rsolution):
x = Symbol("x") # A helper symbol
xval = sum(el for el in Rsolution.args if not has_sqrt(el))
pprint(Eq(x, xval))
print()
pprint(Eq(R, expand(Rsolution.subs(xval, x))))
else:
# That's also what would be found in a bilinear transform with prewarping
pprint(Eq(R, Rsolution.rewrite(tan).cancel())) # Not so nice numerically
# See whether the R denominator can be zeroed
for root in solve(fraction(Rsolution)[1], w):
if 0 <= root <= pi:
power_resp_r = fcompose(expand, cancel)(power_resp_no_G.subs(w, root))
Rsolutions_r = solve(Eq(power_resp_r, S.Half), R)
assert len(Rsolutions_r) == 1
print("\nDenominator is zero for this value of " + pretty(w))
pprint(Eq(w, root))
pprint(Eq(R, Rsolutions_r[0])) |
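
A hypothetical invocation for a one-pole lowpass whose maximum gain (0 dB) sits at DC; the exact filter strings used for the original figures are not given in the record, so this string is an assumption:
# filt_str is written in terms of the G, R and z symbols expected by the function;
# for a highpass variant one would pass max_gain_freq=pi (sympy's pi) with a suitable string.
design_z_filter_single_pole("G / (1 - R / z)", max_gain_freq=0)
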
def multipath_flush(device):
'''
Device-Mapper Multipath flush
CLI Example:
.. code-block:: bash
salt '*' devmap.multipath_flush mpath1
'''
if not os.path.exists(device):
return '{0} does not exist'.format(device)
cmd = 'multipath -f {0}'.format(device)
return __salt__['cmd.run'](cmd).splitlines() | Device-Mapper Multipath flush
CLI Example:
.. code-block:: bash
salt '*' devmap.multipath_flush mpath1 | Below is the the instruction that describes the task:
### Input:
Device-Mapper Multipath flush
CLI Example:
.. code-block:: bash
salt '*' devmap.multipath_flush mpath1
### Response:
def multipath_flush(device):
'''
Device-Mapper Multipath flush
CLI Example:
.. code-block:: bash
salt '*' devmap.multipath_flush mpath1
'''
if not os.path.exists(device):
return '{0} does not exist'.format(device)
cmd = 'multipath -f {0}'.format(device)
return __salt__['cmd.run'](cmd).splitlines() |
def inflate(self):
"""Load the resource from the server, if not already loaded."""
if not self._is_inflated:
if self._is_inflating:
# catch infinite recursion when attempting to inflate
# an object that doesn't have enough data to inflate
msg = ("There is not enough data to inflate this object. "
"Need either an href: {} or a {}: {}")
msg = msg.format(self._href, self.primary_key, self._data.get(self.primary_key))
raise exceptions.ClientError(msg)
self._is_inflating = True
try:
params = self.searchParameters if hasattr(self, 'searchParameters') else {}
# To keep the method same as the original request. The default is GET
self.load(self.client.request(self.method, self.url, **params))
except Exception:
self.load(self._data)
self._is_inflated = True
self._is_inflating = False
return self | Load the resource from the server, if not already loaded. | Below is the the instruction that describes the task:
### Input:
Load the resource from the server, if not already loaded.
### Response:
def inflate(self):
"""Load the resource from the server, if not already loaded."""
if not self._is_inflated:
if self._is_inflating:
# catch infinite recursion when attempting to inflate
# an object that doesn't have enough data to inflate
msg = ("There is not enough data to inflate this object. "
"Need either an href: {} or a {}: {}")
msg = msg.format(self._href, self.primary_key, self._data.get(self.primary_key))
raise exceptions.ClientError(msg)
self._is_inflating = True
try:
params = self.searchParameters if hasattr(self, 'searchParameters') else {}
# To keep the method same as the original request. The default is GET
self.load(self.client.request(self.method, self.url, **params))
except Exception:
self.load(self._data)
self._is_inflated = True
self._is_inflating = False
return self |
def trend(self, order=LINEAR):
'''Override Series.trend() to return a TimeSeries instance.'''
coefficients = self.trend_coefficients(order)
x = self.timestamps
trend_y = LazyImport.numpy().polyval(coefficients, x)
return TimeSeries(zip(x, trend_y)) | Override Series.trend() to return a TimeSeries instance. | Below is the the instruction that describes the task:
### Input:
Override Series.trend() to return a TimeSeries instance.
### Response:
def trend(self, order=LINEAR):
'''Override Series.trend() to return a TimeSeries instance.'''
coefficients = self.trend_coefficients(order)
x = self.timestamps
trend_y = LazyImport.numpy().polyval(coefficients, x)
return TimeSeries(zip(x, trend_y)) |
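
The mechanism behind trend() is just a polynomial fit evaluated back on the original x positions; a standalone numpy sketch of that idea (the TimeSeries wrapper from the record is not needed here):
import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])   # timestamps
y = np.array([1.0, 2.1, 2.9, 4.2, 5.1])   # observed values
coefficients = np.polyfit(x, y, 1)        # order 1 ~ LINEAR
trend_y = np.polyval(coefficients, x)
print(list(zip(x, trend_y)))
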
def handle_request(self, filter_name, path):
"""
Handle image request
:param filter_name: filter_name
:param path: image_path
:return:
"""
if filter_name in self._filter_sets:
if self._filter_sets[filter_name]['cached']:
cached_item_path = self._adapter.check_cached_item('%s/%s' % (filter_name, path))
if cached_item_path:
return redirect(cached_item_path, self._redirect_code)
resource = self._adapter.get_item(path)
if resource:
for filter_item in self._filter_sets[filter_name]['filters']:
resource = filter_item.apply(resource)
if self._filter_sets[filter_name]['cached']:
return redirect(
self._adapter.create_cached_item('%s/%s' % (filter_name, path), resource),
self._redirect_code
)
else:
output = BytesIO()
resource.save(output, format=str(resource.format))
return output.getvalue()
else:
LOGGER.warning('File "%s" not found.' % path)
abort(404)
else:
LOGGER.warning('Filter "%s" not found.' % filter_name)
abort(404) | Handle image request
:param filter_name: filter_name
:param path: image_path
:return: | Below is the the instruction that describes the task:
### Input:
Handle image request
:param filter_name: filter_name
:param path: image_path
:return:
### Response:
def handle_request(self, filter_name, path):
"""
Handle image request
:param filter_name: filter_name
:param path: image_path
:return:
"""
if filter_name in self._filter_sets:
if self._filter_sets[filter_name]['cached']:
cached_item_path = self._adapter.check_cached_item('%s/%s' % (filter_name, path))
if cached_item_path:
return redirect(cached_item_path, self._redirect_code)
resource = self._adapter.get_item(path)
if resource:
for filter_item in self._filter_sets[filter_name]['filters']:
resource = filter_item.apply(resource)
if self._filter_sets[filter_name]['cached']:
return redirect(
self._adapter.create_cached_item('%s/%s' % (filter_name, path), resource),
self._redirect_code
)
else:
output = BytesIO()
resource.save(output, format=str(resource.format))
return output.getvalue()
else:
LOGGER.warning('File "%s" not found.' % path)
abort(404)
else:
LOGGER.warning('Filter "%s" not found.' % filter_name)
abort(404) |
def run(self):
"""
Run the given command and yield each line(s) one by one.
.. note::
The difference between this method and :code:`self.execute()`
is that :code:`self.execute()` wait for the process to end
in order to return its output.
"""
with Popen(self.command, stdout=PIPE, shell=True) as process:
            # We initiate a process and pass the command to it.
while True:
                # We loop infinitely because we want to get the output
# until there is none.
# We get the current line from the process stdout.
#
# Note: we use rstrip() because we are paranoid :-)
current_line = process.stdout.readline().rstrip()
if not current_line:
# The current line is empty or equal to None.
# We break the loop.
break
# The line is not empty nor equal to None.
                # We decode and yield the current line
yield self._decode_output(current_line) | Run the given command and yield each line(s) one by one.
.. note::
The difference between this method and :code:`self.execute()`
is that :code:`self.execute()` wait for the process to end
in order to return its output. | Below is the the instruction that describes the task:
### Input:
Run the given command and yield each line(s) one by one.
.. note::
The difference between this method and :code:`self.execute()`
is that :code:`self.execute()` wait for the process to end
in order to return its output.
### Response:
def run(self):
"""
Run the given command and yield each line(s) one by one.
.. note::
The difference between this method and :code:`self.execute()`
is that :code:`self.execute()` wait for the process to end
in order to return its output.
"""
with Popen(self.command, stdout=PIPE, shell=True) as process:
            # We initiate a process and pass the command to it.
while True:
                # We loop infinitely because we want to get the output
# until there is none.
# We get the current line from the process stdout.
#
# Note: we use rstrip() because we are paranoid :-)
current_line = process.stdout.readline().rstrip()
if not current_line:
# The current line is empty or equal to None.
# We break the loop.
break
# The line is not empty nor equal to None.
                # We decode and yield the current line
yield self._decode_output(current_line) |
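
A self-contained sketch of the same line-by-line streaming pattern using only the standard library (a POSIX shell and a stand-in echo command are assumed; the Command class and its _decode_output helper come from the record above):
from subprocess import Popen, PIPE

with Popen("echo first; echo second", stdout=PIPE, shell=True) as process:
    for raw_line in iter(process.stdout.readline, b""):
        print(raw_line.rstrip().decode("utf-8"))
Note that the record's loop breaks on the first falsy line after rstrip(), so a blank output line ends the iteration early; testing the raw readline() result against b"" (as above) distinguishes blank lines from end-of-stream.
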
def _locate_file(f, base_dir):
"""
Utility method for finding full path to a filename as string
"""
if base_dir == None:
return f
file_name = os.path.join(base_dir, f)
real = os.path.realpath(file_name)
#print_v('- Located %s at %s'%(f,real))
return real | Utility method for finding full path to a filename as string | Below is the the instruction that describes the task:
### Input:
Utility method for finding full path to a filename as string
### Response:
def _locate_file(f, base_dir):
"""
Utility method for finding full path to a filename as string
"""
if base_dir == None:
return f
file_name = os.path.join(base_dir, f)
real = os.path.realpath(file_name)
#print_v('- Located %s at %s'%(f,real))
return real |
def table(self, name, database=None, schema=None):
"""Create a table expression that references a particular a table
called `name` in a MySQL database called `database`.
Parameters
----------
name : str
The name of the table to retrieve.
database : str, optional
The database in which the table referred to by `name` resides. If
``None`` then the ``current_database`` is used.
schema : str, optional
The schema in which the table resides. If ``None`` then the
`public` schema is assumed.
Returns
-------
table : TableExpr
A table expression.
"""
if database is not None and database != self.current_database:
return self.database(name=database).table(name=name, schema=schema)
else:
alch_table = self._get_sqla_table(name, schema=schema)
node = self.table_class(alch_table, self, self._schemas.get(name))
            return self.table_expr_class(node) | Create a table expression that references a particular table
called `name` in a MySQL database called `database`.
Parameters
----------
name : str
The name of the table to retrieve.
database : str, optional
The database in which the table referred to by `name` resides. If
``None`` then the ``current_database`` is used.
schema : str, optional
The schema in which the table resides. If ``None`` then the
`public` schema is assumed.
Returns
-------
table : TableExpr
A table expression. | Below is the the instruction that describes the task:
### Input:
Create a table expression that references a particular table
called `name` in a MySQL database called `database`.
Parameters
----------
name : str
The name of the table to retrieve.
database : str, optional
The database in which the table referred to by `name` resides. If
``None`` then the ``current_database`` is used.
schema : str, optional
The schema in which the table resides. If ``None`` then the
`public` schema is assumed.
Returns
-------
table : TableExpr
A table expression.
### Response:
def table(self, name, database=None, schema=None):
"""Create a table expression that references a particular a table
called `name` in a MySQL database called `database`.
Parameters
----------
name : str
The name of the table to retrieve.
database : str, optional
The database in which the table referred to by `name` resides. If
``None`` then the ``current_database`` is used.
schema : str, optional
The schema in which the table resides. If ``None`` then the
`public` schema is assumed.
Returns
-------
table : TableExpr
A table expression.
"""
if database is not None and database != self.current_database:
return self.database(name=database).table(name=name, schema=schema)
else:
alch_table = self._get_sqla_table(name, schema=schema)
node = self.table_class(alch_table, self, self._schemas.get(name))
return self.table_expr_class(node) |
def draw(self):
"""
Renders the class balance chart on the specified axes from support.
"""
# Number of colors is either number of classes or 2
colors = resolve_colors(len(self.support_))
if self._mode == BALANCE:
self.ax.bar(
np.arange(len(self.support_)), self.support_,
color=colors, align='center', width=0.5
)
# Compare mode
else:
bar_width = 0.35
labels = ["train", "test"]
for idx, support in enumerate(self.support_):
index = np.arange(len(self.classes_))
if idx > 0:
index = index + bar_width
self.ax.bar(
index, support, bar_width,
color=colors[idx], label=labels[idx]
)
return self.ax | Renders the class balance chart on the specified axes from support. | Below is the the instruction that describes the task:
### Input:
Renders the class balance chart on the specified axes from support.
### Response:
def draw(self):
"""
Renders the class balance chart on the specified axes from support.
"""
# Number of colors is either number of classes or 2
colors = resolve_colors(len(self.support_))
if self._mode == BALANCE:
self.ax.bar(
np.arange(len(self.support_)), self.support_,
color=colors, align='center', width=0.5
)
# Compare mode
else:
bar_width = 0.35
labels = ["train", "test"]
for idx, support in enumerate(self.support_):
index = np.arange(len(self.classes_))
if idx > 0:
index = index + bar_width
self.ax.bar(
index, support, bar_width,
color=colors[idx], label=labels[idx]
)
return self.ax |
def run_HTM_false_positive_experiment_synapses(num_neurons = 1,
a = 512,
dim = 16000,
num_samples = 1000,
num_dendrites = 500,
test_dendrite_lengths = range(2, 32, 2),
num_trials = 1000):
"""
Run an experiment to test the false positive rate based on number of
synapses per dendrite, dimension and sparsity. Uses a single neuron,
with a threshold nonlinearity of theta = s/2.
Based on figure 5B in the original SDR paper.
The parameters used in generating the figure for this experiment are:
1. a = 512, dim = 16000
2. a = 4000, dim = 16000
3. a = 32, dim = 2000
4. a = 32, dim = 4000
In each case, we used 1000 samples per trial, 1000 trials, 500 dendrite
segments, and tested dendrite lengths in (2, 4, ..., 30), with the threshold
for each segment being half its length.
"""
for dendrite_length in test_dendrite_lengths:
nonlinearity = threshold_nonlinearity(dendrite_length / 2)
fps = []
fns = []
for trial in range(num_trials):
neuron = Neuron(size = dendrite_length*num_dendrites, num_dendrites = num_dendrites, dendrite_length = dendrite_length, dim = dim, nonlinearity = nonlinearity)
pos, neg = generate_evenly_distributed_data_sparse(dim = dim, num_active = a, num_samples = num_samples/2), generate_evenly_distributed_data_sparse(dim = dim, num_active = a, num_samples = num_samples/2)
#labels = numpy.asarray([1 for i in range(num_samples/2)] + [-1 for i in range(num_samples/2)])
neuron.HTM_style_initialize_on_data(pos, numpy.asarray([1 for i in range(num_samples/2)]))
error, fp, fn = get_error(neg, [-1 for i in range(num_samples/2)], [neuron])
fps.append(fp)
fns.append(fn)
print "Error at {} synapses per dendrite is {}, with {} false positives and {} false negatives".format(dendrite_length, fp/(num_samples/2.), fp, fn)
with open("num_dendrites_FP_{}_{}.txt".format(a, dim), "a") as f:
f.write(str(dendrite_length) + ", " + str(sum(fps)) + ", " + str(num_trials*num_samples/2.) + "\n") | Run an experiment to test the false positive rate based on number of
synapses per dendrite, dimension and sparsity. Uses a single neuron,
with a threshold nonlinearity of theta = s/2.
Based on figure 5B in the original SDR paper.
The parameters used in generating the figure for this experiment are:
1. a = 512, dim = 16000
2. a = 4000, dim = 16000
3. a = 32, dim = 2000
4. a = 32, dim = 4000
In each case, we used 1000 samples per trial, 1000 trials, 500 dendrite
segments, and tested dendrite lengths in (2, 4, ..., 30), with the threshold
for each segment being half its length. | Below is the the instruction that describes the task:
### Input:
Run an experiment to test the false positive rate based on number of
synapses per dendrite, dimension and sparsity. Uses a single neuron,
with a threshold nonlinearity of theta = s/2.
Based on figure 5B in the original SDR paper.
The parameters used in generating the figure for this experiment are:
1. a = 512, dim = 16000
2. a = 4000, dim = 16000
3. a = 32, dim = 2000
4. a = 32, dim = 4000
In each case, we used 1000 samples per trial, 1000 trials, 500 dendrite
segments, and tested dendrite lengths in (2, 4, ..., 30), with the threshold
for each segment being half its length.
### Response:
def run_HTM_false_positive_experiment_synapses(num_neurons = 1,
a = 512,
dim = 16000,
num_samples = 1000,
num_dendrites = 500,
test_dendrite_lengths = range(2, 32, 2),
num_trials = 1000):
"""
Run an experiment to test the false positive rate based on number of
synapses per dendrite, dimension and sparsity. Uses a single neuron,
with a threshold nonlinearity of theta = s/2.
Based on figure 5B in the original SDR paper.
The parameters used in generating the figure for this experiment are:
1. a = 512, dim = 16000
2. a = 4000, dim = 16000
3. a = 32, dim = 2000
4. a = 32, dim = 4000
In each case, we used 1000 samples per trial, 1000 trials, 500 dendrite
segments, and tested dendrite lengths in (2, 4, ..., 30), with the threshold
for each segment being half its length.
"""
for dendrite_length in test_dendrite_lengths:
nonlinearity = threshold_nonlinearity(dendrite_length / 2)
fps = []
fns = []
for trial in range(num_trials):
neuron = Neuron(size = dendrite_length*num_dendrites, num_dendrites = num_dendrites, dendrite_length = dendrite_length, dim = dim, nonlinearity = nonlinearity)
pos, neg = generate_evenly_distributed_data_sparse(dim = dim, num_active = a, num_samples = num_samples/2), generate_evenly_distributed_data_sparse(dim = dim, num_active = a, num_samples = num_samples/2)
#labels = numpy.asarray([1 for i in range(num_samples/2)] + [-1 for i in range(num_samples/2)])
neuron.HTM_style_initialize_on_data(pos, numpy.asarray([1 for i in range(num_samples/2)]))
error, fp, fn = get_error(neg, [-1 for i in range(num_samples/2)], [neuron])
fps.append(fp)
fns.append(fn)
print "Error at {} synapses per dendrite is {}, with {} false positives and {} false negatives".format(dendrite_length, fp/(num_samples/2.), fp, fn)
with open("num_dendrites_FP_{}_{}.txt".format(a, dim), "a") as f:
f.write(str(dendrite_length) + ", " + str(sum(fps)) + ", " + str(num_trials*num_samples/2.) + "\n") |
def source_table(self):
"""Source table (`~astropy.table.Table`).
Columns: GLON, GLAT, COUNTS
"""
table = Table()
table['GLON'] = np.array([0, 45, 45], dtype='float32')
table['GLAT'] = np.array([0, 0, 45], dtype='float32')
table['COUNTS'] = np.array([100, 100, 100], dtype='int32')
return table | Source table (`~astropy.table.Table`).
Columns: GLON, GLAT, COUNTS | Below is the the instruction that describes the task:
### Input:
Source table (`~astropy.table.Table`).
Columns: GLON, GLAT, COUNTS
### Response:
def source_table(self):
"""Source table (`~astropy.table.Table`).
Columns: GLON, GLAT, COUNTS
"""
table = Table()
table['GLON'] = np.array([0, 45, 45], dtype='float32')
table['GLAT'] = np.array([0, 0, 45], dtype='float32')
table['COUNTS'] = np.array([100, 100, 100], dtype='int32')
return table |
def read_smet(filename, mode):
"""Reads smet data and returns the data in required dataformat (pd df)
See https://models.slf.ch/docserver/meteoio/SMET_specifications.pdf
for further details on the specifications of this file format.
Parameters
----
filename : SMET file to read
mode : "d" for daily and "h" for hourly input
Returns
----
[header, data]
header: header as dict
data : data as pd df
"""
# dictionary
# based on smet spec V.1.1 and self defined
# daily data
dict_d = {'TA': 'tmean',
'TMAX': 'tmax', # no spec
'TMIN': 'tmin', # no spec
'PSUM': 'precip',
'ISWR': 'glob', # no spec
'RH': 'hum',
'VW': 'wind'}
# hourly data
dict_h = {'TA': 'temp',
'PSUM': 'precip',
'ISWR': 'glob', # no spec
'RH': 'hum',
'VW': 'wind'}
with open(filename) as f:
in_header = False
data_start = None
header = collections.OrderedDict()
for line_num, line in enumerate(f):
if line.strip() == '[HEADER]':
in_header = True
continue
elif line.strip() == '[DATA]':
data_start = line_num + 1
break
if in_header:
line_split = line.split('=')
k = line_split[0].strip()
v = line_split[1].strip()
header[k] = v
# get column names
columns = header['fields'].split()
multiplier = [float(x) for x in header['units_multiplier'].split()][1:]
data = pd.read_table(
filename,
sep=r'\s+',
na_values=[-999],
skiprows=data_start,
names=columns,
index_col='timestamp',
parse_dates=True,
)
data = data*multiplier
del data.index.name
# rename columns
if mode == "d":
data = data.rename(columns=dict_d)
if mode == "h":
data = data.rename(columns=dict_h)
    return header, data | Reads smet data and returns the data in the required data format (pd df)
See https://models.slf.ch/docserver/meteoio/SMET_specifications.pdf
for further details on the specifications of this file format.
Parameters
----
filename : SMET file to read
mode : "d" for daily and "h" for hourly input
Returns
----
[header, data]
header: header as dict
data : data as pd df | Below is the the instruction that describes the task:
### Input:
Reads smet data and returns the data in the required data format (pd df)
See https://models.slf.ch/docserver/meteoio/SMET_specifications.pdf
for further details on the specifications of this file format.
Parameters
----
filename : SMET file to read
mode : "d" for daily and "h" for hourly input
Returns
----
[header, data]
header: header as dict
data : data as pd df
### Response:
def read_smet(filename, mode):
"""Reads smet data and returns the data in required dataformat (pd df)
See https://models.slf.ch/docserver/meteoio/SMET_specifications.pdf
for further details on the specifications of this file format.
Parameters
----
filename : SMET file to read
mode : "d" for daily and "h" for hourly input
Returns
----
[header, data]
header: header as dict
data : data as pd df
"""
# dictionary
# based on smet spec V.1.1 and self defined
# daily data
dict_d = {'TA': 'tmean',
'TMAX': 'tmax', # no spec
'TMIN': 'tmin', # no spec
'PSUM': 'precip',
'ISWR': 'glob', # no spec
'RH': 'hum',
'VW': 'wind'}
# hourly data
dict_h = {'TA': 'temp',
'PSUM': 'precip',
'ISWR': 'glob', # no spec
'RH': 'hum',
'VW': 'wind'}
with open(filename) as f:
in_header = False
data_start = None
header = collections.OrderedDict()
for line_num, line in enumerate(f):
if line.strip() == '[HEADER]':
in_header = True
continue
elif line.strip() == '[DATA]':
data_start = line_num + 1
break
if in_header:
line_split = line.split('=')
k = line_split[0].strip()
v = line_split[1].strip()
header[k] = v
# get column names
columns = header['fields'].split()
multiplier = [float(x) for x in header['units_multiplier'].split()][1:]
data = pd.read_table(
filename,
sep=r'\s+',
na_values=[-999],
skiprows=data_start,
names=columns,
index_col='timestamp',
parse_dates=True,
)
data = data*multiplier
del data.index.name
# rename columns
if mode == "d":
data = data.rename(columns=dict_d)
if mode == "h":
data = data.rename(columns=dict_h)
return header, data |
def _get_pos(self):
"""
Get current position for scroll bar.
"""
if self._canvas.height >= self._max_height:
return 0
else:
return self._canvas.start_line / (self._max_height - self._canvas.height + 1) | Get current position for scroll bar. | Below is the the instruction that describes the task:
### Input:
Get current position for scroll bar.
### Response:
def _get_pos(self):
"""
Get current position for scroll bar.
"""
if self._canvas.height >= self._max_height:
return 0
else:
return self._canvas.start_line / (self._max_height - self._canvas.height + 1) |
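
A quick numeric check of the fraction computed above, with plain numbers standing in for the canvas attributes:
canvas_height = 20
max_height = 120
start_line = 50
pos = start_line / (max_height - canvas_height + 1)
print(round(pos, 3))   # 0.495 -- roughly half-way through the scrollable range
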
def more_statements(self, more_url):
"""Query the LRS for more statements
:param more_url: URL from a StatementsResult object used to retrieve more statements
:type more_url: str | unicode
:return: LRS Response object with the returned StatementsResult object as content
:rtype: :class:`tincan.lrs_response.LRSResponse`
"""
if isinstance(more_url, StatementsResult):
more_url = more_url.more
more_url = self.get_endpoint_server_root() + more_url
request = HTTPRequest(
method="GET",
resource=more_url
)
lrs_response = self._send_request(request)
if lrs_response.success:
lrs_response.content = StatementsResult.from_json(lrs_response.data)
return lrs_response | Query the LRS for more statements
:param more_url: URL from a StatementsResult object used to retrieve more statements
:type more_url: str | unicode
:return: LRS Response object with the returned StatementsResult object as content
:rtype: :class:`tincan.lrs_response.LRSResponse` | Below is the the instruction that describes the task:
### Input:
Query the LRS for more statements
:param more_url: URL from a StatementsResult object used to retrieve more statements
:type more_url: str | unicode
:return: LRS Response object with the returned StatementsResult object as content
:rtype: :class:`tincan.lrs_response.LRSResponse`
### Response:
def more_statements(self, more_url):
"""Query the LRS for more statements
:param more_url: URL from a StatementsResult object used to retrieve more statements
:type more_url: str | unicode
:return: LRS Response object with the returned StatementsResult object as content
:rtype: :class:`tincan.lrs_response.LRSResponse`
"""
if isinstance(more_url, StatementsResult):
more_url = more_url.more
more_url = self.get_endpoint_server_root() + more_url
request = HTTPRequest(
method="GET",
resource=more_url
)
lrs_response = self._send_request(request)
if lrs_response.success:
lrs_response.content = StatementsResult.from_json(lrs_response.data)
return lrs_response |
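A hedged usage sketch for more_statements above, assuming a tincan RemoteLRS client; the endpoint and login values are placeholders invented for illustration.
from tincan import RemoteLRS
lrs = RemoteLRS(endpoint="https://lrs.example.com/xapi/", username="user", password="pass")
first_page = lrs.query_statements({"limit": 25})
if first_page.success and first_page.content.more:
    next_page = lrs.more_statements(first_page.content)  # takes a StatementsResult or its 'more' URL
    print(len(next_page.content.statements))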
def obfn_reg(self):
r"""Compute regularisation terms and contribution to objective
function. Regularisation terms are :math:`\| Y \|_1` and
:math:`\| Y \|_{2,1}`.
"""
rl21 = np.sum(self.wl21 * np.sqrt(np.sum(self.obfn_gvar()**2,
axis=self.cri.axisC)))
rgr = sl.rfl2norm2(np.sqrt(self.GHGf*np.conj(fvf)*fvf), self.cri.Nv,
self.cri.axisN)/2.0
return (self.lmbda*rl21 + self.mu*rgr, rl21, rgr) | r"""Compute regularisation terms and contribution to objective
function. Regularisation terms are :math:`\| Y \|_1` and
:math:`\| Y \|_{2,1}`. | Below is the instruction that describes the task:
### Input:
r"""Compute regularisation terms and contribution to objective
function. Regularisation terms are :math:`\| Y \|_1` and
:math:`\| Y \|_{2,1}`.
### Response:
def obfn_reg(self):
r"""Compute regularisation terms and contribution to objective
function. Regularisation terms are :math:`\| Y \|_1` and
:math:`\| Y \|_{2,1}`.
"""
rl21 = np.sum(self.wl21 * np.sqrt(np.sum(self.obfn_gvar()**2,
axis=self.cri.axisC)))
rgr = sl.rfl2norm2(np.sqrt(self.GHGf*np.conj(fvf)*fvf), self.cri.Nv,
self.cri.axisN)/2.0
return (self.lmbda*rl21 + self.mu*rgr, rl21, rgr) |
def add_date_facet(self, *args, **kwargs):
"""Add a date factory facet"""
self.facets.append(DateHistogramFacet(*args, **kwargs)) | Add a date factory facet | Below is the instruction that describes the task:
### Input:
Add a date factory facet
### Response:
def add_date_facet(self, *args, **kwargs):
"""Add a date factory facet"""
self.facets.append(DateHistogramFacet(*args, **kwargs)) |
def triangle_normal(a,b,c):
'''
triangle_normal(a, b, c) yields the normal vector of the triangle whose vertices are given by
the points a, b, and c. If the points are 2D points, then 3D normal vectors are still yielded,
that are always (0,0,1) or (0,0,-1). This function auto-threads over matrices, in which case
they must be in equivalent orientations, and the result is returned in whatever orientation
they are given in. In some cases, the intended orientation of the matrices is ambiguous (e.g.,
if a, b, and c are 2 x 3 matrices), in which case the matrix is always assumed to be given in
(dims x vertices) orientation.
'''
(a,b,c) = [np.asarray(x) for x in (a,b,c)]
if len(a.shape) == 1 and len(b.shape) == 1 and len(c.shape) == 1:
return triangle_normal(*[np.transpose([x]) for x in (a,b,c)])[:,0]
(a,b,c) = [np.transpose([x]) if len(x.shape) == 1 else x for x in (a,b,c)]
# find a required number of dimensions, if possible
if a.shape[0] in (2,3):
dims = a.shape[0]
tx = True
else:
dims = a.shape[1]
(a,b,c) = [x.T for x in (a,b,c)]
tx = False
n = (a.shape[1] if a.shape[1] != 1 else b.shape[1] if b.shape[1] != 1 else
c.shape[1] if c.shape[1] != 1 else 1)
if dims == 2:
(a,b,c) = [np.vstack((x, np.zeros((1,n)))) for x in (a,b,c)]
ab = normalize(b - a)
ac = normalize(c - a)
res = np.cross(ab, ac, axisa=0, axisb=0)
return res.T if tx else res | triangle_normal(a, b, c) yields the normal vector of the triangle whose vertices are given by
the points a, b, and c. If the points are 2D points, then 3D normal vectors are still yielded,
that are always (0,0,1) or (0,0,-1). This function auto-threads over matrices, in which case
they must be in equivalent orientations, and the result is returned in whatever orientation
they are given in. In some cases, the intended orientation of the matrices is ambiguous (e.g.,
if a, b, and c are 2 x 3 matrices), in which case the matrix is always assumed to be given in
(dims x vertices) orientation. | Below is the instruction that describes the task:
### Input:
triangle_normal(a, b, c) yields the normal vector of the triangle whose vertices are given by
the points a, b, and c. If the points are 2D points, then 3D normal vectors are still yielded,
that are always (0,0,1) or (0,0,-1). This function auto-threads over matrices, in which case
they must be in equivalent orientations, and the result is returned in whatever orientation
they are given in. In some cases, the intended orientation of the matrices is ambiguous (e.g.,
if a, b, and c are 2 x 3 matrices), in which case the matrix is always assumed to be given in
(dims x vertices) orientation.
### Response:
def triangle_normal(a,b,c):
'''
triangle_normal(a, b, c) yields the normal vector of the triangle whose vertices are given by
the points a, b, and c. If the points are 2D points, then 3D normal vectors are still yielded,
that are always (0,0,1) or (0,0,-1). This function auto-threads over matrices, in which case
they must be in equivalent orientations, and the result is returned in whatever orientation
they are given in. In some cases, the intended orientation of the matrices is ambiguous (e.g.,
if a, b, and c are 2 x 3 matrices), in which case the matrix is always assumed to be given in
(dims x vertices) orientation.
'''
(a,b,c) = [np.asarray(x) for x in (a,b,c)]
if len(a.shape) == 1 and len(b.shape) == 1 and len(c.shape) == 1:
return triangle_normal(*[np.transpose([x]) for x in (a,b,c)])[:,0]
(a,b,c) = [np.transpose([x]) if len(x.shape) == 1 else x for x in (a,b,c)]
# find a required number of dimensions, if possible
if a.shape[0] in (2,3):
dims = a.shape[0]
tx = True
else:
dims = a.shape[1]
(a,b,c) = [x.T for x in (a,b,c)]
tx = False
n = (a.shape[1] if a.shape[1] != 1 else b.shape[1] if b.shape[1] != 1 else
c.shape[1] if c.shape[1] != 1 else 1)
if dims == 2:
(a,b,c) = [np.vstack((x, np.zeros((1,n)))) for x in (a,b,c)]
ab = normalize(b - a)
ac = normalize(c - a)
res = np.cross(ab, ac, axisa=0, axisb=0)
return res.T if tx else res |
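A quick numerical check of triangle_normal above, assuming the module's own normalize() helper is available alongside it.
import numpy as np
a, b, c = np.array([0., 0., 0.]), np.array([1., 0., 0.]), np.array([0., 1., 0.])
print(triangle_normal(a, b, c))   # unit right triangle in the xy-plane -> approximately [0. 0. 1.]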
def get_direct_derivatives(self, address):
"""Get all targets derived directly from the specified target.
Note that the specified target itself is not returned.
:API: public
"""
derivative_addrs = self._derivatives_by_derived_from.get(address, [])
return [self.get_target(addr) for addr in derivative_addrs] | Get all targets derived directly from the specified target.
Note that the specified target itself is not returned.
:API: public | Below is the instruction that describes the task:
### Input:
Get all targets derived directly from the specified target.
Note that the specified target itself is not returned.
:API: public
### Response:
def get_direct_derivatives(self, address):
"""Get all targets derived directly from the specified target.
Note that the specified target itself is not returned.
:API: public
"""
derivative_addrs = self._derivatives_by_derived_from.get(address, [])
return [self.get_target(addr) for addr in derivative_addrs] |
def switchport_list(self):
"""list[dict]:A list of dictionary items describing the details
of list of dictionary items describing the details of switch port"""
urn = "{urn:brocade.com:mgmt:brocade-interface-ext}"
result = []
request_interface = self.get_interface_switchport_request()
interface_result = self._callback(request_interface, 'get')
for interface in interface_result.findall('%sswitchport' % urn):
vlans = []
interface_type = self.get_node_value(interface, '%sinterface-type',
urn)
interface_name = self.get_node_value(interface, '%sinterface-name',
urn)
mode = self.get_node_value(interface, '%smode', urn)
intf = interface.find('%sactive-vlans' % urn)
for vlan_node in intf.findall('%svlanid' % urn):
vlan = vlan_node.text
vlans.append(vlan)
results = {'vlan-id': vlans,
'mode': mode,
'interface-name': interface_name,
'interface_type': interface_type}
result.append(results)
return result | list[dict]: A list of dictionary items describing the details of the switch ports | Below is the instruction that describes the task:
### Input:
list[dict]: A list of dictionary items describing the details of the switch ports
### Response:
def switchport_list(self):
"""list[dict]:A list of dictionary items describing the details
of list of dictionary items describing the details of switch port"""
urn = "{urn:brocade.com:mgmt:brocade-interface-ext}"
result = []
request_interface = self.get_interface_switchport_request()
interface_result = self._callback(request_interface, 'get')
for interface in interface_result.findall('%sswitchport' % urn):
vlans = []
interface_type = self.get_node_value(interface, '%sinterface-type',
urn)
interface_name = self.get_node_value(interface, '%sinterface-name',
urn)
mode = self.get_node_value(interface, '%smode', urn)
intf = interface.find('%sactive-vlans' % urn)
for vlan_node in intf.findall('%svlanid' % urn):
vlan = vlan_node.text
vlans.append(vlan)
results = {'vlan-id': vlans,
'mode': mode,
'interface-name': interface_name,
'interface_type': interface_type}
result.append(results)
return result |
def scene_color(frames):
"""parse a scene.color message"""
# "scene.color" <scene_id> <color>
reader = MessageReader(frames)
results = reader.string("command").uint32("scene_id").uint8_3("color").assert_end().get()
if results.command != "scene.color":
raise MessageParserError("Command is not 'scene.color'")
return (results.scene_id, np.array([results.color[0]/255, results.color[1]/255, results.color[2]/255])) | parse a scene.color message | Below is the instruction that describes the task:
### Input:
parse a scene.color message
### Response:
def scene_color(frames):
"""parse a scene.color message"""
# "scene.color" <scene_id> <color>
reader = MessageReader(frames)
results = reader.string("command").uint32("scene_id").uint8_3("color").assert_end().get()
if results.command != "scene.color":
raise MessageParserError("Command is not 'scene.color'")
return (results.scene_id, np.array([results.color[0]/255, results.color[1]/255, results.color[2]/255])) |
def _append_log(self, specs):
"""
The log contains the tids and corresponding specifications
used during launch with the specifications in JSON format.
"""
self._spec_log += specs # This should be removed
log_path = os.path.join(self.root_directory, ("%s.log" % self.batch_name))
core.Log.write_log(log_path, [spec for (_, spec) in specs], allow_append=True) | The log contains the tids and corresponding specifications
used during launch with the specifications in JSON format. | Below is the instruction that describes the task:
### Input:
The log contains the tids and corresponding specifications
used during launch with the specifications in JSON format.
### Response:
def _append_log(self, specs):
"""
The log contains the tids and corresponding specifications
used during launch with the specifications in JSON format.
"""
self._spec_log += specs # This should be removed
log_path = os.path.join(self.root_directory, ("%s.log" % self.batch_name))
core.Log.write_log(log_path, [spec for (_, spec) in specs], allow_append=True) |
def SETNS(cpu, dest):
"""
Sets byte if not sign.
:param cpu: current CPU.
:param dest: destination operand.
"""
dest.write(Operators.ITEBV(dest.size, cpu.SF == False, 1, 0)) | Sets byte if not sign.
:param cpu: current CPU.
:param dest: destination operand. | Below is the instruction that describes the task:
### Input:
Sets byte if not sign.
:param cpu: current CPU.
:param dest: destination operand.
### Response:
def SETNS(cpu, dest):
"""
Sets byte if not sign.
:param cpu: current CPU.
:param dest: destination operand.
"""
dest.write(Operators.ITEBV(dest.size, cpu.SF == False, 1, 0)) |
def _purge_jobs(timestamp):
'''
Purge records from the returner tables.
:param job_age_in_seconds: Purge jobs older than this
:return:
'''
with _get_serv() as cur:
try:
sql = 'delete from `jids` where jid in (select distinct jid from salt_returns where alter_time < %s)'
cur.execute(sql, (timestamp,))
cur.execute('COMMIT')
except MySQLdb.Error as e:
log.error('mysql returner archiver was unable to delete contents of table \'jids\'')
log.error(six.text_type(e))
raise salt.exceptions.SaltRunnerError(six.text_type(e))
try:
sql = 'delete from `salt_returns` where alter_time < %s'
cur.execute(sql, (timestamp,))
cur.execute('COMMIT')
except MySQLdb.Error as e:
log.error('mysql returner archiver was unable to delete contents of table \'salt_returns\'')
log.error(six.text_type(e))
raise salt.exceptions.SaltRunnerError(six.text_type(e))
try:
sql = 'delete from `salt_events` where alter_time < %s'
cur.execute(sql, (timestamp,))
cur.execute('COMMIT')
except MySQLdb.Error as e:
log.error('mysql returner archiver was unable to delete contents of table \'salt_events\'')
log.error(six.text_type(e))
raise salt.exceptions.SaltRunnerError(six.text_type(e))
return True | Purge records from the returner tables.
:param job_age_in_seconds: Purge jobs older than this
:return: | Below is the instruction that describes the task:
### Input:
Purge records from the returner tables.
:param job_age_in_seconds: Purge jobs older than this
:return:
### Response:
def _purge_jobs(timestamp):
'''
Purge records from the returner tables.
:param job_age_in_seconds: Purge jobs older than this
:return:
'''
with _get_serv() as cur:
try:
sql = 'delete from `jids` where jid in (select distinct jid from salt_returns where alter_time < %s)'
cur.execute(sql, (timestamp,))
cur.execute('COMMIT')
except MySQLdb.Error as e:
log.error('mysql returner archiver was unable to delete contents of table \'jids\'')
log.error(six.text_type(e))
raise salt.exceptions.SaltRunnerError(six.text_type(e))
try:
sql = 'delete from `salt_returns` where alter_time < %s'
cur.execute(sql, (timestamp,))
cur.execute('COMMIT')
except MySQLdb.Error as e:
log.error('mysql returner archiver was unable to delete contents of table \'salt_returns\'')
log.error(six.text_type(e))
raise salt.exceptions.SaltRunnerError(six.text_type(e))
try:
sql = 'delete from `salt_events` where alter_time < %s'
cur.execute(sql, (timestamp,))
cur.execute('COMMIT')
except MySQLdb.Error as e:
log.error('mysql returner archiver was unable to delete contents of table \'salt_events\'')
log.error(six.text_type(e))
raise salt.exceptions.SaltRunnerError(six.text_type(e))
return True |
def set_completed(self, *, completition_date=None):
""" Sets this message flag as completed
:param completition_date: the datetime this followUp was completed
"""
self.__status = Flag.Complete
completition_date = completition_date or dt.datetime.now()
if completition_date.tzinfo is None:
completition_date = self.protocol.timezone.localize(completition_date)
self.__completed = completition_date
self._track_changes() | Sets this message flag as completed
:param completition_date: the datetime this followUp was completed | Below is the instruction that describes the task:
### Input:
Sets this message flag as completed
:param completition_date: the datetime this followUp was completed
### Response:
def set_completed(self, *, completition_date=None):
""" Sets this message flag as completed
:param completition_date: the datetime this followUp was completed
"""
self.__status = Flag.Complete
completition_date = completition_date or dt.datetime.now()
if completition_date.tzinfo is None:
completition_date = self.protocol.timezone.localize(completition_date)
self.__completed = completition_date
self._track_changes() |
def _get_best_percentile(self, cluster, counts):
''' return all UMIs with counts >1% of the
median counts in the cluster '''
if len(cluster) == 1:
return list(cluster)
else:
threshold = np.median(list(counts.values()))/100
return [read for read in cluster if counts[read] > threshold] | return all UMIs with counts >1% of the
median counts in the cluster | Below is the instruction that describes the task:
### Input:
return all UMIs with counts >1% of the
median counts in the cluster
### Response:
def _get_best_percentile(self, cluster, counts):
''' return all UMIs with counts >1% of the
median counts in the cluster '''
if len(cluster) == 1:
return list(cluster)
else:
threshold = np.median(list(counts.values()))/100
return [read for read in cluster if counts[read] > threshold] |
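A standalone illustration of the 1%-of-median threshold used by _get_best_percentile above; the UMI strings and counts are made up.
import numpy as np
counts = {"ACGT": 100, "ACGA": 50, "TTTT": 1}
threshold = np.median(list(counts.values())) / 100   # median 50 -> threshold 0.5
kept = [umi for umi in counts if counts[umi] > threshold]
print(kept)   # all three UMIs pass, since even the singleton count 1 exceeds 0.5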
def move_to(self, x, y, pre_dl=None, post_dl=None):
"""Move mouse to (x, y)
**中文文档**
移动鼠标到 (x, y) 的坐标处。
"""
self.delay(pre_dl)
self.m.move(x, y)
self.delay(post_dl) | Move mouse to (x, y)
**中文文档**
移动鼠标到 (x, y) 的坐标处。 | Below is the instruction that describes the task:
### Input:
Move mouse to (x, y)
**中文文档**
移动鼠标到 (x, y) 的坐标处。
### Response:
def move_to(self, x, y, pre_dl=None, post_dl=None):
"""Move mouse to (x, y)
**中文文档**
移动鼠标到 (x, y) 的坐标处。
"""
self.delay(pre_dl)
self.m.move(x, y)
self.delay(post_dl) |
def sha1(filename):
"""Return SHA1 of filename."""
filename = unmap_file(filename)
if filename not in file_cache:
cache_file(filename)
if filename not in file_cache:
return None
pass
if file_cache[filename].sha1:
return file_cache[filename].sha1.hexdigest()
sha1 = hashlib.sha1()
for line in file_cache[filename].lines['plain']:
sha1.update(line.encode('utf-8'))
pass
file_cache[filename].sha1 = sha1
return sha1.hexdigest() | Return SHA1 of filename. | Below is the instruction that describes the task:
### Input:
Return SHA1 of filename.
### Response:
def sha1(filename):
"""Return SHA1 of filename."""
filename = unmap_file(filename)
if filename not in file_cache:
cache_file(filename)
if filename not in file_cache:
return None
pass
if file_cache[filename].sha1:
return file_cache[filename].sha1.hexdigest()
sha1 = hashlib.sha1()
for line in file_cache[filename].lines['plain']:
sha1.update(line.encode('utf-8'))
pass
file_cache[filename].sha1 = sha1
return sha1.hexdigest() |
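A standalone equivalent of the hashing loop in sha1 above, without the file_cache machinery; the sample lines are arbitrary.
import hashlib
lines = ["def f(x):\n", "    return x + 1\n"]
h = hashlib.sha1()
for line in lines:
    h.update(line.encode("utf-8"))
print(h.hexdigest())   # 40-character hex digest of the concatenated lines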
def write_crtf(regions, filename, coordsys='fk5', fmt='.6f', radunit='deg'):
"""
Converts a `list` of `~regions.Region` to CRTF string and write to file.
Parameters
----------
regions : `list`
List of `~regions.Region` objects
filename : `str`
Filename in which the string is to be written. Default is 'new.crtf'
coordsys : `str`, optional
Astropy Coordinate system that overrides the coordinate frames of all
regions. Default is 'fk5'.
fmt : `str`, optional
A python string format defining the output precision. Default is .6f,
which is accurate to 0.0036 arcseconds.
radunit : `str`, optional
This denotes the unit of the radius. Default is deg (degrees)
Examples
--------
>>> from astropy import units as u
>>> from astropy.coordinates import SkyCoord
>>> from regions import CircleSkyRegion, write_crtf
>>> reg_sky = CircleSkyRegion(SkyCoord(1 * u.deg, 2 * u.deg), 5 * u.deg)
>>> write_crtf([reg_sky], 'test_write.crtf')
>>> with open('test_write.crtf') as f:
... print(f.read())
#CRTF
global coord=fk5
+circle[[1.000007deg, 2.000002deg], 5.000000deg]
"""
output = crtf_objects_to_string(regions, coordsys, fmt, radunit)
with open(filename, 'w') as fh:
fh.write(output) | Converts a `list` of `~regions.Region` to CRTF string and write to file.
Parameters
----------
regions : `list`
List of `~regions.Region` objects
filename : `str`
Filename in which the string is to be written. Default is 'new.crtf'
coordsys : `str`, optional
Astropy Coordinate system that overrides the coordinate frames of all
regions. Default is 'fk5'.
fmt : `str`, optional
A python string format defining the output precision. Default is .6f,
which is accurate to 0.0036 arcseconds.
radunit : `str`, optional
This denotes the unit of the radius. Default is deg (degrees)
Examples
--------
>>> from astropy import units as u
>>> from astropy.coordinates import SkyCoord
>>> from regions import CircleSkyRegion, write_crtf
>>> reg_sky = CircleSkyRegion(SkyCoord(1 * u.deg, 2 * u.deg), 5 * u.deg)
>>> write_crtf([reg_sky], 'test_write.crtf')
>>> with open('test_write.crtf') as f:
... print(f.read())
#CRTF
global coord=fk5
+circle[[1.000007deg, 2.000002deg], 5.000000deg] | Below is the instruction that describes the task:
### Input:
Converts a `list` of `~regions.Region` to CRTF string and write to file.
Parameters
----------
regions : `list`
List of `~regions.Region` objects
filename : `str`
Filename in which the string is to be written. Default is 'new.crtf'
coordsys : `str`, optional
Astropy Coordinate system that overrides the coordinate frames of all
regions. Default is 'fk5'.
fmt : `str`, optional
A python string format defining the output precision. Default is .6f,
which is accurate to 0.0036 arcseconds.
radunit : `str`, optional
This denotes the unit of the radius. Default is deg (degrees)
Examples
--------
>>> from astropy import units as u
>>> from astropy.coordinates import SkyCoord
>>> from regions import CircleSkyRegion, write_crtf
>>> reg_sky = CircleSkyRegion(SkyCoord(1 * u.deg, 2 * u.deg), 5 * u.deg)
>>> write_crtf([reg_sky], 'test_write.crtf')
>>> with open('test_write.crtf') as f:
... print(f.read())
#CRTF
global coord=fk5
+circle[[1.000007deg, 2.000002deg], 5.000000deg]
### Response:
def write_crtf(regions, filename, coordsys='fk5', fmt='.6f', radunit='deg'):
"""
Converts a `list` of `~regions.Region` to CRTF string and write to file.
Parameters
----------
regions : `list`
List of `~regions.Region` objects
filename : `str`
Filename in which the string is to be written. Default is 'new.crtf'
coordsys : `str`, optional
Astropy Coordinate system that overrides the coordinate frames of all
regions. Default is 'fk5'.
fmt : `str`, optional
A python string format defining the output precision. Default is .6f,
which is accurate to 0.0036 arcseconds.
radunit : `str`, optional
This denotes the unit of the radius. Default is deg (degrees)
Examples
--------
>>> from astropy import units as u
>>> from astropy.coordinates import SkyCoord
>>> from regions import CircleSkyRegion, write_crtf
>>> reg_sky = CircleSkyRegion(SkyCoord(1 * u.deg, 2 * u.deg), 5 * u.deg)
>>> write_crtf([reg_sky], 'test_write.crtf')
>>> with open('test_write.crtf') as f:
... print(f.read())
#CRTF
global coord=fk5
+circle[[1.000007deg, 2.000002deg], 5.000000deg]
"""
output = crtf_objects_to_string(regions, coordsys, fmt, radunit)
with open(filename, 'w') as fh:
fh.write(output) |
def replace_wildcards(string, wildcard, regex):
"""
Replace wildcard symbols with regular expressions
:param wildcard:
:type wildcard: _sre.SRE_Pattern
:param regex:
:type regex: str
:rtype: tuple of (str, bool)
"""
replaced = False
match = wildcard.search(string)
if match:
string = wildcard.sub(regex, string)
logging.getLogger('agentml.trigger').debug('Parsing Pattern wildcards: {pattern}'.format(pattern=string))
replaced = True
return string, replaced | Replace wildcard symbols with regular expressions
:param wildcard:
:type wildcard: _sre.SRE_Pattern
:param regex:
:type regex: str
:rtype: tuple of (str, bool) | Below is the instruction that describes the task:
### Input:
Replace wildcard symbols with regular expressions
:param wildcard:
:type wildcard: _sre.SRE_Pattern
:param regex:
:type regex: str
:rtype: tuple of (str, bool)
### Response:
def replace_wildcards(string, wildcard, regex):
"""
Replace wildcard symbols with regular expressions
:param wildcard:
:type wildcard: _sre.SRE_Pattern
:param regex:
:type regex: str
:rtype: tuple of (str, bool)
"""
replaced = False
match = wildcard.search(string)
if match:
string = wildcard.sub(regex, string)
logging.getLogger('agentml.trigger').debug('Parsing Pattern wildcards: {pattern}'.format(pattern=string))
replaced = True
return string, replaced |
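A hedged example of replace_wildcards above; the '*' wildcard pattern and the capture-group regex are assumptions for illustration, not AgentML's actual substitution tables.
import re
wildcard = re.compile(r"\*")
pattern, did_replace = replace_wildcards("hello *", wildcard, r"(.+)")
print(pattern, did_replace)   # -> hello (.+) True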
def do_dot2(self, args, arguments):
"""
::
Usage:
dot2 FILENAME FORMAT
Export the data in cvs format to a file. Former cvs command
Arguments:
FILENAME The filename
FORMAT the export format, pdf, png, ...
"""
filename = arguments['FILENAME']
output_format = arguments['FORMAT']
base = filename.replace(".dot", "")
out = base + "." + output_format
if output_format == "pdf":
exec_command = "dot -Tps %s | epstopdf --filter --ooutput %s" % (
file, out)
else:
exec_command = "dot -T%s %s -o %s 2>/tmp/err" % (output_format, file, out)
os.system(exec_command) | ::
Usage:
dot2 FILENAME FORMAT
Export the data in cvs format to a file. Former cvs command
Arguments:
FILENAME The filename
FORMAT the export format, pdf, png, ... | Below is the instruction that describes the task:
### Input:
::
Usage:
dot2 FILENAME FORMAT
Export the data in cvs format to a file. Former cvs command
Arguments:
FILENAME The filename
FORMAT the export format, pdf, png, ...
### Response:
def do_dot2(self, args, arguments):
"""
::
Usage:
dot2 FILENAME FORMAT
Export the data in cvs format to a file. Former cvs command
Arguments:
FILENAME The filename
FORMAT the export format, pdf, png, ...
"""
filename = arguments['FILENAME']
output_format = arguments['FORMAT']
base = filename.replace(".dot", "")
out = base + "." + output_format
if output_format == "pdf":
exec_command = "dot -Tps %s | epstopdf --filter --ooutput %s" % (
file, out)
else:
exec_command = "dot -T%s %s -o %s 2>/tmp/err" % (output_format, file, out)
os.system(exec_command) |
def fill_missing_info(info: dict, site_url: str = DEFAULT_SITE) -> dict:
"Add missing info in a censored post info dict."
try:
md5, ext = find_censored_md5ext(info["id"])
except TypeError: # None returned by find_..
return info
sample_ext = "jpg" if ext != "zip" else "webm"
if info["id"] > 2_800_000:
site_url = site_url.rstrip("/")
file_url = f"{site_url}/data/{md5}.{ext}"
sample_url = f"{site_url}/data/sample/sample-{md5}.{sample_ext}"
else:
server = "raikou2" if info["id"] > 850_000 else "raikou1"
url_base = f"https://{server}.donmai.us"
file_url = f"{url_base}/{md5[:2]}/{md5[2:4]}/{md5}.{ext}"
sample_url = (f"{url_base}/sample/{md5[:2]}/{md5[2:4]}/"
f"sample-{md5}.{sample_ext}")
if info["image_width"] < 850:
sample_url = file_url
return {**info, **{
"file_ext": ext,
"md5": md5,
"file_url": file_url,
"large_file_url": sample_url,
"preview_file_url": (f"https://raikou4.donmai.us/preview/"
f"{md5[:2]}/{md5[2:4]}/{md5}.jpg"),
}} | Add missing info in a censored post info dict. | Below is the instruction that describes the task:
### Input:
Add missing info in a censored post info dict.
### Response:
def fill_missing_info(info: dict, site_url: str = DEFAULT_SITE) -> dict:
"Add missing info in a censored post info dict."
try:
md5, ext = find_censored_md5ext(info["id"])
except TypeError: # None returned by find_..
return info
sample_ext = "jpg" if ext != "zip" else "webm"
if info["id"] > 2_800_000:
site_url = site_url.rstrip("/")
file_url = f"{site_url}/data/{md5}.{ext}"
sample_url = f"{site_url}/data/sample/sample-{md5}.{sample_ext}"
else:
server = "raikou2" if info["id"] > 850_000 else "raikou1"
url_base = f"https://{server}.donmai.us"
file_url = f"{url_base}/{md5[:2]}/{md5[2:4]}/{md5}.{ext}"
sample_url = (f"{url_base}/sample/{md5[:2]}/{md5[2:4]}/"
f"sample-{md5}.{sample_ext}")
if info["image_width"] < 850:
sample_url = file_url
return {**info, **{
"file_ext": ext,
"md5": md5,
"file_url": file_url,
"large_file_url": sample_url,
"preview_file_url": (f"https://raikou4.donmai.us/preview/"
f"{md5[:2]}/{md5[2:4]}/{md5}.jpg"),
}} |
def to_ngrams(self, terms):
''' Converts terms to all possible ngrams
terms: list of terms
'''
if len(terms) <= self.n:
return terms
if self.n == 1:
n_grams = [[term] for term in terms]
else:
n_grams = []
for i in range(0,len(terms)-self.n+1):
n_grams.append(terms[i:i+self.n])
return n_grams | Converts terms to all possible ngrams
terms: list of terms | Below is the instruction that describes the task:
### Input:
Converts terms to all possible ngrams
terms: list of terms
### Response:
def to_ngrams(self, terms):
''' Converts terms to all possible ngrams
terms: list of terms
'''
if len(terms) <= self.n:
return terms
if self.n == 1:
n_grams = [[term] for term in terms]
else:
n_grams = []
for i in range(0,len(terms)-self.n+1):
n_grams.append(terms[i:i+self.n])
return n_grams |
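A standalone illustration of the sliding-window slicing in to_ngrams above, with n assumed to be 2.
terms = ["new", "york", "city", "hall"]
n = 2
n_grams = [terms[i:i + n] for i in range(0, len(terms) - n + 1)]
print(n_grams)   # -> [['new', 'york'], ['york', 'city'], ['city', 'hall']]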
def get_daily_songs(self):
"""
获取每日推荐歌曲
"""
self._daily_playlist = douban.get_daily_songs()
# 加入索引
for index, i in enumerate(self._daily_playlist):
i['title'] = str(index + 1) + '/' + str(len(self._daily_playlist)) + ' ' + i['title'] | 获取每日推荐歌曲 | Below is the instruction that describes the task:
### Input:
获取每日推荐歌曲
### Response:
def get_daily_songs(self):
"""
获取每日推荐歌曲
"""
self._daily_playlist = douban.get_daily_songs()
# 加入索引
for index, i in enumerate(self._daily_playlist):
i['title'] = str(index + 1) + '/' + str(len(self._daily_playlist)) + ' ' + i['title'] |
def clean_code(self, name, forbidden):
"""
Remove lines containing items in 'forbidden' from the code.
Helpful for executing converted notebooks that still retain IPython
magic commands.
"""
code = self.read_code(name)
code = code.split('\n')
new_code = []
for line in code:
if [bad for bad in forbidden if bad in line]:
pass
else:
allowed = ['time','timeit'] # Magics where we want to keep the command
line = self.strip_line_magic(line, allowed)
if isinstance(line,list):
line = ' '.join(line)
new_code.append(line)
new_code = '\n'.join(new_code)
self.write_code(name, new_code)
return new_code | Remove lines containing items in 'forbidden' from the code.
Helpful for executing converted notebooks that still retain IPython
magic commands. | Below is the instruction that describes the task:
### Input:
Remove lines containing items in 'forbidden' from the code.
Helpful for executing converted notebooks that still retain IPython
magic commands.
### Response:
def clean_code(self, name, forbidden):
"""
Remove lines containing items in 'forbidden' from the code.
Helpful for executing converted notebooks that still retain IPython
magic commands.
"""
code = self.read_code(name)
code = code.split('\n')
new_code = []
for line in code:
if [bad for bad in forbidden if bad in line]:
pass
else:
allowed = ['time','timeit'] # Magics where we want to keep the command
line = self.strip_line_magic(line, allowed)
if isinstance(line,list):
line = ' '.join(line)
new_code.append(line)
new_code = '\n'.join(new_code)
self.write_code(name, new_code)
return new_code |
def parse_roles(self, fetched_role, params):
"""
Parse a single IAM role and fetch additional data
"""
role = {}
role['instances_count'] = 'N/A'
# When resuming upon throttling error, skip if already fetched
if fetched_role['RoleName'] in self.roles:
return
api_client = params['api_client']
# Ensure consistent attribute names across resource types
role['id'] = fetched_role.pop('RoleId')
role['name'] = fetched_role.pop('RoleName')
role['arn'] = fetched_role.pop('Arn')
# Get other attributes
get_keys(fetched_role, role, [ 'CreateDate', 'Path'])
# Get role policies
policies = self.__get_inline_policies(api_client, 'role', role['id'], role['name'])
if len(policies):
role['inline_policies'] = policies
role['inline_policies_count'] = len(policies)
# Get instance profiles
profiles = handle_truncated_response(api_client.list_instance_profiles_for_role, {'RoleName': role['name']}, ['InstanceProfiles'])
manage_dictionary(role, 'instance_profiles', {})
for profile in profiles['InstanceProfiles']:
manage_dictionary(role['instance_profiles'], profile['InstanceProfileId'], {})
role['instance_profiles'][profile['InstanceProfileId']]['arn'] = profile['Arn']
role['instance_profiles'][profile['InstanceProfileId']]['name'] = profile['InstanceProfileName']
# Get trust relationship
role['assume_role_policy'] = {}
role['assume_role_policy']['PolicyDocument'] = fetched_role.pop('AssumeRolePolicyDocument')
# Save role
self.roles[role['id']] = role | Parse a single IAM role and fetch additional data | Below is the instruction that describes the task:
### Input:
Parse a single IAM role and fetch additional data
### Response:
def parse_roles(self, fetched_role, params):
"""
Parse a single IAM role and fetch additional data
"""
role = {}
role['instances_count'] = 'N/A'
# When resuming upon throttling error, skip if already fetched
if fetched_role['RoleName'] in self.roles:
return
api_client = params['api_client']
# Ensure consistent attribute names across resource types
role['id'] = fetched_role.pop('RoleId')
role['name'] = fetched_role.pop('RoleName')
role['arn'] = fetched_role.pop('Arn')
# Get other attributes
get_keys(fetched_role, role, [ 'CreateDate', 'Path'])
# Get role policies
policies = self.__get_inline_policies(api_client, 'role', role['id'], role['name'])
if len(policies):
role['inline_policies'] = policies
role['inline_policies_count'] = len(policies)
# Get instance profiles
profiles = handle_truncated_response(api_client.list_instance_profiles_for_role, {'RoleName': role['name']}, ['InstanceProfiles'])
manage_dictionary(role, 'instance_profiles', {})
for profile in profiles['InstanceProfiles']:
manage_dictionary(role['instance_profiles'], profile['InstanceProfileId'], {})
role['instance_profiles'][profile['InstanceProfileId']]['arn'] = profile['Arn']
role['instance_profiles'][profile['InstanceProfileId']]['name'] = profile['InstanceProfileName']
# Get trust relationship
role['assume_role_policy'] = {}
role['assume_role_policy']['PolicyDocument'] = fetched_role.pop('AssumeRolePolicyDocument')
# Save role
self.roles[role['id']] = role |
def init_input_obj(self):
"""Section 4 - Create uwg objects from input parameters
self.simTime # simulation time parameter obj
self.weather # weather obj for simulation time period
self.forcIP # Forcing obj
self.forc # Empty forcing obj
self.geoParam # geographic parameters obj
self.RSM # Rural site & vertical diffusion model obj
self.USM # Urban site & vertical diffusion model obj
self.UCM # Urban canopy model obj
self.UBL # Urban boundary layer model
self.road # urban road element
self.rural # rural road element
self.soilindex1 # soil index for urban road depth
self.soilindex2 # soil index for rural road depth
self.Sch # list of Schedule objects
"""
climate_file_path = os.path.join(self.epwDir, self.epwFileName)
self.simTime = SimParam(self.dtSim, self.dtWeather, self.Month,
self.Day, self.nDay) # simulation time parameters
# weather file data for simulation time period
self.weather = Weather(climate_file_path, self.simTime.timeInitial, self.simTime.timeFinal)
self.forcIP = Forcing(self.weather.staTemp, self.weather) # initialized Forcing class
self.forc = Forcing() # empty forcing class
# Initialize geographic Param and Urban Boundary Layer Objects
nightStart = 18. # arbitrary values for begin/end hour for night setpoint
nightEnd = 8.
maxdx = 250. # max dx (m)
self.geoParam = Param(self.h_ubl1, self.h_ubl2, self.h_ref, self.h_temp, self.h_wind, self.c_circ,
self.maxDay, self.maxNight, self.latTree, self.latGrss, self.albVeg, self.vegStart, self.vegEnd,
nightStart, nightEnd, self.windMin, self.WGMAX, self.c_exch, maxdx, self.G, self.CP, self.VK, self.R,
self.RV, self.LV, math.pi, self.SIGMA, self.WATERDENS, self.LVTT, self.TT, self.ESTT, self.CL,
self.CPV, self.B, self.CM, self.COLBURN)
self.UBL = UBLDef(
'C', self.charLength, self.weather.staTemp[0], maxdx, self.geoParam.dayBLHeight, self.geoParam.nightBLHeight)
# Defining road
emis = 0.93
asphalt = Material(self.kRoad, self.cRoad, 'asphalt')
road_T_init = 293.
road_horizontal = 1
# fraction of surface vegetation coverage
road_veg_coverage = min(self.vegCover/(1-self.bldDensity), 1.)
# define road layers
road_layer_num = int(math.ceil(self.d_road/0.05))
# 0.5/0.05 ~ 10 x 1 matrix of 0.05 thickness
thickness_vector = [0.05 for r in range(road_layer_num)]
material_vector = [asphalt for r in range(road_layer_num)]
self.road = Element(self.alb_road, emis, thickness_vector, material_vector, road_veg_coverage,
road_T_init, road_horizontal, name="urban_road")
self.rural = copy.deepcopy(self.road)
self.rural.vegCoverage = self.rurVegCover
self.rural._name = "rural_road"
# Reference site class (also include VDM)
self.RSM = RSMDef(self.lat, self.lon, self.GMT, self.h_obs,
self.weather.staTemp[0], self.weather.staPres[0], self.geoParam, self.z_meso_dir_path)
self.USM = RSMDef(self.lat, self.lon, self.GMT, self.bldHeight/10.,
self.weather.staTemp[0], self.weather.staPres[0], self.geoParam, self.z_meso_dir_path)
T_init = self.weather.staTemp[0]
H_init = self.weather.staHum[0]
self.UCM = UCMDef(self.bldHeight, self.bldDensity, self.verToHor, self.treeCoverage, self.sensAnth, self.latAnth, T_init, H_init,
self.weather.staUmod[0], self.geoParam, self.r_glaze_total, self.SHGC_total, self.alb_wall_total, self.road)
self.UCM.h_mix = self.h_mix
# Define Road Element & buffer to match ground temperature depth
roadMat, newthickness = procMat(self.road, self.MAXTHICKNESS, self.MINTHICKNESS)
for i in range(self.nSoil):
# if soil depth is greater then the thickness of the road
# we add new slices of soil at max thickness until road is greater or equal
is_soildepth_equal = self.is_near_zero(self.depth_soil[i][0] - sum(newthickness), 1e-15)
if is_soildepth_equal or (self.depth_soil[i][0] > sum(newthickness)):
while self.depth_soil[i][0] > sum(newthickness):
newthickness.append(self.MAXTHICKNESS)
roadMat.append(self.SOIL)
self.soilindex1 = i
break
self.road = Element(self.road.albedo, self.road.emissivity, newthickness, roadMat,
self.road.vegCoverage, self.road.layerTemp[0], self.road.horizontal, self.road._name)
# Define Rural Element
ruralMat, newthickness = procMat(self.rural, self.MAXTHICKNESS, self.MINTHICKNESS)
for i in range(self.nSoil):
# if soil depth is greater then the thickness of the road
# we add new slices of soil at max thickness until road is greater or equal
is_soildepth_equal = self.is_near_zero(self.depth_soil[i][0] - sum(newthickness), 1e-15)
if is_soildepth_equal or (self.depth_soil[i][0] > sum(newthickness)):
while self.depth_soil[i][0] > sum(newthickness):
newthickness.append(self.MAXTHICKNESS)
ruralMat.append(self.SOIL)
self.soilindex2 = i
break
self.rural = Element(self.rural.albedo, self.rural.emissivity, newthickness,
ruralMat, self.rural.vegCoverage, self.rural.layerTemp[0], self.rural.horizontal, self.rural._name) | Section 4 - Create uwg objects from input parameters
self.simTime # simulation time parameter obj
self.weather # weather obj for simulation time period
self.forcIP # Forcing obj
self.forc # Empty forcing obj
self.geoParam # geographic parameters obj
self.RSM # Rural site & vertical diffusion model obj
self.USM # Urban site & vertical diffusion model obj
self.UCM # Urban canopy model obj
self.UBL # Urban boundary layer model
self.road # urban road element
self.rural # rural road element
self.soilindex1 # soil index for urban road depth
self.soilindex2 # soil index for rural road depth
self.Sch # list of Schedule objects | Below is the instruction that describes the task:
### Input:
Section 4 - Create uwg objects from input parameters
self.simTime # simulation time parameter obj
self.weather # weather obj for simulation time period
self.forcIP # Forcing obj
self.forc # Empty forcing obj
self.geoParam # geographic parameters obj
self.RSM # Rural site & vertical diffusion model obj
self.USM # Urban site & vertical diffusion model obj
self.UCM # Urban canopy model obj
self.UBL # Urban boundary layer model
self.road # urban road element
self.rural # rural road element
self.soilindex1 # soil index for urban road depth
self.soilindex2 # soil index for rural road depth
self.Sch # list of Schedule objects
### Response:
def init_input_obj(self):
"""Section 4 - Create uwg objects from input parameters
self.simTime # simulation time parameter obj
self.weather # weather obj for simulation time period
self.forcIP # Forcing obj
self.forc # Empty forcing obj
self.geoParam # geographic parameters obj
self.RSM # Rural site & vertical diffusion model obj
self.USM # Urban site & vertical diffusion model obj
self.UCM # Urban canopy model obj
self.UBL # Urban boundary layer model
self.road # urban road element
self.rural # rural road element
self.soilindex1 # soil index for urban road depth
self.soilindex2 # soil index for rural road depth
self.Sch # list of Schedule objects
"""
climate_file_path = os.path.join(self.epwDir, self.epwFileName)
self.simTime = SimParam(self.dtSim, self.dtWeather, self.Month,
self.Day, self.nDay) # simulation time parameters
# weather file data for simulation time period
self.weather = Weather(climate_file_path, self.simTime.timeInitial, self.simTime.timeFinal)
self.forcIP = Forcing(self.weather.staTemp, self.weather) # initialized Forcing class
self.forc = Forcing() # empty forcing class
# Initialize geographic Param and Urban Boundary Layer Objects
nightStart = 18. # arbitrary values for begin/end hour for night setpoint
nightEnd = 8.
maxdx = 250. # max dx (m)
self.geoParam = Param(self.h_ubl1, self.h_ubl2, self.h_ref, self.h_temp, self.h_wind, self.c_circ,
self.maxDay, self.maxNight, self.latTree, self.latGrss, self.albVeg, self.vegStart, self.vegEnd,
nightStart, nightEnd, self.windMin, self.WGMAX, self.c_exch, maxdx, self.G, self.CP, self.VK, self.R,
self.RV, self.LV, math.pi, self.SIGMA, self.WATERDENS, self.LVTT, self.TT, self.ESTT, self.CL,
self.CPV, self.B, self.CM, self.COLBURN)
self.UBL = UBLDef(
'C', self.charLength, self.weather.staTemp[0], maxdx, self.geoParam.dayBLHeight, self.geoParam.nightBLHeight)
# Defining road
emis = 0.93
asphalt = Material(self.kRoad, self.cRoad, 'asphalt')
road_T_init = 293.
road_horizontal = 1
# fraction of surface vegetation coverage
road_veg_coverage = min(self.vegCover/(1-self.bldDensity), 1.)
# define road layers
road_layer_num = int(math.ceil(self.d_road/0.05))
# 0.5/0.05 ~ 10 x 1 matrix of 0.05 thickness
thickness_vector = [0.05 for r in range(road_layer_num)]
material_vector = [asphalt for r in range(road_layer_num)]
self.road = Element(self.alb_road, emis, thickness_vector, material_vector, road_veg_coverage,
road_T_init, road_horizontal, name="urban_road")
self.rural = copy.deepcopy(self.road)
self.rural.vegCoverage = self.rurVegCover
self.rural._name = "rural_road"
# Reference site class (also include VDM)
self.RSM = RSMDef(self.lat, self.lon, self.GMT, self.h_obs,
self.weather.staTemp[0], self.weather.staPres[0], self.geoParam, self.z_meso_dir_path)
self.USM = RSMDef(self.lat, self.lon, self.GMT, self.bldHeight/10.,
self.weather.staTemp[0], self.weather.staPres[0], self.geoParam, self.z_meso_dir_path)
T_init = self.weather.staTemp[0]
H_init = self.weather.staHum[0]
self.UCM = UCMDef(self.bldHeight, self.bldDensity, self.verToHor, self.treeCoverage, self.sensAnth, self.latAnth, T_init, H_init,
self.weather.staUmod[0], self.geoParam, self.r_glaze_total, self.SHGC_total, self.alb_wall_total, self.road)
self.UCM.h_mix = self.h_mix
# Define Road Element & buffer to match ground temperature depth
roadMat, newthickness = procMat(self.road, self.MAXTHICKNESS, self.MINTHICKNESS)
for i in range(self.nSoil):
# if soil depth is greater then the thickness of the road
# we add new slices of soil at max thickness until road is greater or equal
is_soildepth_equal = self.is_near_zero(self.depth_soil[i][0] - sum(newthickness), 1e-15)
if is_soildepth_equal or (self.depth_soil[i][0] > sum(newthickness)):
while self.depth_soil[i][0] > sum(newthickness):
newthickness.append(self.MAXTHICKNESS)
roadMat.append(self.SOIL)
self.soilindex1 = i
break
self.road = Element(self.road.albedo, self.road.emissivity, newthickness, roadMat,
self.road.vegCoverage, self.road.layerTemp[0], self.road.horizontal, self.road._name)
# Define Rural Element
ruralMat, newthickness = procMat(self.rural, self.MAXTHICKNESS, self.MINTHICKNESS)
for i in range(self.nSoil):
# if soil depth is greater then the thickness of the road
# we add new slices of soil at max thickness until road is greater or equal
is_soildepth_equal = self.is_near_zero(self.depth_soil[i][0] - sum(newthickness), 1e-15)
if is_soildepth_equal or (self.depth_soil[i][0] > sum(newthickness)):
while self.depth_soil[i][0] > sum(newthickness):
newthickness.append(self.MAXTHICKNESS)
ruralMat.append(self.SOIL)
self.soilindex2 = i
break
self.rural = Element(self.rural.albedo, self.rural.emissivity, newthickness,
ruralMat, self.rural.vegCoverage, self.rural.layerTemp[0], self.rural.horizontal, self.rural._name) |
def interpolate_bilinear_lonlat(self, lon, lat, values):
"""
Interpolate values at specific longitudes/latitudes using bilinear interpolation
If a position does not have four neighbours, this currently returns NaN.
Parameters
----------
lon, lat : :class:`~astropy.units.Quantity`
The longitude and latitude values as :class:`~astropy.units.Quantity` instances
with angle units.
values : `~numpy.ndarray`
1-D array with the values in each HEALPix pixel. This must have a
length of the form 12 * nside ** 2 (and nside is determined
automatically from this).
Returns
-------
result : `~numpy.ndarray`
1-D array of interpolated values
"""
if len(values) != self.npix:
raise ValueError('values must be an array of length {0} (got {1})'.format(self.npix, len(values)))
return interpolate_bilinear_lonlat(lon, lat, values, order=self.order) | Interpolate values at specific longitudes/latitudes using bilinear interpolation
If a position does not have four neighbours, this currently returns NaN.
Parameters
----------
lon, lat : :class:`~astropy.units.Quantity`
The longitude and latitude values as :class:`~astropy.units.Quantity` instances
with angle units.
values : `~numpy.ndarray`
1-D array with the values in each HEALPix pixel. This must have a
length of the form 12 * nside ** 2 (and nside is determined
automatically from this).
Returns
-------
result : `~numpy.ndarray`
1-D array of interpolated values | Below is the instruction that describes the task:
### Input:
Interpolate values at specific longitudes/latitudes using bilinear interpolation
If a position does not have four neighbours, this currently returns NaN.
Parameters
----------
lon, lat : :class:`~astropy.units.Quantity`
The longitude and latitude values as :class:`~astropy.units.Quantity` instances
with angle units.
values : `~numpy.ndarray`
1-D array with the values in each HEALPix pixel. This must have a
length of the form 12 * nside ** 2 (and nside is determined
automatically from this).
Returns
-------
result : `~numpy.ndarray`
1-D array of interpolated values
### Response:
def interpolate_bilinear_lonlat(self, lon, lat, values):
"""
Interpolate values at specific longitudes/latitudes using bilinear interpolation
If a position does not have four neighbours, this currently returns NaN.
Parameters
----------
lon, lat : :class:`~astropy.units.Quantity`
The longitude and latitude values as :class:`~astropy.units.Quantity` instances
with angle units.
values : `~numpy.ndarray`
1-D array with the values in each HEALPix pixel. This must have a
length of the form 12 * nside ** 2 (and nside is determined
automatically from this).
Returns
-------
result : `~numpy.ndarray`
1-D array of interpolated values
"""
if len(values) != self.npix:
raise ValueError('values must be an array of length {0} (got {1})'.format(self.npix, len(values)))
return interpolate_bilinear_lonlat(lon, lat, values, order=self.order) |
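A hedged usage sketch for interpolate_bilinear_lonlat above, assuming the astropy_healpix HEALPix class that this method appears to belong to; nside and the query positions are arbitrary.
import numpy as np
from astropy import units as u
from astropy_healpix import HEALPix
hp = HEALPix(nside=16, order="nested")
values = np.arange(hp.npix, dtype=float)               # one value per HEALPix pixel
interp = hp.interpolate_bilinear_lonlat([10, 20] * u.deg, [5, -5] * u.deg, values)
print(interp)   # two interpolated values; NaN where a position lacks four neighbours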
def xml_to_string(elem, qualified_name=None, public_id=None, system_id=None):
"""
Return a pretty-printed XML string for the Element.
Also allows setting a document type.
"""
from xml.dom import minidom
rough_string = ET.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
if qualified_name:
doctype = minidom.DOMImplementation().createDocumentType(
qualified_name, public_id, system_id)
reparsed.insertBefore(doctype, reparsed.documentElement)
return reparsed.toprettyxml(indent=" ") | Return a pretty-printed XML string for the Element.
Also allows setting a document type. | Below is the instruction that describes the task:
### Input:
Return a pretty-printed XML string for the Element.
Also allows setting a document type.
### Response:
def xml_to_string(elem, qualified_name=None, public_id=None, system_id=None):
"""
Return a pretty-printed XML string for the Element.
Also allows setting a document type.
"""
from xml.dom import minidom
rough_string = ET.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
if qualified_name:
doctype = minidom.DOMImplementation().createDocumentType(
qualified_name, public_id, system_id)
reparsed.insertBefore(doctype, reparsed.documentElement)
return reparsed.toprettyxml(indent=" ") |
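A small example of xml_to_string above, assuming xml.etree.ElementTree is imported as ET in the same module, as the function body implies; the SVG doctype strings are just illustrative.
import xml.etree.ElementTree as ET
root = ET.Element("svg", attrib={"width": "10"})
ET.SubElement(root, "rect")
print(xml_to_string(root, qualified_name="svg",
                    public_id="-//W3C//DTD SVG 1.1//EN",
                    system_id="http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"))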
def from_dict(config):
'''
Instantiate a new ProxyConfig from a dictionary that represents a
client configuration, as described in `the documentation`_.
.. _the documentation:
https://docs.docker.com/network/proxy/#configure-the-docker-client
'''
return ProxyConfig(
http=config.get('httpProxy'),
https=config.get('httpsProxy'),
ftp=config.get('ftpProxy'),
no_proxy=config.get('noProxy'),
) | Instantiate a new ProxyConfig from a dictionary that represents a
client configuration, as described in `the documentation`_.
.. _the documentation:
https://docs.docker.com/network/proxy/#configure-the-docker-client | Below is the instruction that describes the task:
### Input:
Instantiate a new ProxyConfig from a dictionary that represents a
client configuration, as described in `the documentation`_.
.. _the documentation:
https://docs.docker.com/network/proxy/#configure-the-docker-client
### Response:
def from_dict(config):
'''
Instantiate a new ProxyConfig from a dictionary that represents a
client configuration, as described in `the documentation`_.
.. _the documentation:
https://docs.docker.com/network/proxy/#configure-the-docker-client
'''
return ProxyConfig(
http=config.get('httpProxy'),
https=config.get('httpsProxy'),
ftp=config.get('ftpProxy'),
no_proxy=config.get('noProxy'),
) |
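A sketch of the client-config mapping handled by from_dict above, assuming it is exposed as a staticmethod on docker's ProxyConfig type; the proxy addresses are placeholders.
config = {"httpProxy": "http://proxy.example.com:3128", "noProxy": "localhost,127.0.0.1"}
proxy = ProxyConfig.from_dict(config)
print(proxy.http, proxy.no_proxy)   # unset keys such as httpsProxy and ftpProxy come back as None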
def compress(self, condition, axis=0, out=None):
"""Return selected slices of an array along given axis.
Parameters
----------
condition : array_like, bool
Array that selects which entries to return. N.B., if len(condition)
is less than the size of the given axis, then output is truncated to the length
of the condition array.
axis : int, optional
Axis along which to take slices. If None, work on the flattened array.
out : ndarray, optional
Output array. Its type is preserved and it must be of the right
shape to hold the output.
Returns
-------
out : HaplotypeArray
A copy of the array without the slices along axis for which `condition`
is false.
Examples
--------
>>> import allel
>>> h = allel.HaplotypeArray([[0, 0, 0, 1],
... [0, 1, 1, 1],
... [0, 2, -1, -1]], dtype='i1')
>>> h.compress([True, False, True], axis=0)
<HaplotypeArray shape=(2, 4) dtype=int8>
0 0 0 1
0 2 . .
>>> h.compress([True, False, True, False], axis=1)
<HaplotypeArray shape=(3, 2) dtype=int8>
0 0
0 1
0 .
"""
return compress_haplotype_array(self, condition, axis=axis, cls=type(self),
compress=np.compress, out=out) | Return selected slices of an array along given axis.
Parameters
----------
condition : array_like, bool
Array that selects which entries to return. N.B., if len(condition)
is less than the size of the given axis, then output is truncated to the length
of the condition array.
axis : int, optional
Axis along which to take slices. If None, work on the flattened array.
out : ndarray, optional
Output array. Its type is preserved and it must be of the right
shape to hold the output.
Returns
-------
out : HaplotypeArray
A copy of the array without the slices along axis for which `condition`
is false.
Examples
--------
>>> import allel
>>> h = allel.HaplotypeArray([[0, 0, 0, 1],
... [0, 1, 1, 1],
... [0, 2, -1, -1]], dtype='i1')
>>> h.compress([True, False, True], axis=0)
<HaplotypeArray shape=(2, 4) dtype=int8>
0 0 0 1
0 2 . .
>>> h.compress([True, False, True, False], axis=1)
<HaplotypeArray shape=(3, 2) dtype=int8>
0 0
0 1
0 . | Below is the instruction that describes the task:
### Input:
Return selected slices of an array along given axis.
Parameters
----------
condition : array_like, bool
Array that selects which entries to return. N.B., if len(condition)
is less than the size of the given axis, then output is truncated to the length
of the condition array.
axis : int, optional
Axis along which to take slices. If None, work on the flattened array.
out : ndarray, optional
Output array. Its type is preserved and it must be of the right
shape to hold the output.
Returns
-------
out : HaplotypeArray
A copy of the array without the slices along axis for which `condition`
is false.
Examples
--------
>>> import allel
>>> h = allel.HaplotypeArray([[0, 0, 0, 1],
... [0, 1, 1, 1],
... [0, 2, -1, -1]], dtype='i1')
>>> h.compress([True, False, True], axis=0)
<HaplotypeArray shape=(2, 4) dtype=int8>
0 0 0 1
0 2 . .
>>> h.compress([True, False, True, False], axis=1)
<HaplotypeArray shape=(3, 2) dtype=int8>
0 0
0 1
0 .
### Response:
def compress(self, condition, axis=0, out=None):
"""Return selected slices of an array along given axis.
Parameters
----------
condition : array_like, bool
Array that selects which entries to return. N.B., if len(condition)
is less than the size of the given axis, then output is truncated to the length
of the condition array.
axis : int, optional
Axis along which to take slices. If None, work on the flattened array.
out : ndarray, optional
Output array. Its type is preserved and it must be of the right
shape to hold the output.
Returns
-------
out : HaplotypeArray
A copy of the array without the slices along axis for which `condition`
is false.
Examples
--------
>>> import allel
>>> h = allel.HaplotypeArray([[0, 0, 0, 1],
... [0, 1, 1, 1],
... [0, 2, -1, -1]], dtype='i1')
>>> h.compress([True, False, True], axis=0)
<HaplotypeArray shape=(2, 4) dtype=int8>
0 0 0 1
0 2 . .
>>> h.compress([True, False, True, False], axis=1)
<HaplotypeArray shape=(3, 2) dtype=int8>
0 0
0 1
0 .
"""
return compress_haplotype_array(self, condition, axis=axis, cls=type(self),
compress=np.compress, out=out) |
def normalize(self, bias_range=1, poly_range=None, ignored_terms=None):
"""Normalizes the biases of the binary polynomial such that they fall in
the provided range(s).
If `poly_range` is provided, then `bias_range` will be treated as
the range for the linear biases and `poly_range` will be used for
the range of the other biases.
Args:
bias_range (number/pair):
Value/range by which to normalize the all the biases, or if
`poly_range` is provided, just the linear biases.
poly_range (number/pair, optional):
Value/range by which to normalize the higher order biases.
ignored_terms (iterable, optional):
Biases associated with these terms are not scaled.
"""
def parse_range(r):
if isinstance(r, Number):
return -abs(r), abs(r)
return r
if ignored_terms is None:
ignored_terms = set()
else:
ignored_terms = {asfrozenset(term) for term in ignored_terms}
if poly_range is None:
linear_range, poly_range = bias_range, bias_range
else:
linear_range = bias_range
lin_range, poly_range = map(parse_range, (linear_range, poly_range))
# determine the current ranges for linear, higherorder
lmin = lmax = 0
pmin = pmax = 0
for term, bias in self.items():
if term in ignored_terms:
# we don't use the ignored terms to calculate the scaling
continue
if len(term) == 1:
lmin = min(bias, lmin)
lmax = max(bias, lmax)
elif len(term) > 1:
pmin = min(bias, pmin)
pmax = max(bias, pmax)
inv_scalar = max(lmin / lin_range[0], lmax / lin_range[1],
pmin / poly_range[0], pmax / poly_range[1])
if inv_scalar != 0:
self.scale(1 / inv_scalar, ignored_terms=ignored_terms) | Normalizes the biases of the binary polynomial such that they fall in
the provided range(s).
If `poly_range` is provided, then `bias_range` will be treated as
the range for the linear biases and `poly_range` will be used for
the range of the other biases.
Args:
bias_range (number/pair):
Value/range by which to normalize the all the biases, or if
`poly_range` is provided, just the linear biases.
poly_range (number/pair, optional):
Value/range by which to normalize the higher order biases.
ignored_terms (iterable, optional):
Biases associated with these terms are not scaled. | Below is the instruction that describes the task:
### Input:
Normalizes the biases of the binary polynomial such that they fall in
the provided range(s).
If `poly_range` is provided, then `bias_range` will be treated as
the range for the linear biases and `poly_range` will be used for
the range of the other biases.
Args:
bias_range (number/pair):
Value/range by which to normalize all the biases, or if
`poly_range` is provided, just the linear biases.
poly_range (number/pair, optional):
Value/range by which to normalize the higher order biases.
ignored_terms (iterable, optional):
Biases associated with these terms are not scaled.
### Response:
def normalize(self, bias_range=1, poly_range=None, ignored_terms=None):
"""Normalizes the biases of the binary polynomial such that they fall in
the provided range(s).
If `poly_range` is provided, then `bias_range` will be treated as
the range for the linear biases and `poly_range` will be used for
the range of the other biases.
Args:
bias_range (number/pair):
                Value/range by which to normalize all the biases, or if
`poly_range` is provided, just the linear biases.
poly_range (number/pair, optional):
Value/range by which to normalize the higher order biases.
ignored_terms (iterable, optional):
Biases associated with these terms are not scaled.
"""
def parse_range(r):
if isinstance(r, Number):
return -abs(r), abs(r)
return r
if ignored_terms is None:
ignored_terms = set()
else:
ignored_terms = {asfrozenset(term) for term in ignored_terms}
if poly_range is None:
linear_range, poly_range = bias_range, bias_range
else:
linear_range = bias_range
lin_range, poly_range = map(parse_range, (linear_range, poly_range))
# determine the current ranges for linear, higherorder
lmin = lmax = 0
pmin = pmax = 0
for term, bias in self.items():
if term in ignored_terms:
# we don't use the ignored terms to calculate the scaling
continue
if len(term) == 1:
lmin = min(bias, lmin)
lmax = max(bias, lmax)
elif len(term) > 1:
pmin = min(bias, pmin)
pmax = max(bias, pmax)
inv_scalar = max(lmin / lin_range[0], lmax / lin_range[1],
pmin / poly_range[0], pmax / poly_range[1])
if inv_scalar != 0:
self.scale(1 / inv_scalar, ignored_terms=ignored_terms) |
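A minimal usage sketch for normalize() above, assuming dimod's BinaryPolynomial accepts a term-to-bias mapping as shown; the polynomial and its values are made up for illustration.

import dimod

poly = dimod.BinaryPolynomial({('a',): 4.0, ('a', 'b'): 2.0, ('a', 'b', 'c'): -8.0}, dimod.SPIN)
poly.normalize(bias_range=1)  # scale every bias into [-1, 1]
# the largest magnitude was the cubic bias (-8), so everything is divided by 8:
# ('a',): 0.5, ('a', 'b'): 0.25, ('a', 'b', 'c'): -1.0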
def readcols(infile, cols=[0, 1, 2, 3], hms=False):
"""
Read the columns from an ASCII file as numpy arrays.
Parameters
----------
infile : str
Filename of ASCII file with array data as columns.
cols : list of int
List of 0-indexed column numbers for columns to be turned into numpy arrays
(DEFAULT- [0,1,2,3]).
Returns
-------
outarr : list of numpy arrays
        Simple list of numpy arrays in the order as specified in the 'cols' parameter.
"""
fin = open(infile,'r')
outarr = []
for l in fin.readlines():
l = l.strip()
if len(l) == 0 or len(l.split()) < len(cols) or (len(l) > 0 and l[0] == '#' or (l.find("INDEF") > -1)): continue
for i in range(10):
            lnew = l.replace("  ", " ")  # collapse runs of spaces into single spaces
if lnew == l: break
else: l = lnew
lspl = lnew.split(" ")
if len(outarr) == 0:
for c in range(len(cols)): outarr.append([])
for c,n in zip(cols,list(range(len(cols)))):
if not hms:
val = float(lspl[c])
else:
val = lspl[c]
outarr[n].append(val)
fin.close()
for n in range(len(cols)):
outarr[n] = np.array(outarr[n])
return outarr | Read the columns from an ASCII file as numpy arrays.
Parameters
----------
infile : str
Filename of ASCII file with array data as columns.
cols : list of int
List of 0-indexed column numbers for columns to be turned into numpy arrays
(DEFAULT- [0,1,2,3]).
Returns
-------
outarr : list of numpy arrays
Simple list of numpy arrays in the order as specified in the 'cols' parameter. | Below is the instruction that describes the task:
### Input:
Read the columns from an ASCII file as numpy arrays.
Parameters
----------
infile : str
Filename of ASCII file with array data as columns.
cols : list of int
List of 0-indexed column numbers for columns to be turned into numpy arrays
(DEFAULT- [0,1,2,3]).
Returns
-------
outarr : list of numpy arrays
Simple list of numpy arrays in the order as specified in the 'cols' parameter.
### Response:
def readcols(infile, cols=[0, 1, 2, 3], hms=False):
"""
Read the columns from an ASCII file as numpy arrays.
Parameters
----------
infile : str
Filename of ASCII file with array data as columns.
cols : list of int
List of 0-indexed column numbers for columns to be turned into numpy arrays
(DEFAULT- [0,1,2,3]).
Returns
-------
outarr : list of numpy arrays
        Simple list of numpy arrays in the order as specified in the 'cols' parameter.
"""
fin = open(infile,'r')
outarr = []
for l in fin.readlines():
l = l.strip()
if len(l) == 0 or len(l.split()) < len(cols) or (len(l) > 0 and l[0] == '#' or (l.find("INDEF") > -1)): continue
for i in range(10):
            lnew = l.replace("  ", " ")  # collapse runs of spaces into single spaces
if lnew == l: break
else: l = lnew
lspl = lnew.split(" ")
if len(outarr) == 0:
for c in range(len(cols)): outarr.append([])
for c,n in zip(cols,list(range(len(cols)))):
if not hms:
val = float(lspl[c])
else:
val = lspl[c]
outarr[n].append(val)
fin.close()
for n in range(len(cols)):
outarr[n] = np.array(outarr[n])
return outarr |
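A brief usage sketch for readcols(); the catalog file name and the column choice are placeholder assumptions.

# read x/y positions from columns 0 and 1 of a whitespace-delimited catalog;
# comment lines, short lines and lines containing INDEF are skipped by readcols
x, y = readcols('coords.txt', cols=[0, 1])
print(x.mean(), y.mean())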
def del_application(self, application, sync=True):
"""
delete application from this company
        :param application: the application to be deleted from this company
:param sync: If sync=True(default) synchronize with Ariane server. If sync=False,
add the application object on list to be removed on next save().
:return:
"""
LOGGER.debug("Company.del_application")
if not sync:
self.applications_2_rm.append(application)
else:
if application.id is None:
application.sync()
if self.id is not None and application.id is not None:
params = {
'id': self.id,
'applicationID': application.id
}
args = {'http_operation': 'GET', 'operation_path': 'update/applications/delete', 'parameters': params}
response = CompanyService.requester.call(args)
if response.rc != 0:
LOGGER.warning(
'Company.del_application - Problem while updating company ' + self.name +
'. Reason: ' + str(response.response_content) + '-' + str(response.error_message) +
" (" + str(response.rc) + ")"
)
else:
self.applications_ids.remove(application.id)
application.sync()
else:
LOGGER.warning(
'Company.del_application - Problem while updating company ' + self.name + '. Reason: application ' +
application.name + ' id is None or self.id is None'
) | delete application from this company
:param application: the application to be deleted from this company
:param sync: If sync=True(default) synchronize with Ariane server. If sync=False,
add the application object on list to be removed on next save().
:return: | Below is the the instruction that describes the task:
### Input:
delete application from this company
:param application: the application to be deleted from this company
:param sync: If sync=True(default) synchronize with Ariane server. If sync=False,
add the application object on list to be removed on next save().
:return:
### Response:
def del_application(self, application, sync=True):
"""
delete application from this company
        :param application: the application to be deleted from this company
:param sync: If sync=True(default) synchronize with Ariane server. If sync=False,
add the application object on list to be removed on next save().
:return:
"""
LOGGER.debug("Company.del_application")
if not sync:
self.applications_2_rm.append(application)
else:
if application.id is None:
application.sync()
if self.id is not None and application.id is not None:
params = {
'id': self.id,
'applicationID': application.id
}
args = {'http_operation': 'GET', 'operation_path': 'update/applications/delete', 'parameters': params}
response = CompanyService.requester.call(args)
if response.rc != 0:
LOGGER.warning(
'Company.del_application - Problem while updating company ' + self.name +
'. Reason: ' + str(response.response_content) + '-' + str(response.error_message) +
" (" + str(response.rc) + ")"
)
else:
self.applications_ids.remove(application.id)
application.sync()
else:
LOGGER.warning(
'Company.del_application - Problem while updating company ' + self.name + '. Reason: application ' +
application.name + ' id is None or self.id is None'
) |
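A short, hedged usage sketch; company and app are assumed to be existing Company/Application instances, and save() is the persistence call the docstring refers to.

# immediate removal against the Ariane server
company.del_application(app)

# deferred removal: queue it now and push the change with the next save()
company.del_application(app, sync=False)
company.save()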
def start(self, on_add, on_update, on_delete):
"""
Starts monitoring the file path, passing along on_(add|update|delete)
callbacks to a watchdog observer.
        Iterates over the files in the target path and calls the on_created
        callback before starting the observer, so
that existing files aren't missed.
"""
handler = ConfigFileChangeHandler(
self.target_class, on_add, on_update, on_delete
)
for file_name in os.listdir(self.file_path):
if os.path.isdir(os.path.join(self.file_path, file_name)):
continue
if (
not self.target_class.config_subdirectory and
not (
file_name.endswith(".yaml") or file_name.endswith(".yml")
)
):
continue
handler.on_created(
events.FileCreatedEvent(
os.path.join(self.file_path, file_name)
)
)
observer = observers.Observer()
observer.schedule(handler, self.file_path)
observer.start()
return observer | Starts monitoring the file path, passing along on_(add|update|delete)
callbacks to a watchdog observer.
Iterates over the files in the target path and calls the on_created
callback before starting the observer, so
that existing files aren't missed. | Below is the the instruction that describes the task:
### Input:
Starts monitoring the file path, passing along on_(add|update|delete)
callbacks to a watchdog observer.
Iterates over the files in the target path and calls the on_created
callback before starting the observer, so
that existing files aren't missed.
### Response:
def start(self, on_add, on_update, on_delete):
"""
Starts monitoring the file path, passing along on_(add|update|delete)
callbacks to a watchdog observer.
        Iterates over the files in the target path and calls the on_created
        callback before starting the observer, so
that existing files aren't missed.
"""
handler = ConfigFileChangeHandler(
self.target_class, on_add, on_update, on_delete
)
for file_name in os.listdir(self.file_path):
if os.path.isdir(os.path.join(self.file_path, file_name)):
continue
if (
not self.target_class.config_subdirectory and
not (
file_name.endswith(".yaml") or file_name.endswith(".yml")
)
):
continue
handler.on_created(
events.FileCreatedEvent(
os.path.join(self.file_path, file_name)
)
)
observer = observers.Observer()
observer.schedule(handler, self.file_path)
observer.start()
return observer |
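A hedged wiring sketch for start() above; the callback bodies are illustrative and config_watcher stands in for an instance of the watcher class.

import time

def on_add(obj):
    print('added', obj)

def on_update(obj):
    print('updated', obj)

def on_delete(obj):
    print('deleted', obj)

observer = config_watcher.start(on_add, on_update, on_delete)
try:
    while True:
        time.sleep(1)  # keep the main thread alive while watchdog runs
finally:
    observer.stop()
    observer.join()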
def __process_by_python(self):
"""!
@brief Performs processing using python code.
"""
self.__scores = {}
for k in range(self.__kmin, self.__kmax):
clusters = self.__calculate_clusters(k)
if len(clusters) != k:
self.__scores[k] = float('nan')
continue
score = silhouette(self.__data, clusters).process().get_score()
self.__scores[k] = sum(score) / len(score)
if self.__scores[k] > self.__score:
self.__score = self.__scores[k]
self.__amount = k | !
@brief Performs processing using python code. | Below is the the instruction that describes the task:
### Input:
!
@brief Performs processing using python code.
### Response:
def __process_by_python(self):
"""!
@brief Performs processing using python code.
"""
self.__scores = {}
for k in range(self.__kmin, self.__kmax):
clusters = self.__calculate_clusters(k)
if len(clusters) != k:
self.__scores[k] = float('nan')
continue
score = silhouette(self.__data, clusters).process().get_score()
self.__scores[k] = sum(score) / len(score)
if self.__scores[k] > self.__score:
self.__score = self.__scores[k]
self.__amount = k |
def _call(self, x):
"""Return wavelet transform of ``x``."""
if self.impl == 'pywt':
coeffs = pywt.wavedecn(
x, wavelet=self.pywt_wavelet, level=self.nlevels,
mode=self.pywt_pad_mode, axes=self.axes)
return pywt.ravel_coeffs(coeffs, axes=self.axes)[0]
else:
raise RuntimeError("bad `impl` '{}'".format(self.impl)) | Return wavelet transform of ``x``. | Below is the the instruction that describes the task:
### Input:
Return wavelet transform of ``x``.
### Response:
def _call(self, x):
"""Return wavelet transform of ``x``."""
if self.impl == 'pywt':
coeffs = pywt.wavedecn(
x, wavelet=self.pywt_wavelet, level=self.nlevels,
mode=self.pywt_pad_mode, axes=self.axes)
return pywt.ravel_coeffs(coeffs, axes=self.axes)[0]
else:
raise RuntimeError("bad `impl` '{}'".format(self.impl)) |
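The same forward transform can be sketched directly with PyWavelets (ravel_coeffs needs PyWavelets >= 1.0); the input array, wavelet and level are arbitrary assumptions.

import numpy as np
import pywt

x = np.random.rand(64, 64)
coeffs = pywt.wavedecn(x, wavelet='db2', level=3, mode='periodization')
flat, coeff_slices, coeff_shapes = pywt.ravel_coeffs(coeffs)
# 'flat' corresponds to the one-dimensional coefficient vector returned by _call above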
def run(self, arguments=None, get_unknowns=False):
"""
Init point to execute the script.
If `arguments` string is given, will evaluate the arguments, else
evaluates sys.argv. Any inheriting class should extend the run method
(but first calling BaseCmdLineTool.run(self)).
"""
# redirect PIPE signal to quiet kill script, if not on Windows
if os.name != 'nt':
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
if get_unknowns:
if arguments:
self.args, self.unknown_args = (self.argparser.parse_known_args
(args=arguments.split()))
else:
(self.args,
self.unknown_args) = self.argparser.parse_known_args()
self.args = vars(self.args)
else:
if arguments:
myargs = arguments.split()
self.args = vars(self.argparser.parse_args
(args=myargs))
else:
self.args = vars(self.argparser.parse_args())
self.progress_bar_enabled = (not (self.args['no_progressbar'] or
self.is_stdin)) | Init point to execute the script.
If `arguments` string is given, will evaluate the arguments, else
evaluates sys.argv. Any inheriting class should extend the run method
(but first calling BaseCmdLineTool.run(self)). | Below is the the instruction that describes the task:
### Input:
Init point to execute the script.
If `arguments` string is given, will evaluate the arguments, else
evaluates sys.argv. Any inheriting class should extend the run method
(but first calling BaseCmdLineTool.run(self)).
### Response:
def run(self, arguments=None, get_unknowns=False):
"""
Init point to execute the script.
If `arguments` string is given, will evaluate the arguments, else
evaluates sys.argv. Any inheriting class should extend the run method
(but first calling BaseCmdLineTool.run(self)).
"""
# redirect PIPE signal to quiet kill script, if not on Windows
if os.name != 'nt':
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
if get_unknowns:
if arguments:
self.args, self.unknown_args = (self.argparser.parse_known_args
(args=arguments.split()))
else:
(self.args,
self.unknown_args) = self.argparser.parse_known_args()
self.args = vars(self.args)
else:
if arguments:
myargs = arguments.split()
self.args = vars(self.argparser.parse_args
(args=myargs))
else:
self.args = vars(self.argparser.parse_args())
self.progress_bar_enabled = (not (self.args['no_progressbar'] or
self.is_stdin)) |
def close(self):
"""Close the cursor"""
if self.closed or self.connection.closed:
return
self._cursor.close()
self.closed = True | Close the cursor | Below is the the instruction that describes the task:
### Input:
Close the cursor
### Response:
def close(self):
"""Close the cursor"""
if self.closed or self.connection.closed:
return
self._cursor.close()
self.closed = True |
def _processSegmentUpdates(self, activeColumns):
"""
Go through the list of accumulated segment updates and process them
as follows:
if the segment update is too old, remove the update
else if the cell received bottom-up, update its permanences
else if it's still being predicted, leave it in the queue
else remove it.
:param activeColumns TODO: document
"""
# The segmentUpdates dict has keys which are the column,cellIdx of the
# owner cell. The values are lists of segment updates for that cell
removeKeys = []
trimSegments = []
for key, updateList in self.segmentUpdates.iteritems():
# Get the column number and cell index of the owner cell
c, i = key[0], key[1]
# If the cell received bottom-up, update its segments
if c in activeColumns:
action = 'update'
# If not, either keep it around if it's still predicted, or remove it
else:
# If it is still predicted, and we are pooling, keep it around
if self.doPooling and self.lrnPredictedState['t'][c, i] == 1:
action = 'keep'
else:
action = 'remove'
# Process each segment for this cell. Each segment entry contains
# [creationDate, SegmentInfo]
updateListKeep = []
if action != 'remove':
for (createDate, segUpdate) in updateList:
if self.verbosity >= 4:
print "_nLrnIterations =", self.lrnIterationIdx,
print segUpdate
# If this segment has expired. Ignore this update (and hence remove it
# from list)
if self.lrnIterationIdx - createDate > self.segUpdateValidDuration:
continue
if action == 'update':
trimSegment = self._adaptSegment(segUpdate)
if trimSegment:
trimSegments.append((segUpdate.columnIdx, segUpdate.cellIdx,
segUpdate.segment))
else:
# Keep segments that haven't expired yet (the cell is still being
# predicted)
updateListKeep.append((createDate, segUpdate))
self.segmentUpdates[key] = updateListKeep
if len(updateListKeep) == 0:
removeKeys.append(key)
# Clean out empty segment updates
for key in removeKeys:
self.segmentUpdates.pop(key)
# Trim segments that had synapses go to 0
for (c, i, segment) in trimSegments:
self._trimSegmentsInCell(c, i, [segment], minPermanence = 0.00001,
minNumSyns = 0) | Go through the list of accumulated segment updates and process them
as follows:
if the segment update is too old, remove the update
else if the cell received bottom-up, update its permanences
else if it's still being predicted, leave it in the queue
else remove it.
:param activeColumns TODO: document | Below is the the instruction that describes the task:
### Input:
Go through the list of accumulated segment updates and process them
as follows:
if the segment update is too old, remove the update
else if the cell received bottom-up, update its permanences
else if it's still being predicted, leave it in the queue
else remove it.
:param activeColumns TODO: document
### Response:
def _processSegmentUpdates(self, activeColumns):
"""
Go through the list of accumulated segment updates and process them
as follows:
if the segment update is too old, remove the update
else if the cell received bottom-up, update its permanences
else if it's still being predicted, leave it in the queue
else remove it.
:param activeColumns TODO: document
"""
# The segmentUpdates dict has keys which are the column,cellIdx of the
# owner cell. The values are lists of segment updates for that cell
removeKeys = []
trimSegments = []
for key, updateList in self.segmentUpdates.iteritems():
# Get the column number and cell index of the owner cell
c, i = key[0], key[1]
# If the cell received bottom-up, update its segments
if c in activeColumns:
action = 'update'
# If not, either keep it around if it's still predicted, or remove it
else:
# If it is still predicted, and we are pooling, keep it around
if self.doPooling and self.lrnPredictedState['t'][c, i] == 1:
action = 'keep'
else:
action = 'remove'
# Process each segment for this cell. Each segment entry contains
# [creationDate, SegmentInfo]
updateListKeep = []
if action != 'remove':
for (createDate, segUpdate) in updateList:
if self.verbosity >= 4:
print "_nLrnIterations =", self.lrnIterationIdx,
print segUpdate
# If this segment has expired. Ignore this update (and hence remove it
# from list)
if self.lrnIterationIdx - createDate > self.segUpdateValidDuration:
continue
if action == 'update':
trimSegment = self._adaptSegment(segUpdate)
if trimSegment:
trimSegments.append((segUpdate.columnIdx, segUpdate.cellIdx,
segUpdate.segment))
else:
# Keep segments that haven't expired yet (the cell is still being
# predicted)
updateListKeep.append((createDate, segUpdate))
self.segmentUpdates[key] = updateListKeep
if len(updateListKeep) == 0:
removeKeys.append(key)
# Clean out empty segment updates
for key in removeKeys:
self.segmentUpdates.pop(key)
# Trim segments that had synapses go to 0
for (c, i, segment) in trimSegments:
self._trimSegmentsInCell(c, i, [segment], minPermanence = 0.00001,
minNumSyns = 0) |
def heartbeat(self):
"""Heartbeat request to keep session alive.
"""
unique_id = self.new_unique_id()
message = {
'op': 'heartbeat',
'id': unique_id,
}
self._send(message)
return unique_id | Heartbeat request to keep session alive. | Below is the the instruction that describes the task:
### Input:
Heartbeat request to keep session alive.
### Response:
def heartbeat(self):
"""Heartbeat request to keep session alive.
"""
unique_id = self.new_unique_id()
message = {
'op': 'heartbeat',
'id': unique_id,
}
self._send(message)
return unique_id |
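A minimal keep-alive sketch; the client variable, the stop flag and the 30-second cadence are all assumptions.

import time

keep_running = True
while keep_running:
    client.heartbeat()  # sends {'op': 'heartbeat', 'id': <unique id>}
    time.sleep(30)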
def get_value_from_user(sc):
"""
Prompts the user for a value for the symbol or choice 'sc'. For
bool/tristate symbols and choices, provides a list of all the assignable
values.
"""
if not sc.visibility:
print(sc.name + " is not currently visible")
return False
prompt = "Value for {}".format(sc.name)
if sc.type in (BOOL, TRISTATE):
prompt += " (available: {})" \
.format(", ".join(TRI_TO_STR[val] for val in sc.assignable))
prompt += ": "
val = input(prompt)
# Automatically add a "0x" prefix for hex symbols, like the menuconfig
# interface does. This isn't done when loading .config files, hence why
# set_value() doesn't do it automatically.
if sc.type == HEX and not val.startswith(("0x", "0X")):
val = "0x" + val
# Let Kconfiglib itself print a warning here if the value is invalid. We
# could also disable warnings temporarily with
# kconf.disable_warnings() / kconf.enable_warnings() and print our own
# warning.
return sc.set_value(val) | Prompts the user for a value for the symbol or choice 'sc'. For
bool/tristate symbols and choices, provides a list of all the assignable
values. | Below is the the instruction that describes the task:
### Input:
Prompts the user for a value for the symbol or choice 'sc'. For
bool/tristate symbols and choices, provides a list of all the assignable
values.
### Response:
def get_value_from_user(sc):
"""
Prompts the user for a value for the symbol or choice 'sc'. For
bool/tristate symbols and choices, provides a list of all the assignable
values.
"""
if not sc.visibility:
print(sc.name + " is not currently visible")
return False
prompt = "Value for {}".format(sc.name)
if sc.type in (BOOL, TRISTATE):
prompt += " (available: {})" \
.format(", ".join(TRI_TO_STR[val] for val in sc.assignable))
prompt += ": "
val = input(prompt)
# Automatically add a "0x" prefix for hex symbols, like the menuconfig
# interface does. This isn't done when loading .config files, hence why
# set_value() doesn't do it automatically.
if sc.type == HEX and not val.startswith(("0x", "0X")):
val = "0x" + val
# Let Kconfiglib itself print a warning here if the value is invalid. We
# could also disable warnings temporarily with
# kconf.disable_warnings() / kconf.enable_warnings() and print our own
# warning.
return sc.set_value(val) |
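A hedged usage sketch assuming the usual Kconfiglib entry points (Kconfig(), the syms mapping and write_config()); the Kconfig file and symbol name are placeholders.

from kconfiglib import Kconfig

kconf = Kconfig('Kconfig')          # parse the top-level Kconfig file
sym = kconf.syms['MODVERSIONS']     # look up a symbol by name
if get_value_from_user(sym):
    kconf.write_config('.config')   # persist the accepted value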
def subsample(self, key, order='random', auto_resize=False, ID=None):
"""
Allows arbitrary slicing (subsampling) of the data.
.. note::
When using order='random', the sampling is random
for each of the measurements in the collection.
Parameters
----------
{FCMeasurement_subsample_parameters}
Returns
-------
FCCollection or a subclass
new collection of subsampled event data.
"""
def func(well):
return well.subsample(key=key, order=order, auto_resize=auto_resize)
return self.apply(func, output_format='collection', ID=ID) | Allows arbitrary slicing (subsampling) of the data.
.. note::
When using order='random', the sampling is random
for each of the measurements in the collection.
Parameters
----------
{FCMeasurement_subsample_parameters}
Returns
-------
FCCollection or a subclass
new collection of subsampled event data. | Below is the the instruction that describes the task:
### Input:
Allows arbitrary slicing (subsampling) of the data.
.. note::
When using order='random', the sampling is random
for each of the measurements in the collection.
Parameters
----------
{FCMeasurement_subsample_parameters}
Returns
-------
FCCollection or a subclass
new collection of subsampled event data.
### Response:
def subsample(self, key, order='random', auto_resize=False, ID=None):
"""
Allows arbitrary slicing (subsampling) of the data.
.. note::
When using order='random', the sampling is random
for each of the measurements in the collection.
Parameters
----------
{FCMeasurement_subsample_parameters}
Returns
-------
FCCollection or a subclass
new collection of subsampled event data.
"""
def func(well):
return well.subsample(key=key, order=order, auto_resize=auto_resize)
return self.apply(func, output_format='collection', ID=ID) |
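A one-line usage sketch; plate stands in for an FCCollection-style object, and taking key=1000 to mean 1000 events per measurement is an assumption.

# draw 1000 random events from every measurement in the collection
small = plate.subsample(key=1000, order='random', auto_resize=True, ID='downsampled')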
def run_forever(self) -> None:
'''Execute the tasky/asyncio event loop until terminated.'''
Log.debug('running event loop until terminated')
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close() | Execute the tasky/asyncio event loop until terminated. | Below is the the instruction that describes the task:
### Input:
Execute the tasky/asyncio event loop until terminated.
### Response:
def run_forever(self) -> None:
'''Execute the tasky/asyncio event loop until terminated.'''
Log.debug('running event loop until terminated')
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close() |
def vi_score(self, x, index):
""" Wrapper function for selecting appropriate score
Parameters
----------
x : float
A random variable
index : int
0 or 1 depending on which latent variable
Returns
----------
The gradient of the scale latent variable at x
"""
if index == 0:
return self.vi_loc_score(x)
elif index == 1:
return self.vi_scale_score(x) | Wrapper function for selecting appropriate score
Parameters
----------
x : float
A random variable
index : int
0 or 1 depending on which latent variable
Returns
----------
The gradient of the selected latent variable at x | Below is the instruction that describes the task:
### Input:
Wrapper function for selecting appropriate score
Parameters
----------
x : float
A random variable
index : int
0 or 1 depending on which latent variable
Returns
----------
The gradient of the selected latent variable at x
### Response:
def vi_score(self, x, index):
""" Wrapper function for selecting appropriate score
Parameters
----------
x : float
A random variable
index : int
0 or 1 depending on which latent variable
Returns
----------
The gradient of the scale latent variable at x
"""
if index == 0:
return self.vi_loc_score(x)
elif index == 1:
return self.vi_scale_score(x) |
def wait_for(self, job_id, base_path=None, interval=5):
"""
Wait for an asynchronous task to finish.
Unlike the thin methods elsewhere on this object, this one is actually
specific to how the Luminoso API works. This will poll an API
endpoint to find out the status of the job numbered `job_id`,
repeating every 5 seconds (by default) until the job is done. When
the job is done, it will return an object representing the result of
that job.
In the Luminoso API, requests that may take a long time return a
job ID instead of a result, so that your code can continue running
in the meantime. When it needs the job to be done to proceed, it can
use this method to wait.
The base URL where it looks for that job is by default `jobs/id/`
under the current URL, assuming that this LuminosoClient's URL
represents a project. You can specify a different URL by changing
`base_path`.
If the job failed, will raise a LuminosoError with the job status
as its message.
"""
if base_path is None:
base_path = 'jobs/id'
path = '%s%d' % (ensure_trailing_slash(base_path), job_id)
start = time.time()
next_log = 0
while True:
response = self.get(path)
if response['stop_time']:
if response['success']:
return response
else:
raise LuminosoError(response)
elapsed = time.time() - start
if elapsed > next_log:
logger.info('Still waiting (%d seconds elapsed).', next_log)
next_log += 120
time.sleep(interval) | Wait for an asynchronous task to finish.
Unlike the thin methods elsewhere on this object, this one is actually
specific to how the Luminoso API works. This will poll an API
endpoint to find out the status of the job numbered `job_id`,
repeating every 5 seconds (by default) until the job is done. When
the job is done, it will return an object representing the result of
that job.
In the Luminoso API, requests that may take a long time return a
job ID instead of a result, so that your code can continue running
in the meantime. When it needs the job to be done to proceed, it can
use this method to wait.
The base URL where it looks for that job is by default `jobs/id/`
under the current URL, assuming that this LuminosoClient's URL
represents a project. You can specify a different URL by changing
`base_path`.
If the job failed, will raise a LuminosoError with the job status
as its message. | Below is the the instruction that describes the task:
### Input:
Wait for an asynchronous task to finish.
Unlike the thin methods elsewhere on this object, this one is actually
specific to how the Luminoso API works. This will poll an API
endpoint to find out the status of the job numbered `job_id`,
repeating every 5 seconds (by default) until the job is done. When
the job is done, it will return an object representing the result of
that job.
In the Luminoso API, requests that may take a long time return a
job ID instead of a result, so that your code can continue running
in the meantime. When it needs the job to be done to proceed, it can
use this method to wait.
The base URL where it looks for that job is by default `jobs/id/`
under the current URL, assuming that this LuminosoClient's URL
represents a project. You can specify a different URL by changing
`base_path`.
If the job failed, will raise a LuminosoError with the job status
as its message.
### Response:
def wait_for(self, job_id, base_path=None, interval=5):
"""
Wait for an asynchronous task to finish.
Unlike the thin methods elsewhere on this object, this one is actually
specific to how the Luminoso API works. This will poll an API
endpoint to find out the status of the job numbered `job_id`,
repeating every 5 seconds (by default) until the job is done. When
the job is done, it will return an object representing the result of
that job.
In the Luminoso API, requests that may take a long time return a
job ID instead of a result, so that your code can continue running
in the meantime. When it needs the job to be done to proceed, it can
use this method to wait.
The base URL where it looks for that job is by default `jobs/id/`
under the current URL, assuming that this LuminosoClient's URL
represents a project. You can specify a different URL by changing
`base_path`.
If the job failed, will raise a LuminosoError with the job status
as its message.
"""
if base_path is None:
base_path = 'jobs/id'
path = '%s%d' % (ensure_trailing_slash(base_path), job_id)
start = time.time()
next_log = 0
while True:
response = self.get(path)
if response['stop_time']:
if response['success']:
return response
else:
raise LuminosoError(response)
elapsed = time.time() - start
if elapsed > next_log:
logger.info('Still waiting (%d seconds elapsed).', next_log)
next_log += 120
time.sleep(interval) |
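A hedged usage sketch; the endpoint shown is hypothetical and merely stands for any call that returns a job id.

job_id = client.post('docs/recalculate')  # hypothetical long-running endpoint returning a job id
result = client.wait_for(job_id)
print(result['success'], result['stop_time'])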
def create_route(self, uri, sub_service):
"""Create the route for the URI.
:param uri: string - URI to be routed
:param sub_service: boolean - is the URI for a sub-service
:returns: n/a
"""
if uri not in self.routes.keys():
logger.debug('Service ({0}): Creating routes'
.format(self.name))
self.routes[uri] = {
'regex': StackInABoxService.get_service_regex(self.base_url,
uri,
sub_service),
'uri': uri,
'handlers': StackInABoxServiceRouter(self.name,
uri,
None,
self)
} | Create the route for the URI.
:param uri: string - URI to be routed
:param sub_service: boolean - is the URI for a sub-service
:returns: n/a | Below is the the instruction that describes the task:
### Input:
Create the route for the URI.
:param uri: string - URI to be routed
:param sub_service: boolean - is the URI for a sub-service
:returns: n/a
### Response:
def create_route(self, uri, sub_service):
"""Create the route for the URI.
:param uri: string - URI to be routed
:param sub_service: boolean - is the URI for a sub-service
:returns: n/a
"""
if uri not in self.routes.keys():
logger.debug('Service ({0}): Creating routes'
.format(self.name))
self.routes[uri] = {
'regex': StackInABoxService.get_service_regex(self.base_url,
uri,
sub_service),
'uri': uri,
'handlers': StackInABoxServiceRouter(self.name,
uri,
None,
self)
} |
def _initialize_attributes(model_class, name, bases, attrs):
"""Initialize the attributes of the model."""
model_class._attributes = {}
for k, v in attrs.iteritems():
if isinstance(v, Attribute):
model_class._attributes[k] = v
v.name = v.name or k | Initialize the attributes of the model. | Below is the the instruction that describes the task:
### Input:
Initialize the attributes of the model.
### Response:
def _initialize_attributes(model_class, name, bases, attrs):
"""Initialize the attributes of the model."""
model_class._attributes = {}
for k, v in attrs.iteritems():
if isinstance(v, Attribute):
model_class._attributes[k] = v
v.name = v.name or k |
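A minimal sketch of where this helper is typically invoked, from a model metaclass; ModelBase and the surrounding names are illustrative assumptions.

class ModelBase(type):
    def __new__(mcs, name, bases, attrs):
        model_class = super(ModelBase, mcs).__new__(mcs, name, bases, attrs)
        _initialize_attributes(model_class, name, bases, attrs)
        return model_class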
def process_response(self, request, response):
""" Sets the cache and deals with caching headers if needed
"""
if not self.should_cache(request, response):
# We don't need to update the cache, just return
return response
response = self.patch_headers(response)
self.set_cache(request, response)
return response | Sets the cache and deals with caching headers if needed | Below is the the instruction that describes the task:
### Input:
Sets the cache and deals with caching headers if needed
### Response:
def process_response(self, request, response):
""" Sets the cache and deals with caching headers if needed
"""
if not self.should_cache(request, response):
# We don't need to update the cache, just return
return response
response = self.patch_headers(response)
self.set_cache(request, response)
return response |
def update_extension_statistics(self, extension_statistics_update, publisher_name, extension_name):
"""UpdateExtensionStatistics.
[Preview API]
:param :class:`<ExtensionStatisticUpdate> <azure.devops.v5_0.gallery.models.ExtensionStatisticUpdate>` extension_statistics_update:
:param str publisher_name:
:param str extension_name:
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
content = self._serialize.body(extension_statistics_update, 'ExtensionStatisticUpdate')
self._send(http_method='PATCH',
location_id='a0ea3204-11e9-422d-a9ca-45851cc41400',
version='5.0-preview.1',
route_values=route_values,
content=content) | UpdateExtensionStatistics.
[Preview API]
:param :class:`<ExtensionStatisticUpdate> <azure.devops.v5_0.gallery.models.ExtensionStatisticUpdate>` extension_statistics_update:
:param str publisher_name:
:param str extension_name: | Below is the the instruction that describes the task:
### Input:
UpdateExtensionStatistics.
[Preview API]
:param :class:`<ExtensionStatisticUpdate> <azure.devops.v5_0.gallery.models.ExtensionStatisticUpdate>` extension_statistics_update:
:param str publisher_name:
:param str extension_name:
### Response:
def update_extension_statistics(self, extension_statistics_update, publisher_name, extension_name):
"""UpdateExtensionStatistics.
[Preview API]
:param :class:`<ExtensionStatisticUpdate> <azure.devops.v5_0.gallery.models.ExtensionStatisticUpdate>` extension_statistics_update:
:param str publisher_name:
:param str extension_name:
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
content = self._serialize.body(extension_statistics_update, 'ExtensionStatisticUpdate')
self._send(http_method='PATCH',
location_id='a0ea3204-11e9-422d-a9ca-45851cc41400',
version='5.0-preview.1',
route_values=route_values,
content=content) |
def files(self):
""" File uploads parsed into an instance of :class:`MultiDict`.
This property contains file uploads parsed from an
`multipart/form-data` encoded POST request body. The values are
instances of :class:`cgi.FieldStorage`.
"""
files = MultiDict()
for name, item in self.POST.iterallitems():
if hasattr(item, 'filename'):
files[name] = item
return files | File uploads parsed into an instance of :class:`MultiDict`.
This property contains file uploads parsed from an
`multipart/form-data` encoded POST request body. The values are
instances of :class:`cgi.FieldStorage`. | Below is the the instruction that describes the task:
### Input:
File uploads parsed into an instance of :class:`MultiDict`.
This property contains file uploads parsed from an
`multipart/form-data` encoded POST request body. The values are
instances of :class:`cgi.FieldStorage`.
### Response:
def files(self):
""" File uploads parsed into an instance of :class:`MultiDict`.
This property contains file uploads parsed from an
`multipart/form-data` encoded POST request body. The values are
instances of :class:`cgi.FieldStorage`.
"""
files = MultiDict()
for name, item in self.POST.iterallitems():
if hasattr(item, 'filename'):
files[name] = item
return files |
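A short request-handler sketch using the property above; request stands for the framework's request object and the target directory is an assumption.

# persist every uploaded file from a multipart/form-data POST
for name, part in request.files.iterallitems():
    with open('/tmp/' + part.filename, 'wb') as fh:
        fh.write(part.file.read())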
def add_distribution_list_alias(self, distribution_list, alias):
"""
:param distribution_list: a distribution list object to be used as
a selector
:param alias: email alias address
:returns: None (the API itself returns nothing)
"""
self.request('AddDistributionListAlias', {
'id': self._get_or_fetch_id(
distribution_list, self.get_distribution_list
),
'alias': alias,
}) | :param distribution_list: a distribution list object to be used as
a selector
:param alias: email alias address
:returns: None (the API itself returns nothing) | Below is the the instruction that describes the task:
### Input:
:param distribution_list: a distribution list object to be used as
a selector
:param alias: email alias address
:returns: None (the API itself returns nothing)
### Response:
def add_distribution_list_alias(self, distribution_list, alias):
"""
:param distribution_list: a distribution list object to be used as
a selector
:param alias: email alias address
:returns: None (the API itself returns nothing)
"""
self.request('AddDistributionListAlias', {
'id': self._get_or_fetch_id(
distribution_list, self.get_distribution_list
),
'alias': alias,
}) |
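A one-line usage sketch; admin stands for the API client instance and dl for a distribution list fetched earlier, both assumptions.

admin.add_distribution_list_alias(dl, 'sales-team@example.com')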