code (string, 75-104k chars) | docstring (string, 1-46.9k chars) | text (string, 164-112k chars)
---|---|---|
def get_meas_los(self, user_lo_config):
"""Embed default meas LO frequencies from backend and format them to list object.
If configured lo frequency is the same as default, this method returns `None`.
Args:
user_lo_config (LoConfig): A dictionary of LOs to format.
Returns:
list: A list of meas LOs.
Raises:
PulseError: when LO frequencies are missing.
"""
try:
_m_los = self.default_meas_los.copy()
except KeyError:
raise PulseError('Default measurement frequencies do not exist.')
for channel, lo_freq in user_lo_config.meas_lo_dict().items():
_m_los[channel.index] = lo_freq
if _m_los == self.default_meas_los:
return None
return _m_los | Embed default meas LO frequencies from the backend and format them into a list object.
If the configured LO frequency is the same as the default, this method returns `None`.
Args:
user_lo_config (LoConfig): A dictionary of LOs to format.
Returns:
list: A list of meas LOs.
Raises:
PulseError: when LO frequencies are missing. | Below is the instruction that describes the task:
### Input:
Embed default meas LO frequencies from the backend and format them into a list object.
If the configured LO frequency is the same as the default, this method returns `None`.
Args:
user_lo_config (LoConfig): A dictionary of LOs to format.
Returns:
list: A list of meas LOs.
Raises:
PulseError: when LO frequencies are missing.
### Response:
def get_meas_los(self, user_lo_config):
"""Embed default meas LO frequencies from backend and format them to list object.
If configured lo frequency is the same as default, this method returns `None`.
Args:
user_lo_config (LoConfig): A dictionary of LOs to format.
Returns:
list: A list of meas LOs.
Raises:
PulseError: when LO frequencies are missing.
"""
try:
_m_los = self.default_meas_los.copy()
except KeyError:
raise PulseError('Default measurement frequencies do not exist.')
for channel, lo_freq in user_lo_config.meas_lo_dict().items():
_m_los[channel.index] = lo_freq
if _m_los == self.default_meas_los:
return None
return _m_los |
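A minimal sketch of the merge-and-compare logic above, using plain Python containers; the default frequencies and channel indices are hypothetical stand-ins, not Qiskit backend values.
default_meas_los = [6.5e9, 6.6e9]  # hypothetical per-channel defaults (Hz)
user_overrides = {1: 6.7e9}        # channel index -> requested LO frequency
m_los = default_meas_los.copy()
for index, lo_freq in user_overrides.items():
    m_los[index] = lo_freq
result = None if m_los == default_meas_los else m_los
print(result)  # [6500000000.0, 6700000000.0]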
def cmd_log(self, reopen=False, rotate=False):
"""Allows managing of uWSGI log related stuff
:param bool reopen: Reopen log file. Could be required after third party rotation.
:param bool rotate: Trigger built-in log rotation.
"""
cmd = b''
if reopen:
cmd += b'l'
if rotate:
cmd += b'L'
return self.send_command(cmd) | Allows managing uWSGI's log-related functionality.
:param bool reopen: Reopen log file. Could be required after third-party rotation.
:param bool rotate: Trigger built-in log rotation. | Below is the instruction that describes the task:
### Input:
Allows managing uWSGI's log-related functionality.
:param bool reopen: Reopen log file. Could be required after third-party rotation.
:param bool rotate: Trigger built-in log rotation.
### Response:
def cmd_log(self, reopen=False, rotate=False):
"""Allows managing of uWSGI log related stuff
:param bool reopen: Reopen log file. Could be required after third party rotation.
:param bool rotate: Trigger built-in log rotation.
"""
cmd = b''
if reopen:
cmd += b'l'
if rotate:
cmd += b'L'
return self.send_command(cmd) |
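For reference, the command bytes this method passes to ``send_command`` can be reproduced standalone; the single-letter codes are taken from the docstring above, not verified against the uWSGI master FIFO spec.
def build_log_cmd(reopen=False, rotate=False):
    cmd = b''
    if reopen:
        cmd += b'l'  # reopen the log file
    if rotate:
        cmd += b'L'  # trigger built-in log rotation
    return cmd

assert build_log_cmd(reopen=True) == b'l'
assert build_log_cmd(reopen=True, rotate=True) == b'lL'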
def random_pairs_with_replacement(n, shape, random_state=None):
"""make random record pairs"""
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
n_max = max_pairs(shape)
if n_max <= 0:
raise ValueError('n_max must be larger than 0')
# make random pairs
indices = random_state.randint(0, n_max, n)
if len(shape) == 1:
return _map_tril_1d_on_2d(indices, shape[0])
else:
return np.unravel_index(indices, shape) | make random record pairs | Below is the instruction that describes the task:
### Input:
make random record pairs
### Response:
def random_pairs_with_replacement(n, shape, random_state=None):
"""make random record pairs"""
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
n_max = max_pairs(shape)
if n_max <= 0:
raise ValueError('n_max must be larger than 0')
# make random pairs
indices = random_state.randint(0, n_max, n)
if len(shape) == 1:
return _map_tril_1d_on_2d(indices, shape[0])
else:
return np.unravel_index(indices, shape) |
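The 2-D branch can be illustrated with NumPy alone; the 1-D branch depends on the module-private helpers ``max_pairs`` and ``_map_tril_1d_on_2d``, which are assumed here rather than reimplemented.
import numpy as np

shape = (4, 5)  # linking two record frames of sizes 4 and 5
rng = np.random.RandomState(42)
indices = rng.randint(0, shape[0] * shape[1], 6)  # n_max assumed to be 4 * 5 here
rows, cols = np.unravel_index(indices, shape)
print(list(zip(rows, cols)))  # six (row, col) record pairs, drawn with replacement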
def invert(self, invert=True):
"""Inverts all the channels of a image according to *invert*. If invert is a tuple or a list, elementwise
invertion is performed, otherwise all channels are inverted if *invert* is true (default).
Note: 'Inverting' means that black becomes white, and vice-versa, not that the values are negated !
"""
if(isinstance(invert, (tuple, list)) and
len(self.channels) != len(invert)):
raise ValueError(
"Number of channels and invert components differ.")
logger.debug("Applying invert with parameters %s", str(invert))
if isinstance(invert, (tuple, list)):
for i, chn in enumerate(self.channels):
if invert[i]:
self.channels[i] = 1 - chn
elif invert:
for i, chn in enumerate(self.channels):
self.channels[i] = 1 - chn | Inverts all the channels of an image according to *invert*. If invert is a tuple or a list, element-wise
inversion is performed, otherwise all channels are inverted if *invert* is true (default).
Note: 'Inverting' means that black becomes white, and vice-versa, not that the values are negated! | Below is the instruction that describes the task:
### Input:
Inverts all the channels of an image according to *invert*. If invert is a tuple or a list, element-wise
inversion is performed, otherwise all channels are inverted if *invert* is true (default).
Note: 'Inverting' means that black becomes white, and vice-versa, not that the values are negated!
### Response:
def invert(self, invert=True):
"""Inverts all the channels of a image according to *invert*. If invert is a tuple or a list, elementwise
invertion is performed, otherwise all channels are inverted if *invert* is true (default).
Note: 'Inverting' means that black becomes white, and vice-versa, not that the values are negated !
"""
if(isinstance(invert, (tuple, list)) and
len(self.channels) != len(invert)):
raise ValueError(
"Number of channels and invert components differ.")
logger.debug("Applying invert with parameters %s", str(invert))
if isinstance(invert, (tuple, list)):
for i, chn in enumerate(self.channels):
if invert[i]:
self.channels[i] = 1 - chn
elif invert:
for i, chn in enumerate(self.channels):
self.channels[i] = 1 - chn |
def execute(self, timeout=None):
"""Execute all currently queued batch commands"""
logger.debug(' > Batch API request (length %s)' % len(self._commands))
auth = self._build_http_auth()
headers = self._build_request_headers()
logger.debug('\tbatch headers: %s' % headers)
logger.debug('\tbatch command length: %s' % len(self._commands))
path = self._build_request_path(self.BATCH_ENDPOINT)
data = json.dumps(self._commands, cls=self._json_encoder)
r = requests.post(
path,
auth=auth,
headers=headers,
data=data,
timeout=(self.DEFAULT_TIMEOUT if timeout is None else timeout)
)
self._commands = []
logger.debug('\tresponse code:%s' % r.status_code)
try:
logger.debug('\tresponse: %s' % r.json())
except ValueError:  # response body was not valid JSON
logger.debug('\tresponse: %s' % r.content)
return r | Execute all currently queued batch commands | Below is the instruction that describes the task:
### Input:
Execute all currently queued batch commands
### Response:
def execute(self, timeout=None):
"""Execute all currently queued batch commands"""
logger.debug(' > Batch API request (length %s)' % len(self._commands))
auth = self._build_http_auth()
headers = self._build_request_headers()
logger.debug('\tbatch headers: %s' % headers)
logger.debug('\tbatch command length: %s' % len(self._commands))
path = self._build_request_path(self.BATCH_ENDPOINT)
data = json.dumps(self._commands, cls=self._json_encoder)
r = requests.post(
path,
auth=auth,
headers=headers,
data=data,
timeout=(self.DEFAULT_TIMEOUT if timeout is None else timeout)
)
self._commands = []
logger.debug('\tresponse code:%s' % r.status_code)
try:
logger.debug('\tresponse: %s' % r.json())
except ValueError:  # response body was not valid JSON
logger.debug('\tresponse: %s' % r.content)
return r |
def get(self):
"""
*get the ebook object*
**Return:**
- ``ebook``
**Usage:**
See class docstring for usage
"""
self.log.debug('starting the ``get`` method')
ebook = None  # guard: remains None if no conversion branch below matches
if self.format == "epub":
if self.urlOrPath[:4] == "http" or self.urlOrPath[:4] == "www.":
ebook = self._url_to_epub()
elif ".docx" in self.urlOrPath:
ebook = self._docx_to_epub()
if self.format == "mobi":
if self.urlOrPath[:4] == "http" or self.urlOrPath[:4] == "www.":
epub = self._url_to_epub()
elif ".docx" in self.urlOrPath:
epub = self._docx_to_epub()
if not epub:
return None
ebook = self._epub_to_mobi(
epubPath=epub,
deleteEpub=False
)
if not ebook:
return None
tag(
log=self.log,
filepath=ebook,
tags=False,
rating=False,
wherefrom=self.url
)
self.log.debug('completed the ``get`` method')
return ebook | *get the ebook object*
**Return:**
- ``ebook``
**Usage:**
See class docstring for usage | Below is the instruction that describes the task:
### Input:
*get the ebook object*
**Return:**
- ``ebook``
**Usage:**
See class docstring for usage
### Response:
def get(self):
"""
*get the ebook object*
**Return:**
- ``ebook``
**Usage:**
See class docstring for usage
"""
self.log.debug('starting the ``get`` method')
ebook = None  # guard: remains None if no conversion branch below matches
if self.format == "epub":
if self.urlOrPath[:4] == "http" or self.urlOrPath[:4] == "www.":
ebook = self._url_to_epub()
elif ".docx" in self.urlOrPath:
ebook = self._docx_to_epub()
if self.format == "mobi":
if self.urlOrPath[:4] == "http" or self.urlOrPath[:4] == "www.":
epub = self._url_to_epub()
elif ".docx" in self.urlOrPath:
epub = self._docx_to_epub()
if not epub:
return None
ebook = self._epub_to_mobi(
epubPath=epub,
deleteEpub=False
)
if not ebook:
return None
tag(
log=self.log,
filepath=ebook,
tags=False,
rating=False,
wherefrom=self.url
)
self.log.debug('completed the ``get`` method')
return ebook |
def handle_class(signature_node, module, object_name, cache):
"""
Styles ``autoclass`` entries.
Adds ``abstract`` prefix to abstract classes.
"""
class_ = getattr(module, object_name, None)
if class_ is None:
return
if class_ not in cache:
cache[class_] = {}
attributes = inspect.classify_class_attrs(class_)
for attribute in attributes:
cache[class_][attribute.name] = attribute
if inspect.isabstract(class_):
emphasis = nodes.emphasis("abstract ", "abstract ", classes=["property"])
signature_node.insert(0, emphasis) | Styles ``autoclass`` entries.
Adds ``abstract`` prefix to abstract classes. | Below is the instruction that describes the task:
### Input:
Styles ``autoclass`` entries.
Adds ``abstract`` prefix to abstract classes.
### Response:
def handle_class(signature_node, module, object_name, cache):
"""
Styles ``autoclass`` entries.
Adds ``abstract`` prefix to abstract classes.
"""
class_ = getattr(module, object_name, None)
if class_ is None:
return
if class_ not in cache:
cache[class_] = {}
attributes = inspect.classify_class_attrs(class_)
for attribute in attributes:
cache[class_][attribute.name] = attribute
if inspect.isabstract(class_):
emphasis = nodes.emphasis("abstract ", "abstract ", classes=["property"])
signature_node.insert(0, emphasis) |
def mulmod(computation: BaseComputation) -> None:
"""
Modulo Multiplication
"""
left, right, mod = computation.stack_pop(num_items=3, type_hint=constants.UINT256)
if mod == 0:
result = 0
else:
result = (left * right) % mod
computation.stack_push(result) | Modulo Multiplication | Below is the instruction that describes the task:
### Input:
Modulo Multiplication
### Response:
def mulmod(computation: BaseComputation) -> None:
"""
Modulo Multiplication
"""
left, right, mod = computation.stack_pop(num_items=3, type_hint=constants.UINT256)
if mod == 0:
result = 0
else:
result = (left * right) % mod
computation.stack_push(result) |
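The same modular multiplication in plain Python, keeping the EVM convention that a zero modulus yields 0 rather than raising:
def mulmod_plain(left, right, mod):
    return 0 if mod == 0 else (left * right) % mod

U256 = 2 ** 256
assert mulmod_plain(U256 - 1, U256 - 1, 12) == 9  # (2**256 - 1) ** 2 % 12
assert mulmod_plain(7, 8, 0) == 0                 # zero modulus -> 0, no exception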
def init_volumes(self, single=None, only_mount=None, skip_mount=None, swallow_exceptions=True):
"""Detects volumes (as volume system or as single volume) in all disks and yields the volumes. This calls
:func:`Disk.init_volumes` on all disks and should be called after :func:`mount_disks`.
:rtype: generator"""
for disk in self.disks:
logger.info("Mounting volumes in {0}".format(disk))
for volume in disk.init_volumes(single, only_mount, skip_mount, swallow_exceptions=swallow_exceptions):
yield volume | Detects volumes (as volume system or as single volume) in all disks and yields the volumes. This calls
:func:`Disk.init_volumes` on all disks and should be called after :func:`mount_disks`.
:rtype: generator | Below is the instruction that describes the task:
### Input:
Detects volumes (as volume system or as single volume) in all disks and yields the volumes. This calls
:func:`Disk.init_volumes` on all disks and should be called after :func:`mount_disks`.
:rtype: generator
### Response:
def init_volumes(self, single=None, only_mount=None, skip_mount=None, swallow_exceptions=True):
"""Detects volumes (as volume system or as single volume) in all disks and yields the volumes. This calls
:func:`Disk.init_volumes` on all disks and should be called after :func:`mount_disks`.
:rtype: generator"""
for disk in self.disks:
logger.info("Mounting volumes in {0}".format(disk))
for volume in disk.init_volumes(single, only_mount, skip_mount, swallow_exceptions=swallow_exceptions):
yield volume |
def num_or_str(x):
"""The argument is a string; convert to a number if possible, or strip it.
>>> num_or_str('42')
42
>>> num_or_str(' 42x ')
'42x'
"""
if isnumber(x): return x
try:
return int(x)
except ValueError:
try:
return float(x)
except ValueError:
return str(x).strip() | The argument is a string; convert to a number if possible, or strip it.
>>> num_or_str('42')
42
>>> num_or_str(' 42x ')
'42x' | Below is the instruction that describes the task:
### Input:
The argument is a string; convert to a number if possible, or strip it.
>>> num_or_str('42')
42
>>> num_or_str(' 42x ')
'42x'
### Response:
def num_or_str(x):
"""The argument is a string; convert to a number if possible, or strip it.
>>> num_or_str('42')
42
>>> num_or_str(' 42x ')
'42x'
"""
if isnumber(x): return x
try:
return int(x)
except ValueError:
try:
return float(x)
except ValueError:
return str(x).strip() |
def run(self):
"""Redirects messages until a shutdown message is received."""
while True:
if not self.task_socket.poll(-1):
continue
msg = self.task_socket.recv_multipart()
msg_type = msg[1]
if self.debug:
self.stats.append((time.time(),
msg_type,
len(self.unassigned_tasks),
len(self.available_workers)))
if time.time() - self.lastDebugTs > TIME_BETWEEN_PARTIALDEBUG:
self.writeDebug("debug/partial-{0}".format(
round(time.time(), -1)
))
self.lastDebugTs = time.time()
# New task inbound
if msg_type == TASK:
task_id = msg[2]
task = msg[3]
self.logger.debug("Received task {0}".format(task_id))
try:
address = self.available_workers.popleft()
except IndexError:
self.unassigned_tasks.append((task_id, task))
else:
self.logger.debug("Sent {0}".format(task_id))
self.task_socket.send_multipart([address, TASK, task])
self.assigned_tasks[address].add(task_id)
# Request for task
elif msg_type == REQUEST:
address = msg[0]
try:
task_id, task = self.unassigned_tasks.popleft()
except IndexError:
self.available_workers.append(address)
else:
self.logger.debug("Sent {0}".format(task_id))
self.task_socket.send_multipart([address, TASK, task])
self.assigned_tasks[address].add(task_id)
# A task status request is requested
elif msg_type == STATUS_REQ:
self.pruneAssignedTasks()
address = msg[0]
task_id = msg[2]
if any(task_id in x for x in self.assigned_tasks.values()):
status = STATUS_GIVEN
elif task_id in (x[0] for x in self.unassigned_tasks):
status = STATUS_HERE
else:
status = STATUS_NONE
self.task_socket.send_multipart([
address, STATUS_ANS, task_id, status
])
# A task status set (task done) is received
elif msg_type == STATUS_DONE:
address = msg[0]
task_id = msg[2]
try:
self.assigned_tasks[address].discard(task_id)
except KeyError:
pass
elif msg_type == STATUS_UPDATE:
address = msg[0]
try:
tasks_ids = pickle.loads(msg[2])
except Exception:  # unpickling failed
self.logger.error("Could not unpickle status update message.")
else:
self.assigned_tasks[address] = tasks_ids
self.status_times[address] = time.time()
# Answer needing delivery
elif msg_type == REPLY:
self.logger.debug("Relaying")
destination = msg[-1]
origin = msg[0]
self.task_socket.send_multipart([destination] + msg[1:] + [origin])
# Shared variable to distribute
elif msg_type == VARIABLE:
address = msg[4]
value = msg[3]
key = msg[2]
self.shared_variables[address].update(
{key: value},
)
self.info_socket.send_multipart([VARIABLE,
key,
value,
address])
# Initialize the variables of a new worker
elif msg_type == INIT:
address = msg[0]
try:
self.processConfig(pickle.loads(msg[2]))
except pickle.PickleError:
continue
self.task_socket.send_multipart([
address,
pickle.dumps(self.config,
pickle.HIGHEST_PROTOCOL),
pickle.dumps(self.shared_variables,
pickle.HIGHEST_PROTOCOL),
])
self.task_socket.send_multipart([
address,
pickle.dumps(self.cluster_available,
pickle.HIGHEST_PROTOCOL),
])
# Add a given broker to its fellow list
elif msg_type == CONNECT:
try:
connect_brokers = pickle.loads(msg[2])
except pickle.PickleError:
self.logger.error("Could not understand CONNECT message.")
continue
self.logger.info("Connecting to other brokers...")
self.addBrokerList(connect_brokers)
# Shutdown of this broker was requested
elif msg_type == SHUTDOWN:
self.logger.debug("SHUTDOWN command received.")
self.shutdown()
break | Redirects messages until a shutdown message is received. | Below is the instruction that describes the task:
### Input:
Redirects messages until a shutdown message is received.
### Response:
def run(self):
"""Redirects messages until a shutdown message is received."""
while True:
if not self.task_socket.poll(-1):
continue
msg = self.task_socket.recv_multipart()
msg_type = msg[1]
if self.debug:
self.stats.append((time.time(),
msg_type,
len(self.unassigned_tasks),
len(self.available_workers)))
if time.time() - self.lastDebugTs > TIME_BETWEEN_PARTIALDEBUG:
self.writeDebug("debug/partial-{0}".format(
round(time.time(), -1)
))
self.lastDebugTs = time.time()
# New task inbound
if msg_type == TASK:
task_id = msg[2]
task = msg[3]
self.logger.debug("Received task {0}".format(task_id))
try:
address = self.available_workers.popleft()
except IndexError:
self.unassigned_tasks.append((task_id, task))
else:
self.logger.debug("Sent {0}".format(task_id))
self.task_socket.send_multipart([address, TASK, task])
self.assigned_tasks[address].add(task_id)
# Request for task
elif msg_type == REQUEST:
address = msg[0]
try:
task_id, task = self.unassigned_tasks.popleft()
except IndexError:
self.available_workers.append(address)
else:
self.logger.debug("Sent {0}".format(task_id))
self.task_socket.send_multipart([address, TASK, task])
self.assigned_tasks[address].add(task_id)
# A task status request is requested
elif msg_type == STATUS_REQ:
self.pruneAssignedTasks()
address = msg[0]
task_id = msg[2]
if any(task_id in x for x in self.assigned_tasks.values()):
status = STATUS_GIVEN
elif task_id in (x[0] for x in self.unassigned_tasks):
status = STATUS_HERE
else:
status = STATUS_NONE
self.task_socket.send_multipart([
address, STATUS_ANS, task_id, status
])
# A task status set (task done) is received
elif msg_type == STATUS_DONE:
address = msg[0]
task_id = msg[2]
try:
self.assigned_tasks[address].discard(task_id)
except KeyError:
pass
elif msg_type == STATUS_UPDATE:
address = msg[0]
try:
tasks_ids = pickle.loads(msg[2])
except Exception:  # unpickling failed
self.logger.error("Could not unpickle status update message.")
else:
self.assigned_tasks[address] = tasks_ids
self.status_times[address] = time.time()
# Answer needing delivery
elif msg_type == REPLY:
self.logger.debug("Relaying")
destination = msg[-1]
origin = msg[0]
self.task_socket.send_multipart([destination] + msg[1:] + [origin])
# Shared variable to distribute
elif msg_type == VARIABLE:
address = msg[4]
value = msg[3]
key = msg[2]
self.shared_variables[address].update(
{key: value},
)
self.info_socket.send_multipart([VARIABLE,
key,
value,
address])
# Initialize the variables of a new worker
elif msg_type == INIT:
address = msg[0]
try:
self.processConfig(pickle.loads(msg[2]))
except pickle.PickleError:
continue
self.task_socket.send_multipart([
address,
pickle.dumps(self.config,
pickle.HIGHEST_PROTOCOL),
pickle.dumps(self.shared_variables,
pickle.HIGHEST_PROTOCOL),
])
self.task_socket.send_multipart([
address,
pickle.dumps(self.cluster_available,
pickle.HIGHEST_PROTOCOL),
])
# Add a given broker to its fellow list
elif msg_type == CONNECT:
try:
connect_brokers = pickle.loads(msg[2])
except pickle.PickleError:
self.logger.error("Could not understand CONNECT message.")
continue
self.logger.info("Connecting to other brokers...")
self.addBrokerList(connect_brokers)
# Shutdown of this broker was requested
elif msg_type == SHUTDOWN:
self.logger.debug("SHUTDOWN command received.")
self.shutdown()
break |
async def set_config(cls, name: str, value):
"""Set a configuration value in MAAS.
Consult your MAAS server for recognised settings. Alternatively, use
the pre-canned functions also defined on this object.
"""
return await cls._handler.set_config(name=[name], value=[value]) | Set a configuration value in MAAS.
Consult your MAAS server for recognised settings. Alternatively, use
the pre-canned functions also defined on this object. | Below is the instruction that describes the task:
### Input:
Set a configuration value in MAAS.
Consult your MAAS server for recognised settings. Alternatively, use
the pre-canned functions also defined on this object.
### Response:
async def set_config(cls, name: str, value):
"""Set a configuration value in MAAS.
Consult your MAAS server for recognised settings. Alternatively, use
the pre-canned functions also defined on this object.
"""
return await cls._handler.set_config(name=[name], value=[value]) |
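A hypothetical usage sketch with asyncio; the ``Config`` object and the ``maas_name`` setting are placeholders for whatever a connected python-libmaas client and MAAS server actually expose.
import asyncio

async def rename_maas(config_cls):
    # config_cls stands in for a bound Config object from a connected client
    await config_cls.set_config("maas_name", "lab-maas")

# asyncio.run(rename_maas(Config))  # with a real, connected client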
def is_java_project(self):
"""
Indicates if the project's main binary is a Java Archive.
"""
if self._is_java_project is None:
self._is_java_project = isinstance(self.arch, ArchSoot)
return self._is_java_project | Indicates if the project's main binary is a Java Archive. | Below is the instruction that describes the task:
### Input:
Indicates if the project's main binary is a Java Archive.
### Response:
def is_java_project(self):
"""
Indicates if the project's main binary is a Java Archive.
"""
if self._is_java_project is None:
self._is_java_project = isinstance(self.arch, ArchSoot)
return self._is_java_project |
def to_instants_dataframe(self, sql_ctx):
"""
Returns a DataFrame of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing a DataFrame where each column
is a key from one of the rows in the TimeSeriesRDD.
"""
ssql_ctx = sql_ctx._ssql_ctx
jdf = self._jtsrdd.toInstantsDataFrame(ssql_ctx, -1)
return DataFrame(jdf, sql_ctx) | Returns a DataFrame of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing a DataFrame where each column
is a key from one of the rows in the TimeSeriesRDD. | Below is the instruction that describes the task:
### Input:
Returns a DataFrame of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing a DataFrame where each column
is a key from one of the rows in the TimeSeriesRDD.
### Response:
def to_instants_dataframe(self, sql_ctx):
"""
Returns a DataFrame of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing a DataFrame where each column
is a key from one of the rows in the TimeSeriesRDD.
"""
ssql_ctx = sql_ctx._ssql_ctx
jdf = self._jtsrdd.toInstantsDataFrame(ssql_ctx, -1)
return DataFrame(jdf, sql_ctx) |
def summary(dataset_uri, format):
"""Report summary information about a dataset."""
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
creator_username = dataset._admin_metadata["creator_username"]
frozen_at = dataset._admin_metadata["frozen_at"]
num_items = len(dataset.identifiers)
tot_size = sum([dataset.item_properties(i)["size_in_bytes"]
for i in dataset.identifiers])
if format == "json":
json_lines = [
'{',
' "name": "{}",'.format(dataset.name),
' "uuid": "{}",'.format(dataset.uuid),
' "creator_username": "{}",'.format(creator_username),
' "number_of_items": {},'.format(num_items),
' "size_in_bytes": {},'.format(tot_size),
' "frozen_at": {}'.format(frozen_at),
'}',
]
formatted_json = "\n".join(json_lines)
colorful_json = pygments.highlight(
formatted_json,
pygments.lexers.JsonLexer(),
pygments.formatters.TerminalFormatter())
click.secho(colorful_json, nl=False)
else:
info = [
("name", dataset.name),
("uuid", dataset.uuid),
("creator_username", creator_username),
("number_of_items", str(num_items)),
("size", sizeof_fmt(tot_size).strip()),
("frozen_at", date_fmt(frozen_at)),
]
for key, value in info:
click.secho("{}: ".format(key), nl=False)
click.secho(value, fg="green") | Report summary information about a dataset. | Below is the instruction that describes the task:
### Input:
Report summary information about a dataset.
### Response:
def summary(dataset_uri, format):
"""Report summary information about a dataset."""
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
creator_username = dataset._admin_metadata["creator_username"]
frozen_at = dataset._admin_metadata["frozen_at"]
num_items = len(dataset.identifiers)
tot_size = sum([dataset.item_properties(i)["size_in_bytes"]
for i in dataset.identifiers])
if format == "json":
json_lines = [
'{',
' "name": "{}",'.format(dataset.name),
' "uuid": "{}",'.format(dataset.uuid),
' "creator_username": "{}",'.format(creator_username),
' "number_of_items": {},'.format(num_items),
' "size_in_bytes": {},'.format(tot_size),
' "frozen_at": {}'.format(frozen_at),
'}',
]
formatted_json = "\n".join(json_lines)
colorful_json = pygments.highlight(
formatted_json,
pygments.lexers.JsonLexer(),
pygments.formatters.TerminalFormatter())
click.secho(colorful_json, nl=False)
else:
info = [
("name", dataset.name),
("uuid", dataset.uuid),
("creator_username", creator_username),
("number_of_items", str(num_items)),
("size", sizeof_fmt(tot_size).strip()),
("frozen_at", date_fmt(frozen_at)),
]
for key, value in info:
click.secho("{}: ".format(key), nl=False)
click.secho(value, fg="green") |
def validate_file(file_type, file_path):
"""
Validates a file against a schema
Parameters
----------
file_type : str
Type of file to read. May be 'component', 'element', 'table', or 'references'
file_path:
Full path to the file to be validated
Raises
------
RuntimeError
If the file_type is not valid (and/or a schema doesn't exist)
ValidationError
If the given file does not pass validation
FileNotFoundError
If the file given by file_path doesn't exist
"""
file_data = fileio._read_plain_json(file_path, False)
validate_data(file_type, file_data) | Validates a file against a schema
Parameters
----------
file_type : str
Type of file to read. May be 'component', 'element', 'table', or 'references'
file_path:
Full path to the file to be validated
Raises
------
RuntimeError
If the file_type is not valid (and/or a schema doesn't exist)
ValidationError
If the given file does not pass validation
FileNotFoundError
If the file given by file_path doesn't exist | Below is the instruction that describes the task:
### Input:
Validates a file against a schema
Parameters
----------
file_type : str
Type of file to read. May be 'component', 'element', 'table', or 'references'
file_path:
Full path to the file to be validated
Raises
------
RuntimeError
If the file_type is not valid (and/or a schema doesn't exist)
ValidationError
If the given file does not pass validation
FileNotFoundError
If the file given by file_path doesn't exist
### Response:
def validate_file(file_type, file_path):
"""
Validates a file against a schema
Parameters
----------
file_type : str
Type of file to read. May be 'component', 'element', 'table', or 'references'
file_path:
Full path to the file to be validated
Raises
------
RuntimeError
If the file_type is not valid (and/or a schema doesn't exist)
ValidationError
If the given file does not pass validation
FileNotFoundError
If the file given by file_path doesn't exist
"""
file_data = fileio._read_plain_json(file_path, False)
validate_data(file_type, file_data) |
def kld(d1, d2):
"""Return the Kullback-Leibler Divergence (KLD) between two distributions.
Args:
d1 (np.ndarray): The first distribution.
d2 (np.ndarray): The second distribution.
Returns:
float: The KLD of ``d1`` from ``d2``.
"""
d1, d2 = flatten(d1), flatten(d2)
return entropy(d1, d2, 2.0) | Return the Kullback-Leibler Divergence (KLD) between two distributions.
Args:
d1 (np.ndarray): The first distribution.
d2 (np.ndarray): The second distribution.
Returns:
float: The KLD of ``d1`` from ``d2``. | Below is the instruction that describes the task:
### Input:
Return the Kullback-Leibler Divergence (KLD) between two distributions.
Args:
d1 (np.ndarray): The first distribution.
d2 (np.ndarray): The second distribution.
Returns:
float: The KLD of ``d1`` from ``d2``.
### Response:
def kld(d1, d2):
"""Return the Kullback-Leibler Divergence (KLD) between two distributions.
Args:
d1 (np.ndarray): The first distribution.
d2 (np.ndarray): The second distribution.
Returns:
float: The KLD of ``d1`` from ``d2``.
"""
d1, d2 = flatten(d1), flatten(d2)
return entropy(d1, d2, 2.0) |
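A direct check with SciPy; ``flatten`` above is a module helper, so plain 1-D arrays are used here instead.
import numpy as np
from scipy.stats import entropy

d1 = np.array([0.5, 0.5])
d2 = np.array([0.9, 0.1])
print(entropy(d1, d2, base=2))  # ~0.737 bits: the KLD of d1 from d2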
def cmd_tool(args=None):
""" Command line tool for plotting and viewing info on guppi raw files """
from argparse import ArgumentParser
parser = ArgumentParser(description="Command line utility for creating spectra from GuppiRaw files.")
parser.add_argument('filename', type=str, help='Name of file to read')
parser.add_argument('-o', dest='outdir', type=str, default='./', help='output directory for PNG files')
args = parser.parse_args()
r = GuppiRaw(args.filename)
r.print_stats()
bname = os.path.splitext(os.path.basename(args.filename))[0]
bname = os.path.join(args.outdir, bname)
r.plot_histogram(filename="%s_hist.png" % bname)
r.plot_spectrum(filename="%s_spec.png" % bname) | Command line tool for plotting and viewing info on guppi raw files | Below is the instruction that describes the task:
### Input:
Command line tool for plotting and viewing info on guppi raw files
### Response:
def cmd_tool(args=None):
""" Command line tool for plotting and viewing info on guppi raw files """
from argparse import ArgumentParser
parser = ArgumentParser(description="Command line utility for creating spectra from GuppiRaw files.")
parser.add_argument('filename', type=str, help='Name of file to read')
parser.add_argument('-o', dest='outdir', type=str, default='./', help='output directory for PNG files')
args = parser.parse_args()
r = GuppiRaw(args.filename)
r.print_stats()
bname = os.path.splitext(os.path.basename(args.filename))[0]
bname = os.path.join(args.outdir, bname)
r.plot_histogram(filename="%s_hist.png" % bname)
r.plot_spectrum(filename="%s_spec.png" % bname) |
def run(self):
# type: () -> bool
""" Run all linters and report results.
Returns:
bool: **True** if all checks were successful, **False** otherwise.
"""
with util.timed_block() as t:
files = self._collect_files()
log.info("Collected <33>{} <32>files in <33>{}s".format(
len(files), t.elapsed_s
))
if self.verbose:
for p in files:
log.info(" <0>{}", p)
# No files to lint - return success if empty runs are allowed.
if not files:
return self.allow_empty
with util.timed_block() as t:
results = self._run_checks(files)
log.info("Code checked in <33>{}s", t.elapsed_s)
success = True
for name, retcodes in results.items():
if any(x != 0 for x in retcodes):
success = False
log.err("<35>{} <31>failed with: <33>{}".format(
name, retcodes
))
return success | Run all linters and report results.
Returns:
bool: **True** if all checks were successful, **False** otherwise. | Below is the instruction that describes the task:
### Input:
Run all linters and report results.
Returns:
bool: **True** if all checks were successful, **False** otherwise.
### Response:
def run(self):
# type: () -> bool
""" Run all linters and report results.
Returns:
bool: **True** if all checks were successful, **False** otherwise.
"""
with util.timed_block() as t:
files = self._collect_files()
log.info("Collected <33>{} <32>files in <33>{}s".format(
len(files), t.elapsed_s
))
if self.verbose:
for p in files:
log.info(" <0>{}", p)
# No files to lint - return success if empty runs are allowed.
if not files:
return self.allow_empty
with util.timed_block() as t:
results = self._run_checks(files)
log.info("Code checked in <33>{}s", t.elapsed_s)
success = True
for name, retcodes in results.items():
if any(x != 0 for x in retcodes):
success = False
log.err("<35>{} <31>failed with: <33>{}".format(
name, retcodes
))
return success |
def get_os_version_package(pkg, fatal=True):
'''Derive OpenStack version number from an installed package.'''
codename = get_os_codename_package(pkg, fatal=fatal)
if not codename:
return None
if 'swift' in pkg:
vers_map = SWIFT_CODENAMES
for cname, version in six.iteritems(vers_map):
if cname == codename:
return version[-1]
else:
vers_map = OPENSTACK_CODENAMES
for version, cname in six.iteritems(vers_map):
if cname == codename:
return version | Derive OpenStack version number from an installed package. | Below is the instruction that describes the task:
### Input:
Derive OpenStack version number from an installed package.
### Response:
def get_os_version_package(pkg, fatal=True):
'''Derive OpenStack version number from an installed package.'''
codename = get_os_codename_package(pkg, fatal=fatal)
if not codename:
return None
if 'swift' in pkg:
vers_map = SWIFT_CODENAMES
for cname, version in six.iteritems(vers_map):
if cname == codename:
return version[-1]
else:
vers_map = OPENSTACK_CODENAMES
for version, cname in six.iteritems(vers_map):
if cname == codename:
return version |
def _release_info(jsn,VERSION):
"""Gives information about a particular package version."""
try:
release_point = jsn['releases'][VERSION][0]
except KeyError:
print "\033[91m\033[1mError: Release not found."
exit(1)
python_version = release_point['python_version']
filename = release_point['filename']
md5 = release_point['md5_digest']
download_url_for_release = release_point['url']
download_num_for_release = release_point['downloads']
download_size_for_release = _sizeof_fmt(int(release_point['size']))
print """
\033[1m\033[1m \033[4mPACKAGE VERSION INFO\033[0m
\033[1m md5 :\033[0m \033[93m%s \033[0m
\033[1m python version :\033[0m \033[93m%s \033[0m
\033[1m download url :\033[0m \033[93m%s \033[0m
\033[1m download number :\033[0m \033[93m%s \033[0m
\033[1m size :\033[0m \033[93m%s \033[0m
\033[1m filename :\033[0m \033[93m%s \033[0m
"""%(md5,python_version,download_url_for_release,\
download_num_for_release,download_size_for_release,filename) | Gives information about a particular package version. | Below is the the instruction that describes the task:
### Input:
Gives information about a particular package version.
### Response:
def _release_info(jsn,VERSION):
"""Gives information about a particular package version."""
try:
release_point = jsn['releases'][VERSION][0]
except KeyError:
print "\033[91m\033[1mError: Release not found."
exit(1)
python_version = release_point['python_version']
filename = release_point['filename']
md5 = release_point['md5_digest']
download_url_for_release = release_point['url']
download_num_for_release = release_point['downloads']
download_size_for_release = _sizeof_fmt(int(release_point['size']))
print """
\033[1m\033[1m \033[4mPACKAGE VERSION INFO\033[0m
\033[1m md5 :\033[0m \033[93m%s \033[0m
\033[1m python version :\033[0m \033[93m%s \033[0m
\033[1m download url :\033[0m \033[93m%s \033[0m
\033[1m download number :\033[0m \033[93m%s \033[0m
\033[1m size :\033[0m \033[93m%s \033[0m
\033[1m filename :\033[0m \033[93m%s \033[0m
"""%(md5,python_version,download_url_for_release,\
download_num_for_release,download_size_for_release,filename) |
def _parse_qualimap_globals_inregion(table):
"""Retrieve metrics from the global targeted region table.
"""
out = {}
for row in table.find_all("tr"):
col, val = [x.text for x in row.find_all("td")]
if col == "Mapped reads":
out.update(_parse_num_pct("%s (in regions)" % col, val))
return out | Retrieve metrics from the global targeted region table. | Below is the instruction that describes the task:
### Input:
Retrieve metrics from the global targeted region table.
### Response:
def _parse_qualimap_globals_inregion(table):
"""Retrieve metrics from the global targeted region table.
"""
out = {}
for row in table.find_all("tr"):
col, val = [x.text for x in row.find_all("td")]
if col == "Mapped reads":
out.update(_parse_num_pct("%s (in regions)" % col, val))
return out |
def modularity_louvain_und(W, gamma=1, hierarchy=False, seed=None):
'''
The optimal community structure is a subdivision of the network into
nonoverlapping groups of nodes in a way that maximizes the number of
within-group edges, and minimizes the number of between-group edges.
The modularity is a statistic that quantifies the degree to which the
network may be subdivided into such clearly delineated groups.
The Louvain algorithm is a fast and accurate community detection
algorithm (as of writing). The algorithm may also be used to detect
hierarchical community structure.
Parameters
----------
W : NxN np.ndarray
undirected weighted/binary connection matrix
gamma : float
resolution parameter. default value=1. Values 0 <= gamma < 1 detect
larger modules while gamma > 1 detects smaller modules.
hierarchy : bool
Enables hierarchical output. Default value=False
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
ci : Nx1 np.ndarray
refined community affiliation vector. If hierarchical output enabled,
it is an NxH np.ndarray instead with multiple iterations
Q : float
optimized modularity metric. If hierarchical output enabled, becomes
an Hx1 array of floats instead.
Notes
-----
Ci and Q may vary from run to run, due to heuristics in the
algorithm. Consequently, it may be worth comparing multiple runs.
'''
rng = get_rng(seed)
n = len(W) # number of nodes
s = np.sum(W) # weight of edges
h = 0 # hierarchy index
ci = []
ci.append(np.arange(n) + 1) # hierarchical module assignments
q = []
q.append(-1) # hierarchical modularity values
n0 = n
#knm = np.zeros((n,n))
# for j in np.xrange(n0+1):
# knm[:,j] = np.sum(w[;,
while True:
if h > 300:
raise BCTParamError('Modularity Infinite Loop Style B. Please '
'contact the developer with this error.')
k = np.sum(W, axis=0) # node degree
Km = k.copy() # module degree
Knm = W.copy() # node-to-module degree
m = np.arange(n) + 1 # initial module assignments
flag = True # flag for within-hierarchy search
it = 0
while flag:
it += 1
if it > 1000:
raise BCTParamError('Modularity Infinite Loop Style C. Please '
'contact the developer with this error.')
flag = False
# loop over nodes in random order
for i in rng.permutation(n):
ma = m[i] - 1
# algorithm condition
dQ = ((Knm[i, :] - Knm[i, ma] + W[i, i]) -
gamma * k[i] * (Km - Km[ma] + k[i]) / s)
dQ[ma] = 0
max_dq = np.max(dQ) # find maximal modularity increase
if max_dq > 1e-10: # if maximal increase positive
j = np.argmax(dQ) # take only one value
# print max_dq,j,dQ[j]
Knm[:, j] += W[:, i] # change node-to-module degrees
Knm[:, ma] -= W[:, i]
Km[j] += k[i] # change module degrees
Km[ma] -= k[i]
m[i] = j + 1 # reassign module
flag = True
_, m = np.unique(m, return_inverse=True) # new module assignments
# print m,h
m += 1
h += 1
ci.append(np.zeros((n0,)))
# for i,mi in enumerate(m): #loop through initial module assignments
for i in range(n):
# print i, m[i], n0, h, len(m), n
# ci[h][np.where(ci[h-1]==i+1)]=mi #assign new modules
ci[h][np.where(ci[h - 1] == i + 1)] = m[i]
n = np.max(m) # new number of modules
W1 = np.zeros((n, n)) # new weighted matrix
for i in range(n):
for j in range(i, n):
# pool weights of nodes in same module
wp = np.sum(W[np.ix_(m == i + 1, m == j + 1)])
W1[i, j] = wp
W1[j, i] = wp
W = W1
q.append(0)
# compute modularity
q[h] = np.trace(W) / s - gamma * np.sum(np.dot(W / s, W / s))
if q[h] - q[h - 1] < 1e-10: # if modularity does not increase
break
ci = np.array(ci, dtype=int)
if hierarchy:
ci = ci[1:-1]
q = q[1:-1]
return ci, q
else:
return ci[h - 1], q[h - 1] | The optimal community structure is a subdivision of the network into
nonoverlapping groups of nodes in a way that maximizes the number of
within-group edges, and minimizes the number of between-group edges.
The modularity is a statistic that quantifies the degree to which the
network may be subdivided into such clearly delineated groups.
The Louvain algorithm is a fast and accurate community detection
algorithm (as of writing). The algorithm may also be used to detect
hierarchical community structure.
Parameters
----------
W : NxN np.ndarray
undirected weighted/binary connection matrix
gamma : float
resolution parameter. default value=1. Values 0 <= gamma < 1 detect
larger modules while gamma > 1 detects smaller modules.
hierarchy : bool
Enables hierarchical output. Default value=False
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
ci : Nx1 np.ndarray
refined community affiliation vector. If hierarchical output enabled,
it is an NxH np.ndarray instead with multiple iterations
Q : float
optimized modularity metric. If hierarchical output enabled, becomes
an Hx1 array of floats instead.
Notes
-----
Ci and Q may vary from run to run, due to heuristics in the
algorithm. Consequently, it may be worth comparing multiple runs. | Below is the instruction that describes the task:
### Input:
The optimal community structure is a subdivision of the network into
nonoverlapping groups of nodes in a way that maximizes the number of
within-group edges, and minimizes the number of between-group edges.
The modularity is a statistic that quantifies the degree to which the
network may be subdivided into such clearly delineated groups.
The Louvain algorithm is a fast and accurate community detection
algorithm (as of writing). The algorithm may also be used to detect
hierarchical community structure.
Parameters
----------
W : NxN np.ndarray
undirected weighted/binary connection matrix
gamma : float
resolution parameter. default value=1. Values 0 <= gamma < 1 detect
larger modules while gamma > 1 detects smaller modules.
hierarchy : bool
Enables hierarchical output. Default value=False
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
ci : Nx1 np.ndarray
refined community affiliation vector. If hierarchical output enabled,
it is an NxH np.ndarray instead with multiple iterations
Q : float
optimized modularity metric. If hierarchical output enabled, becomes
an Hx1 array of floats instead.
Notes
-----
Ci and Q may vary from run to run, due to heuristics in the
algorithm. Consequently, it may be worth comparing multiple runs.
### Response:
def modularity_louvain_und(W, gamma=1, hierarchy=False, seed=None):
'''
The optimal community structure is a subdivision of the network into
nonoverlapping groups of nodes in a way that maximizes the number of
within-group edges, and minimizes the number of between-group edges.
The modularity is a statistic that quantifies the degree to which the
network may be subdivided into such clearly delineated groups.
The Louvain algorithm is a fast and accurate community detection
algorithm (as of writing). The algorithm may also be used to detect
hierarchical community structure.
Parameters
----------
W : NxN np.ndarray
undirected weighted/binary connection matrix
gamma : float
resolution parameter. default value=1. Values 0 <= gamma < 1 detect
larger modules while gamma > 1 detects smaller modules.
hierarchy : bool
Enables hierarchical output. Default value=False
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
ci : Nx1 np.ndarray
refined community affiliation vector. If hierarchical output enabled,
it is an NxH np.ndarray instead with multiple iterations
Q : float
optimized modularity metric. If hierarchical output enabled, becomes
an Hx1 array of floats instead.
Notes
-----
Ci and Q may vary from run to run, due to heuristics in the
algorithm. Consequently, it may be worth comparing multiple runs.
'''
rng = get_rng(seed)
n = len(W) # number of nodes
s = np.sum(W) # weight of edges
h = 0 # hierarchy index
ci = []
ci.append(np.arange(n) + 1) # hierarchical module assignments
q = []
q.append(-1) # hierarchical modularity values
n0 = n
#knm = np.zeros((n,n))
# for j in np.xrange(n0+1):
# knm[:,j] = np.sum(w[;,
while True:
if h > 300:
raise BCTParamError('Modularity Infinite Loop Style B. Please '
'contact the developer with this error.')
k = np.sum(W, axis=0) # node degree
Km = k.copy() # module degree
Knm = W.copy() # node-to-module degree
m = np.arange(n) + 1 # initial module assignments
flag = True # flag for within-hierarchy search
it = 0
while flag:
it += 1
if it > 1000:
raise BCTParamError('Modularity Infinite Loop Style C. Please '
'contact the developer with this error.')
flag = False
# loop over nodes in random order
for i in rng.permutation(n):
ma = m[i] - 1
# algorithm condition
dQ = ((Knm[i, :] - Knm[i, ma] + W[i, i]) -
gamma * k[i] * (Km - Km[ma] + k[i]) / s)
dQ[ma] = 0
max_dq = np.max(dQ) # find maximal modularity increase
if max_dq > 1e-10: # if maximal increase positive
j = np.argmax(dQ) # take only one value
# print max_dq,j,dQ[j]
Knm[:, j] += W[:, i] # change node-to-module degrees
Knm[:, ma] -= W[:, i]
Km[j] += k[i] # change module degrees
Km[ma] -= k[i]
m[i] = j + 1 # reassign module
flag = True
_, m = np.unique(m, return_inverse=True) # new module assignments
# print m,h
m += 1
h += 1
ci.append(np.zeros((n0,)))
# for i,mi in enumerate(m): #loop through initial module assignments
for i in range(n):
# print i, m[i], n0, h, len(m), n
# ci[h][np.where(ci[h-1]==i+1)]=mi #assign new modules
ci[h][np.where(ci[h - 1] == i + 1)] = m[i]
n = np.max(m) # new number of modules
W1 = np.zeros((n, n)) # new weighted matrix
for i in range(n):
for j in range(i, n):
# pool weights of nodes in same module
wp = np.sum(W[np.ix_(m == i + 1, m == j + 1)])
W1[i, j] = wp
W1[j, i] = wp
W = W1
q.append(0)
# compute modularity
q[h] = np.trace(W) / s - gamma * np.sum(np.dot(W / s, W / s))
if q[h] - q[h - 1] < 1e-10: # if modularity does not increase
break
ci = np.array(ci, dtype=int)
if hierarchy:
ci = ci[1:-1]
q = q[1:-1]
return ci, q
else:
return ci[h - 1], q[h - 1] |
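A usage sketch on a small synthetic network, assuming the function above is in scope (it ships with bctpy as ``bct.modularity_louvain_und``):
import numpy as np

rng = np.random.RandomState(0)
A = rng.rand(20, 20)
W = np.triu(A, 1) + np.triu(A, 1).T  # symmetric weights, zero diagonal
ci, q = modularity_louvain_und(W, gamma=1, seed=0)
print(ci, q)  # community affiliation vector and its modularity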
def visible_fields(self):
"""
Returns the reduced set of visible fields to output from the form.
This method respects the provided ``fields`` configuration _and_ excludes
all fields from the ``exclude`` configuration.
If no ``fields`` were provided when configuring this fieldset, all visible
fields minus the excluded fields will be returned.
:return: List of bound field instances or empty tuple.
"""
form_visible_fields = self.form.visible_fields()
if self.render_fields:
fields = self.render_fields
else:
fields = [field.name for field in form_visible_fields]
filtered_fields = [field for field in fields if field not in self.exclude_fields]
return [field for field in form_visible_fields if field.name in filtered_fields] | Returns the reduced set of visible fields to output from the form.
This method respects the provided ``fields`` configuration _and_ excludes
all fields from the ``exclude`` configuration.
If no ``fields`` were provided when configuring this fieldset, all visible
fields minus the excluded fields will be returned.
:return: List of bound field instances or empty tuple. | Below is the instruction that describes the task:
### Input:
Returns the reduced set of visible fields to output from the form.
This method respects the provided ``fields`` configuration _and_ excludes
all fields from the ``exclude`` configuration.
If no ``fields`` were provided when configuring this fieldset, all visible
fields minus the excluded fields will be returned.
:return: List of bound field instances or empty tuple.
### Response:
def visible_fields(self):
"""
Returns the reduced set of visible fields to output from the form.
This method respects the provided ``fields`` configuration _and_ excludes
all fields from the ``exclude`` configuration.
If no ``fields`` were provided when configuring this fieldset, all visible
fields minus the excluded fields will be returned.
:return: List of bound field instances or empty tuple.
"""
form_visible_fields = self.form.visible_fields()
if self.render_fields:
fields = self.render_fields
else:
fields = [field.name for field in form_visible_fields]
filtered_fields = [field for field in fields if field not in self.exclude_fields]
return [field for field in form_visible_fields if field.name in filtered_fields] |
def map_to_openapi_type(self, *args):
"""Decorator to set mapping for custom fields.
``*args`` can be:
- a pair of the form ``(type, format)``
- a core marshmallow field type (in which case we reuse that type's mapping)
"""
if len(args) == 1 and args[0] in self.field_mapping:
openapi_type_field = self.field_mapping[args[0]]
elif len(args) == 2:
openapi_type_field = args
else:
raise TypeError("Pass core marshmallow field type or (type, fmt) pair.")
def inner(field_type):
self.field_mapping[field_type] = openapi_type_field
return field_type
return inner | Decorator to set mapping for custom fields.
``*args`` can be:
- a pair of the form ``(type, format)``
- a core marshmallow field type (in which case we reuse that type's mapping) | Below is the the instruction that describes the task:
### Input:
Decorator to set mapping for custom fields.
``*args`` can be:
- a pair of the form ``(type, format)``
- a core marshmallow field type (in which case we reuse that type's mapping)
### Response:
def map_to_openapi_type(self, *args):
"""Decorator to set mapping for custom fields.
``*args`` can be:
- a pair of the form ``(type, format)``
- a core marshmallow field type (in which case we reuse that type's mapping)
"""
if len(args) == 1 and args[0] in self.field_mapping:
openapi_type_field = self.field_mapping[args[0]]
elif len(args) == 2:
openapi_type_field = args
else:
raise TypeError("Pass core marshmallow field type or (type, fmt) pair.")
def inner(field_type):
self.field_mapping[field_type] = openapi_type_field
return field_type
return inner |
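A self-contained sketch of the two call styles the docstring describes; ``field_mapping`` here is a stand-in for the converter's real mapping table.
from marshmallow import fields

field_mapping = {fields.Integer: ("integer", "int32")}

def map_to_openapi_type(*args):
    if len(args) == 1 and args[0] in field_mapping:
        openapi_type_field = field_mapping[args[0]]
    elif len(args) == 2:
        openapi_type_field = args
    else:
        raise TypeError("Pass core marshmallow field type or (type, fmt) pair.")
    def inner(field_type):
        field_mapping[field_type] = openapi_type_field
        return field_type
    return inner

@map_to_openapi_type("string", "uuid")  # explicit (type, format) pair
class MyUUID(fields.Field):
    pass

@map_to_openapi_type(fields.Integer)    # reuse a core field's mapping
class MyInt(fields.Field):
    pass

print(field_mapping[MyUUID], field_mapping[MyInt])  # ('string', 'uuid') ('integer', 'int32')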
def calc_hamiltonian(self, mass, omega_array):
"""
Calculates the standard (pot+kin) Hamiltonian of your system.
Parameters
----------
mass : float
The mass of the particle in kg
omega_array : array
array which represents omega at every point in your time trace
and should therefore have the same length as self.position_data
Requirements
------------
self.position_data : array
Already filtered for the degree of freedom of interest and converted into meters.
Returns
-------
Hamiltonian : array
The calculated Hamiltonian
"""
Kappa_t= mass*omega_array**2
self.E_pot = 0.5*Kappa_t*self.position_data**2
self.E_kin = 0.5*mass*(_np.insert(_np.diff(self.position_data), 0, (self.position_data[1]-self.position_data[0]))*self.SampleFreq)**2
self.Hamiltonian = self.E_pot + self.E_kin
return self.Hamiltonian | Calculates the standard (pot+kin) Hamiltonian of your system.
Parameters
----------
mass : float
The mass of the particle in kg
omega_array : array
array which represents omega at every point in your time trace
and should therefore have the same length as self.position_data
Requirements
------------
self.position_data : array
Already filtered for the degree of freedom of interest and converted into meters.
Returns
-------
Hamiltonian : array
The calculated Hamiltonian | Below is the instruction that describes the task:
### Input:
Calculates the standard (pot+kin) Hamiltonian of your system.
Parameters
----------
mass : float
The mass of the particle in kg
omega_array : array
array which represents omega at every point in your time trace
and should therefore have the same length as self.position_data
Requirements
------------
self.position_data : array
Already filtered for the degree of freedom of interest and converted into meters.
Returns
-------
Hamiltonian : array
The calculated Hamiltonian
### Response:
def calc_hamiltonian(self, mass, omega_array):
"""
Calculates the standard (pot+kin) Hamiltonian of your system.
Parameters
----------
mass : float
The mass of the particle in kg
omega_array : array
array which represents omega at every point in your time trace
and should therefore have the same length as self.position_data
Requirements
------------
self.position_data : array
Already filtered for the degree of freedom of interest and converted into meters.
Returns
-------
Hamiltonian : array
The calculated Hamiltonian
"""
Kappa_t= mass*omega_array**2
self.E_pot = 0.5*Kappa_t*self.position_data**2
self.E_kin = 0.5*mass*(_np.insert(_np.diff(self.position_data), 0, (self.position_data[1]-self.position_data[0]))*self.SampleFreq)**2
self.Hamiltonian = self.E_pot + self.E_kin
return self.Hamiltonian |
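The same energy split, E_pot = ½mω²x² and E_kin = ½mv², on synthetic data; the trap frequency, mass, and sample rate are made-up illustration values.
import numpy as np

fs = 1e5                                    # sample rate (Hz), stands in for SampleFreq
t = np.arange(0, 1e-2, 1 / fs)
omega = 2 * np.pi * 5e3 * np.ones_like(t)   # constant 5 kHz trap
mass = 1e-18                                # kg
x = 1e-8 * np.sin(omega * t)                # position trace in metres

E_pot = 0.5 * mass * omega ** 2 * x ** 2
v = np.insert(np.diff(x), 0, x[1] - x[0]) * fs  # finite-difference velocity
E_kin = 0.5 * mass * v ** 2
H = E_pot + E_kin
print(H.mean())                             # total energy, roughly constant in time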
def addKeyword(self, keyword, weight):
"""
add a relevant keyword to the topic page
@param keyword: keyword or phrase to be added
@param weight: importance of the provided keyword (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be an integer or a float"
self.topicPage["keywords"].append({"keyword": keyword, "wgt": weight}) | add a relevant keyword to the topic page
@param keyword: keyword or phrase to be added
@param weight: importance of the provided keyword (typically in range 1 - 50) | Below is the instruction that describes the task:
### Input:
add a relevant keyword to the topic page
@param keyword: keyword or phrase to be added
@param weight: importance of the provided keyword (typically in range 1 - 50)
### Response:
def addKeyword(self, keyword, weight):
"""
add a relevant keyword to the topic page
@param keyword: keyword or phrase to be added
@param weight: importance of the provided keyword (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be an integer or a float"
self.topicPage["keywords"].append({"keyword": keyword, "wgt": weight}) |
def get_ordered_entries(self, queryset=False):
"""
Custom ordering. First we get the average views and rating for
the categories's entries. Second we created a rank by multiplying
both. Last, we sort categories by this rank from top to bottom.
Example:
- Cat_1
- Entry_1 (500 Views, Rating 2)
- Entry_2 (200 Views, Rating -4)
- Entry_3 (100 Views, Rating 3)
- Cat_2
- Entry_1 (200 Views, Rating 7)
- Entry_2 (50 Views, Rating 2)
Result:
Cat_1 has a rank of 88.88 (avg. views: 266.66, avg. rating: 0.33)
Cat_2 has a rank of 562.5 (avg. views: 125, avg. rating: 4.5)
Cat_2 will be displayed at the top. The algorithm is quality-oriented,
as you can see.
"""
if queryset:
self.queryset = queryset
else:
self.queryset = EntryCategory.objects.all()
if self.queryset:
for category in self.queryset:
entries = category.get_entries()
if entries:
amount_list = [e.amount_of_views for e in entries]
rating_list = [e.rating() for e in entries]
views_per_entry = fsum(amount_list) / len(amount_list)
rating_per_entry = fsum(rating_list) / len(rating_list)
category.last_rank = views_per_entry * rating_per_entry
category.save()
else:
self.queryset = self.queryset.exclude(pk=category.pk)
self.queryset = sorted(self.queryset, key=lambda c: c.last_rank,
reverse=True)
return self.queryset | Custom ordering. First we get the average views and rating for
the categories' entries. Second we create a rank by multiplying
both. Last, we sort categories by this rank from top to bottom.
Example:
- Cat_1
- Entry_1 (500 Views, Rating 2)
- Entry_2 (200 Views, Rating -4)
- Entry_3 (100 Views, Rating 3)
- Cat_2
- Entry_1 (200 Views, Rating 7)
- Entry_2 (50 Views, Rating 2)
Result:
Cat_1 has a rank of 88.88 (avg. views: 266.66, avg. rating: 0.33)
Cat_2 has a rank of 562.5 (avg. views: 125, avg. rating: 4.5)
Cat_2 will be displayed at the top. The algorithm is quality-oriented,
as you can see. | Below is the instruction that describes the task:
### Input:
Custom ordering. First we get the average views and rating for
the categories' entries. Second we create a rank by multiplying
both. Last, we sort categories by this rank from top to bottom.
Example:
- Cat_1
- Entry_1 (500 Views, Rating 2)
- Entry_2 (200 Views, Rating -4)
- Entry_3 (100 Views, Rating 3)
- Cat_2
- Entry_1 (200 Views, Rating 7)
- Entry_2 (50 Views, Rating 2)
Result:
        Cat_1 has a rank of: 88.88 (avg. views: 266.66, avg. rating: 0.33)
        Cat_2 has a rank of: 562.5 (avg. views: 125, avg. rating: 4.5)
Cat_2 will be displayed at the top. The algorithm is quality-oriented,
as you can see.
### Response:
def get_ordered_entries(self, queryset=False):
"""
Custom ordering. First we get the average views and rating for
    the categories' entries. Second we create a rank by multiplying
both. Last, we sort categories by this rank from top to bottom.
Example:
- Cat_1
- Entry_1 (500 Views, Rating 2)
- Entry_2 (200 Views, Rating -4)
- Entry_3 (100 Views, Rating 3)
- Cat_2
- Entry_1 (200 Views, Rating 7)
- Entry_2 (50 Views, Rating 2)
Result:
        Cat_1 has a rank of: 88.88 (avg. views: 266.66, avg. rating: 0.33)
        Cat_2 has a rank of: 562.5 (avg. views: 125, avg. rating: 4.5)
Cat_2 will be displayed at the top. The algorithm is quality-oriented,
as you can see.
"""
if queryset:
self.queryset = queryset
else:
self.queryset = EntryCategory.objects.all()
if self.queryset:
for category in self.queryset:
entries = category.get_entries()
if entries:
amount_list = [e.amount_of_views for e in entries]
rating_list = [e.rating() for e in entries]
views_per_entry = fsum(amount_list) / len(amount_list)
rating_per_entry = fsum(rating_list) / len(rating_list)
category.last_rank = views_per_entry * rating_per_entry
category.save()
else:
self.queryset = self.queryset.exclude(pk=category.pk)
self.queryset = sorted(self.queryset, key=lambda c: c.last_rank,
reverse=True)
return self.queryset |
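The ranking arithmetic above can be checked with a small standalone sketch (plain Python, no Django models; the (views, rating) pairs mirror the docstring example):

from math import fsum

categories = {
    'Cat_1': [(500, 2), (200, -4), (100, 3)],
    'Cat_2': [(200, 7), (50, 2)],
}

def rank(entries):
    views = [v for v, _ in entries]
    ratings = [r for _, r in entries]
    return (fsum(views) / len(views)) * (fsum(ratings) / len(ratings))

# Sort best-first, as get_ordered_entries() does.
for name in sorted(categories, key=lambda n: rank(categories[name]), reverse=True):
    print(name, round(rank(categories[name]), 2))  # Cat_2 562.5, then Cat_1 88.89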
def broadcast(self,
fromUserId,
objectName,
content,
pushContent=None,
pushData=None,
os=None):
"""
        Broadcast message method (sends a message to every registered user of an application; for users who are offline, a Push notification is sent to those who meet the conditions (have a bound mobile terminal). A single message is at most 128k; the conversation type is SYSTEM. It may be sent at most once per hour and at most 3 times per day.)
        @param fromUserId: sender user Id. (required)
        @param txtMessage: text message.
        @param pushContent: the Push content to display. If objectName is a RongCloud built-in message type, the user is guaranteed to receive a Push notification after sending. For a custom message, pushContent is the Push content displayed for that message; if omitted, the user will not receive a Push notification. (optional)
        @param pushData: for iOS, attached to the payload of the Push notification; on Android, the client receives it in a field named pushData. (optional)
        @param os: send Push by operating system; iOS sends Push to iOS users and Android to Android users. To send Push to all users, omit the os parameter. (optional)
        @return code: return code; 200 means success.
        @return errorMessage: error message.
"""
desc = {
"name": "CodeSuccessReslut",
"desc": " http 成功返回结果",
"fields": [{
"name": "code",
"type": "Integer",
"desc": "返回码,200 为正常。"
}, {
"name": "errorMessage",
"type": "String",
"desc": "错误信息。"
}]
}
r = self.call_api(
method=('API', 'POST', 'application/x-www-form-urlencoded'),
action='/message/broadcast.json',
params={
"fromUserId": fromUserId,
"objectName": objectName,
"content": content,
"pushContent": pushContent,
"pushData": pushData,
"os": os
})
        return Response(r, desc) | Broadcast message method (sends a message to every registered user of an application; for users who are offline, a Push notification is sent to those who meet the conditions (have a bound mobile terminal). A single message is at most 128k; the conversation type is SYSTEM. It may be sent at most once per hour and at most 3 times per day.)
    @param fromUserId: sender user Id. (required)
    @param txtMessage: text message.
    @param pushContent: the Push content to display. If objectName is a RongCloud built-in message type, the user is guaranteed to receive a Push notification after sending. For a custom message, pushContent is the Push content displayed for that message; if omitted, the user will not receive a Push notification. (optional)
    @param pushData: for iOS, attached to the payload of the Push notification; on Android, the client receives it in a field named pushData. (optional)
    @param os: send Push by operating system; iOS sends Push to iOS users and Android to Android users. To send Push to all users, omit the os parameter. (optional)
    @return code: return code; 200 means success.
    @return errorMessage: error message. | Below is the instruction that describes the task:
### Input:
    Broadcast message method (sends a message to every registered user of an application; for users who are offline, a Push notification is sent to those who meet the conditions (have a bound mobile terminal). A single message is at most 128k; the conversation type is SYSTEM. It may be sent at most once per hour and at most 3 times per day.)
    @param fromUserId: sender user Id. (required)
    @param txtMessage: text message.
    @param pushContent: the Push content to display. If objectName is a RongCloud built-in message type, the user is guaranteed to receive a Push notification after sending. For a custom message, pushContent is the Push content displayed for that message; if omitted, the user will not receive a Push notification. (optional)
    @param pushData: for iOS, attached to the payload of the Push notification; on Android, the client receives it in a field named pushData. (optional)
    @param os: send Push by operating system; iOS sends Push to iOS users and Android to Android users. To send Push to all users, omit the os parameter. (optional)
    @return code: return code; 200 means success.
    @return errorMessage: error message.
### Response:
def broadcast(self,
fromUserId,
objectName,
content,
pushContent=None,
pushData=None,
os=None):
"""
        Broadcast message method (sends a message to every registered user of an application; for users who are offline, a Push notification is sent to those who meet the conditions (have a bound mobile terminal). A single message is at most 128k; the conversation type is SYSTEM. It may be sent at most once per hour and at most 3 times per day.)
        @param fromUserId: sender user Id. (required)
        @param txtMessage: text message.
        @param pushContent: the Push content to display. If objectName is a RongCloud built-in message type, the user is guaranteed to receive a Push notification after sending. For a custom message, pushContent is the Push content displayed for that message; if omitted, the user will not receive a Push notification. (optional)
        @param pushData: for iOS, attached to the payload of the Push notification; on Android, the client receives it in a field named pushData. (optional)
        @param os: send Push by operating system; iOS sends Push to iOS users and Android to Android users. To send Push to all users, omit the os parameter. (optional)
        @return code: return code; 200 means success.
        @return errorMessage: error message.
"""
desc = {
"name": "CodeSuccessReslut",
"desc": " http 成功返回结果",
"fields": [{
"name": "code",
"type": "Integer",
"desc": "返回码,200 为正常。"
}, {
"name": "errorMessage",
"type": "String",
"desc": "错误信息。"
}]
}
r = self.call_api(
method=('API', 'POST', 'application/x-www-form-urlencoded'),
action='/message/broadcast.json',
params={
"fromUserId": fromUserId,
"objectName": objectName,
"content": content,
"pushContent": pushContent,
"pushData": pushData,
"os": os
})
return Response(r, desc) |
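A hedged call-site sketch; the client object and the objectName value are assumptions based on typical RongCloud server SDK usage, not taken from this code:

# 'client' is a hypothetical object exposing the broadcast method above.
response = client.broadcast(
    fromUserId='system',
    objectName='RC:TxtMsg',  # assumed built-in text message type
    content='{"content": "Maintenance tonight at 22:00."}',
    pushContent='Maintenance tonight at 22:00.',
)
print(response)  # wraps code (200 on success) and errorMessage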
def delete_untagged(collector, **kwargs):
"""Find the untagged images and remove them"""
configuration = collector.configuration
docker_api = configuration["harpoon"].docker_api
images = docker_api.images()
found = False
for image in images:
if image["RepoTags"] == ["<none>:<none>"]:
found = True
image_id = image["Id"]
log.info("Deleting untagged image\thash=%s", image_id)
try:
docker_api.remove_image(image["Id"])
except DockerAPIError as error:
log.error("Failed to delete image\thash=%s\terror=%s", image_id, error)
if not found:
log.info("Didn't find any untagged images to delete!") | Find the untagged images and remove them | Below is the the instruction that describes the task:
### Input:
Find the untagged images and remove them
### Response:
def delete_untagged(collector, **kwargs):
"""Find the untagged images and remove them"""
configuration = collector.configuration
docker_api = configuration["harpoon"].docker_api
images = docker_api.images()
found = False
for image in images:
if image["RepoTags"] == ["<none>:<none>"]:
found = True
image_id = image["Id"]
log.info("Deleting untagged image\thash=%s", image_id)
try:
docker_api.remove_image(image["Id"])
except DockerAPIError as error:
log.error("Failed to delete image\thash=%s\terror=%s", image_id, error)
if not found:
log.info("Didn't find any untagged images to delete!") |
def _get_site_class(self, vs30, mmi_mean):
"""
Return site class flag for:
Class E - Very Soft Soil vs30 < 180
Class D - Deep or Soft Soil vs30 >= 180 and vs30 <= 360
Class C - Shallow Soil vs30 > 360 and vs30 <= 760
Class B - Rock vs30 > 760 and vs30 <= 1500
        Class A - Strong Rock vs30 > 1500
The S site class is equal to
S = c1 if MMI <= 7
S = c1 - d *(MMI - 7.0) if 7<MMI<9.5
S = c2 if MMI >= 9.5
"""
if vs30[0] < 180:
c1 = 1.0
c2 = -0.25
d = 0.5
elif vs30[0] >= 180 and vs30[0] <= 360:
c1 = 0.5
c2 = -0.125
d = 0.25
elif vs30[0] > 360 and vs30[0] <= 760:
c1 = 0.
c2 = 0.
d = 0.
elif vs30[0] > 760 and vs30[0] <= 1500:
c1 = -0.5
c2 = 0.125
d = -0.25
elif vs30[0] > 1500:
c1 = -1.0
c2 = 0.25
d = -0.5
S = np.zeros_like(vs30)
for i in range(vs30.size):
if mmi_mean[i] <= 7.0:
S[i] += c1
elif mmi_mean[i] > 7 and mmi_mean[i] < 9.5:
S[i] += c1 - d * (mmi_mean[i] - 7.0)
else:
S[i] += c2
return S | Return site class flag for:
Class E - Very Soft Soil vs30 < 180
Class D - Deep or Soft Soil vs30 >= 180 and vs30 <= 360
Class C - Shallow Soil vs30 > 360 and vs30 <= 760
Class B - Rock vs30 > 760 and vs30 <= 1500
    Class A - Strong Rock vs30 > 1500
The S site class is equal to
S = c1 if MMI <= 7
S = c1 - d *(MMI - 7.0) if 7<MMI<9.5
    S = c2 if MMI >= 9.5 | Below is the instruction that describes the task:
### Input:
Return site class flag for:
Class E - Very Soft Soil vs30 < 180
Class D - Deep or Soft Soil vs30 >= 180 and vs30 <= 360
Class C - Shallow Soil vs30 > 360 and vs30 <= 760
Class B - Rock vs30 > 760 and vs30 <= 1500
    Class A - Strong Rock vs30 > 1500
The S site class is equal to
S = c1 if MMI <= 7
S = c1 - d *(MMI - 7.0) if 7<MMI<9.5
S = c2 if MMI >= 9.5
### Response:
def _get_site_class(self, vs30, mmi_mean):
"""
Return site class flag for:
Class E - Very Soft Soil vs30 < 180
Class D - Deep or Soft Soil vs30 >= 180 and vs30 <= 360
Class C - Shallow Soil vs30 > 360 and vs30 <= 760
Class B - Rock vs30 > 760 and vs30 <= 1500
        Class A - Strong Rock vs30 > 1500
The S site class is equal to
S = c1 if MMI <= 7
S = c1 - d *(MMI - 7.0) if 7<MMI<9.5
S = c2 if MMI >= 9.5
"""
if vs30[0] < 180:
c1 = 1.0
c2 = -0.25
d = 0.5
elif vs30[0] >= 180 and vs30[0] <= 360:
c1 = 0.5
c2 = -0.125
d = 0.25
elif vs30[0] > 360 and vs30[0] <= 760:
c1 = 0.
c2 = 0.
d = 0.
elif vs30[0] > 760 and vs30[0] <= 1500:
c1 = -0.5
c2 = 0.125
d = -0.25
elif vs30[0] > 1500:
c1 = -1.0
c2 = 0.25
d = -0.5
S = np.zeros_like(vs30)
for i in range(vs30.size):
if mmi_mean[i] <= 7.0:
S[i] += c1
elif mmi_mean[i] > 7 and mmi_mean[i] < 9.5:
S[i] += c1 - d * (mmi_mean[i] - 7.0)
else:
S[i] += c2
return S |
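A quick numeric check of the piecewise rule under assumed inputs: a Class D site (vs30 = 250, so c1 = 0.5, c2 = -0.125, d = 0.25) should give S = 0.5 at MMI 6.5, S = 0.5 - 0.25 * (8.0 - 7.0) = 0.25 at MMI 8.0, and S = -0.125 at MMI 10.0:

import numpy as np

vs30 = np.array([250.0, 250.0, 250.0])
mmi_mean = np.array([6.5, 8.0, 10.0])
# Assuming 'gsim' is an instance of the GMPE class defining _get_site_class:
# S = gsim._get_site_class(vs30, mmi_mean)   # expected: [0.5, 0.25, -0.125]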
def make_report(self, outcome):
"""Make report in form of two notebooks.
Use nbdime diff-web to present the difference between reference
cells and test cells.
"""
failures = self.getreports('failed')
if not failures:
return
for rep in failures:
# Check if this is a notebook node
msg = self._getfailureheadline(rep)
lines = rep.longrepr.splitlines()
if len(lines) > 1:
self.section(msg, lines[1])
self._outrep_summary(rep)
tmpdir = tempfile.mkdtemp()
try:
ref_file = os.path.join(tmpdir, 'reference.ipynb')
test_file = os.path.join(tmpdir, 'test_result.ipynb')
with io.open(ref_file, "w", encoding="utf8") as f:
nbformat.write(self.nb_ref, f)
with io.open(test_file, "w", encoding="utf8") as f:
nbformat.write(self.nb_test, f)
run_server(
port=0, # Run on random port
cwd=tmpdir,
closable=True,
on_port=lambda port: browse(
port, ref_file, test_file, None))
finally:
            shutil.rmtree(tmpdir) | Make a report in the form of two notebooks.
Use nbdime diff-web to present the difference between reference
    cells and test cells. | Below is the instruction that describes the task:
### Input:
Make a report in the form of two notebooks.
Use nbdime diff-web to present the difference between reference
cells and test cells.
### Response:
def make_report(self, outcome):
"""Make report in form of two notebooks.
Use nbdime diff-web to present the difference between reference
cells and test cells.
"""
failures = self.getreports('failed')
if not failures:
return
for rep in failures:
# Check if this is a notebook node
msg = self._getfailureheadline(rep)
lines = rep.longrepr.splitlines()
if len(lines) > 1:
self.section(msg, lines[1])
self._outrep_summary(rep)
tmpdir = tempfile.mkdtemp()
try:
ref_file = os.path.join(tmpdir, 'reference.ipynb')
test_file = os.path.join(tmpdir, 'test_result.ipynb')
with io.open(ref_file, "w", encoding="utf8") as f:
nbformat.write(self.nb_ref, f)
with io.open(test_file, "w", encoding="utf8") as f:
nbformat.write(self.nb_test, f)
run_server(
port=0, # Run on random port
cwd=tmpdir,
closable=True,
on_port=lambda port: browse(
port, ref_file, test_file, None))
finally:
shutil.rmtree(tmpdir) |
def currentpath(self) -> str:
"""Absolute path of the current working directory.
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> from hydpy import repr_, TestIO
>>> with TestIO():
... filemanager.currentdir = 'testdir'
... repr_(filemanager.currentpath) # doctest: +ELLIPSIS
'...hydpy/tests/iotesting/projectname/basename/testdir'
"""
return os.path.join(self.basepath, self.currentdir) | Absolute path of the current working directory.
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> from hydpy import repr_, TestIO
>>> with TestIO():
... filemanager.currentdir = 'testdir'
... repr_(filemanager.currentpath) # doctest: +ELLIPSIS
    '...hydpy/tests/iotesting/projectname/basename/testdir' | Below is the instruction that describes the task:
### Input:
Absolute path of the current working directory.
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> from hydpy import repr_, TestIO
>>> with TestIO():
... filemanager.currentdir = 'testdir'
... repr_(filemanager.currentpath) # doctest: +ELLIPSIS
'...hydpy/tests/iotesting/projectname/basename/testdir'
### Response:
def currentpath(self) -> str:
"""Absolute path of the current working directory.
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> from hydpy import repr_, TestIO
>>> with TestIO():
... filemanager.currentdir = 'testdir'
... repr_(filemanager.currentpath) # doctest: +ELLIPSIS
'...hydpy/tests/iotesting/projectname/basename/testdir'
"""
return os.path.join(self.basepath, self.currentdir) |
def from_analysis_period(cls, analysis_period, clearness=1,
daylight_savings_indicator='No'):
""""Initialize a OriginalClearSkyCondition from an analysis_period"""
_check_analysis_period(analysis_period)
return cls(analysis_period.st_month, analysis_period.st_day, clearness,
               daylight_savings_indicator) | Initialize an OriginalClearSkyCondition from an analysis_period | Below is the instruction that describes the task:
### Input:
Initialize an OriginalClearSkyCondition from an analysis_period
### Response:
def from_analysis_period(cls, analysis_period, clearness=1,
daylight_savings_indicator='No'):
""""Initialize a OriginalClearSkyCondition from an analysis_period"""
_check_analysis_period(analysis_period)
return cls(analysis_period.st_month, analysis_period.st_day, clearness,
daylight_savings_indicator) |
def parse_model_table_file(path, f):
"""Parse a file as a list of model reactions
    Yields reaction IDs. Path can be given as a string or a context.
"""
for line in f:
line, _, comment = line.partition('#')
line = line.strip()
if line == '':
continue
yield line | Parse a file as a list of model reactions
    Yields reaction IDs. Path can be given as a string or a context. | Below is the instruction that describes the task:
### Input:
Parse a file as a list of model reactions
Yields reaction IDs. Path can be given as a string or a context.
### Response:
def parse_model_table_file(path, f):
"""Parse a file as a list of model reactions
    Yields reaction IDs. Path can be given as a string or a context.
"""
for line in f:
line, _, comment = line.partition('#')
line = line.strip()
if line == '':
continue
yield line |
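A self-contained usage sketch with an in-memory file (the reaction IDs are made up):

import io

table = io.StringIO('rxn_1  # glycolysis step\n'
                    '\n'
                    'rxn_2\n')
print(list(parse_model_table_file('model.tsv', table)))  # ['rxn_1', 'rxn_2']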
async def get_entry(config, url):
""" Given an entry URL, return the entry
Arguments:
config -- the configuration
url -- the URL of the entry
Returns: 3-tuple of (current, previous, updated) """
previous = config.cache.get(
'entry', url,
schema_version=SCHEMA_VERSION) if config.cache else None
headers = previous.caching if previous else None
request = await utils.retry_get(config, url, headers=headers)
if not request or not request.success:
LOGGER.error("Could not get entry %s: %d", url,
request.status if request else -1)
return None, previous, False
# cache hit
if request.cached:
return previous, previous, False
current = Entry(request)
# Content updated
if config.cache:
config.cache.set('entry', url, current)
return current, previous, (not previous
or previous.digest != current.digest
or previous.status != current.status) | Given an entry URL, return the entry
Arguments:
config -- the configuration
url -- the URL of the entry
    Returns: 3-tuple of (current, previous, updated) | Below is the instruction that describes the task:
### Input:
Given an entry URL, return the entry
Arguments:
config -- the configuration
url -- the URL of the entry
Returns: 3-tuple of (current, previous, updated)
### Response:
async def get_entry(config, url):
""" Given an entry URL, return the entry
Arguments:
config -- the configuration
url -- the URL of the entry
Returns: 3-tuple of (current, previous, updated) """
previous = config.cache.get(
'entry', url,
schema_version=SCHEMA_VERSION) if config.cache else None
headers = previous.caching if previous else None
request = await utils.retry_get(config, url, headers=headers)
if not request or not request.success:
LOGGER.error("Could not get entry %s: %d", url,
request.status if request else -1)
return None, previous, False
# cache hit
if request.cached:
return previous, previous, False
current = Entry(request)
# Content updated
if config.cache:
config.cache.set('entry', url, current)
return current, previous, (not previous
or previous.digest != current.digest
or previous.status != current.status) |
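A minimal async call-site sketch, assuming a config object wired up with the cache and retry helpers this module expects:

import asyncio

async def check(config):
    current, previous, updated = await get_entry(config, 'https://example.com/entry/1')
    if updated and current:
        print('entry changed, new digest:', current.digest)

# asyncio.run(check(config))  # 'config' comes from the surrounding application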
def _stellingwerf_pdm_worker(task):
'''
This is a parallel worker for the function below.
Parameters
----------
task : tuple
This is of the form below::
task[0] = times
task[1] = mags
task[2] = errs
task[3] = frequency
task[4] = binsize
task[5] = minbin
Returns
-------
theta_pdm : float
The theta value at the specified frequency. nan if the calculation
fails.
'''
times, mags, errs, frequency, binsize, minbin = task
try:
theta = stellingwerf_pdm_theta(times, mags, errs, frequency,
binsize=binsize, minbin=minbin)
return theta
except Exception as e:
return npnan | This is a parallel worker for the function below.
Parameters
----------
task : tuple
This is of the form below::
task[0] = times
task[1] = mags
task[2] = errs
task[3] = frequency
task[4] = binsize
task[5] = minbin
Returns
-------
theta_pdm : float
The theta value at the specified frequency. nan if the calculation
    fails. | Below is the instruction that describes the task:
### Input:
This is a parallel worker for the function below.
Parameters
----------
task : tuple
This is of the form below::
task[0] = times
task[1] = mags
task[2] = errs
task[3] = frequency
task[4] = binsize
task[5] = minbin
Returns
-------
theta_pdm : float
The theta value at the specified frequency. nan if the calculation
fails.
### Response:
def _stellingwerf_pdm_worker(task):
'''
This is a parallel worker for the function below.
Parameters
----------
task : tuple
This is of the form below::
task[0] = times
task[1] = mags
task[2] = errs
task[3] = frequency
task[4] = binsize
task[5] = minbin
Returns
-------
theta_pdm : float
The theta value at the specified frequency. nan if the calculation
fails.
'''
times, mags, errs, frequency, binsize, minbin = task
try:
theta = stellingwerf_pdm_theta(times, mags, errs, frequency,
binsize=binsize, minbin=minbin)
return theta
except Exception as e:
return npnan |
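A usage sketch fanning the worker out over a frequency grid with multiprocessing (synthetic data; binsize=0.05 and minbin=9 are assumed reasonable values, and stellingwerf_pdm_theta must be importable from the same module):

import numpy as np
import multiprocessing as mp

times = np.sort(100.0 * np.random.rand(500))
mags = 12.0 + 0.1 * np.sin(2.0 * np.pi * times / 3.7) + 0.01 * np.random.randn(500)
errs = np.full_like(times, 0.01)
freqs = np.linspace(0.05, 5.0, 2000)

tasks = [(times, mags, errs, f, 0.05, 9) for f in freqs]
with mp.Pool(4) as pool:
    thetas = pool.map(_stellingwerf_pdm_worker, tasks)
best_freq = freqs[int(np.nanargmin(thetas))]  # PDM minimizes theta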
def _init_params_default(self):
"""
Internal method for default parameter initialization
"""
# if there are some nan -> mean impute
Yimp = self.Y.copy()
Inan = sp.isnan(Yimp)
Yimp[Inan] = Yimp[~Inan].mean()
if self.P==1: C = sp.array([[Yimp.var()]])
else: C = sp.cov(Yimp.T)
C /= float(self.n_randEffs)
for ti in range(self.n_randEffs):
            self.getTraitCovarFun(ti).setCovariance(C) | Internal method for default parameter initialization | Below is the instruction that describes the task:
### Input:
Internal method for default parameter initialization
### Response:
def _init_params_default(self):
"""
Internal method for default parameter initialization
"""
# if there are some nan -> mean impute
Yimp = self.Y.copy()
Inan = sp.isnan(Yimp)
Yimp[Inan] = Yimp[~Inan].mean()
if self.P==1: C = sp.array([[Yimp.var()]])
else: C = sp.cov(Yimp.T)
C /= float(self.n_randEffs)
for ti in range(self.n_randEffs):
self.getTraitCovarFun(ti).setCovariance(C) |
def get_shape(kind='line',x=None,y=None,x0=None,y0=None,x1=None,y1=None,span=0,color='red',dash='solid',width=1,
fillcolor=None,fill=False,opacity=1,xref='x',yref='y'):
"""
Returns a plotly shape
Parameters:
-----------
kind : string
Shape kind
line
rect
circle
x : float
x values for the shape.
This assumes x0=x1
x0 : float
x0 value for the shape
x1 : float
x1 value for the shape
y : float
y values for the shape.
This assumes y0=y1
y0 : float
y0 value for the shape
y1 : float
y1 value for the shape
color : string
color for shape line
dash : string
line style
solid
dash
dashdot
dot
width : int
line width
fillcolor : string
shape fill color
fill : bool
If True then fill shape
If not fillcolor then the
line color will be used
opacity : float [0,1]
opacity of the fill
xref : string
Sets the x coordinate system
which this object refers to
'x'
'paper'
'x2' etc
yref : string
Sets the y coordinate system
which this object refers to
'y'
'paper'
'y2' etc
"""
if x1 is None:
if x0 is None:
if x is None:
xref='paper'
x0=0
x1=1
else:
x0=x1=x
else:
x1=x0
else:
x
if y1 is None:
if y0 is None:
if y is None:
yref='paper'
y0=0
y1=1
else:
y0=y1=y
else:
y1=y0
shape = { 'x0':x0,
'y0':y0,
'x1':x1,
'y1':y1,
'line' : {
'color':normalize(color),
'width':width,
'dash':dash
},
'xref':xref,
'yref':yref
}
if kind=='line':
shape['type']='line'
elif kind=='circle':
shape['type']='circle'
elif kind=='rect':
shape['type']='rect'
else:
raise Exception("Invalid or unkown shape type : {0}".format(kind))
if (fill or fillcolor) and kind!='line':
fillcolor = color if not fillcolor else fillcolor
fillcolor=to_rgba(normalize(fillcolor),opacity)
shape['fillcolor']=fillcolor
return shape | Returns a plotly shape
Parameters:
-----------
kind : string
Shape kind
line
rect
circle
x : float
x values for the shape.
This assumes x0=x1
x0 : float
x0 value for the shape
x1 : float
x1 value for the shape
y : float
y values for the shape.
This assumes y0=y1
y0 : float
y0 value for the shape
y1 : float
y1 value for the shape
color : string
color for shape line
dash : string
line style
solid
dash
dashdot
dot
width : int
line width
fillcolor : string
shape fill color
fill : bool
If True then fill shape
If not fillcolor then the
line color will be used
opacity : float [0,1]
opacity of the fill
xref : string
Sets the x coordinate system
which this object refers to
'x'
'paper'
'x2' etc
yref : string
Sets the y coordinate system
which this object refers to
'y'
'paper'
        'y2' etc | Below is the instruction that describes the task:
### Input:
Returns a plotly shape
Parameters:
-----------
kind : string
Shape kind
line
rect
circle
x : float
x values for the shape.
This assumes x0=x1
x0 : float
x0 value for the shape
x1 : float
x1 value for the shape
y : float
y values for the shape.
This assumes y0=y1
y0 : float
y0 value for the shape
y1 : float
y1 value for the shape
color : string
color for shape line
dash : string
line style
solid
dash
dashdot
dot
width : int
line width
fillcolor : string
shape fill color
fill : bool
If True then fill shape
If not fillcolor then the
line color will be used
opacity : float [0,1]
opacity of the fill
xref : string
Sets the x coordinate system
which this object refers to
'x'
'paper'
'x2' etc
yref : string
Sets the y coordinate system
which this object refers to
'y'
'paper'
'y2' etc
### Response:
def get_shape(kind='line',x=None,y=None,x0=None,y0=None,x1=None,y1=None,span=0,color='red',dash='solid',width=1,
fillcolor=None,fill=False,opacity=1,xref='x',yref='y'):
"""
Returns a plotly shape
Parameters:
-----------
kind : string
Shape kind
line
rect
circle
x : float
x values for the shape.
This assumes x0=x1
x0 : float
x0 value for the shape
x1 : float
x1 value for the shape
y : float
y values for the shape.
This assumes y0=y1
y0 : float
y0 value for the shape
y1 : float
y1 value for the shape
color : string
color for shape line
dash : string
line style
solid
dash
dashdot
dot
width : int
line width
fillcolor : string
shape fill color
fill : bool
If True then fill shape
If not fillcolor then the
line color will be used
opacity : float [0,1]
opacity of the fill
xref : string
Sets the x coordinate system
which this object refers to
'x'
'paper'
'x2' etc
yref : string
Sets the y coordinate system
which this object refers to
'y'
'paper'
'y2' etc
"""
if x1 is None:
if x0 is None:
if x is None:
xref='paper'
x0=0
x1=1
else:
x0=x1=x
else:
x1=x0
else:
x
if y1 is None:
if y0 is None:
if y is None:
yref='paper'
y0=0
y1=1
else:
y0=y1=y
else:
y1=y0
shape = { 'x0':x0,
'y0':y0,
'x1':x1,
'y1':y1,
'line' : {
'color':normalize(color),
'width':width,
'dash':dash
},
'xref':xref,
'yref':yref
}
if kind=='line':
shape['type']='line'
elif kind=='circle':
shape['type']='circle'
elif kind=='rect':
shape['type']='rect'
else:
raise Exception("Invalid or unkown shape type : {0}".format(kind))
if (fill or fillcolor) and kind!='line':
fillcolor = color if not fillcolor else fillcolor
fillcolor=to_rgba(normalize(fillcolor),opacity)
shape['fillcolor']=fillcolor
return shape |
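Two usage sketches; the returned dicts are meant for a plotly figure layout's 'shapes' list:

# Horizontal reference line at y=3 spanning the full x range ('paper' coords).
hline = get_shape(kind='line', y=3, color='blue', dash='dash', width=2)

# Shaded band between x=2 and x=4 spanning the full y range.
band = get_shape(kind='rect', x0=2, x1=4, fill=True, color='orange', opacity=0.3)

# layout = {'shapes': [hline, band]}  # attach to a figure layout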
def saml_provider_absent(name, region=None, key=None, keyid=None, profile=None):
'''
.. versionadded:: 2016.11.0
Ensure the SAML provider with the specified name is absent.
name (string)
The name of the SAML provider.
region (string)
Region to connect to.
key (string)
Secret key to be used.
keyid (string)
Access key to be used.
profile (dict)
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
provider = __salt__['boto_iam.list_saml_providers'](region=region,
key=key, keyid=keyid,
profile=profile)
if not provider:
ret['comment'] = 'SAML provider {0} is absent.'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'SAML provider {0} is set to be removed.'.format(name)
ret['result'] = None
return ret
deleted = __salt__['boto_iam.delete_saml_provider'](name, region=region,
key=key, keyid=keyid,
profile=profile)
if deleted is not False:
ret['comment'] = 'SAML provider {0} was deleted.'.format(name)
ret['changes']['old'] = name
return ret
ret['result'] = False
ret['comment'] = 'SAML provider {0} failed to be deleted.'.format(name)
return ret | .. versionadded:: 2016.11.0
Ensure the SAML provider with the specified name is absent.
name (string)
The name of the SAML provider.
region (string)
Region to connect to.
key (string)
Secret key to be used.
keyid (string)
Access key to be used.
profile (dict)
A dict with region, key and keyid, or a pillar key (string)
    that contains a dict with region, key and keyid. | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2016.11.0
Ensure the SAML provider with the specified name is absent.
name (string)
The name of the SAML provider.
region (string)
Region to connect to.
key (string)
Secret key to be used.
keyid (string)
Access key to be used.
profile (dict)
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
### Response:
def saml_provider_absent(name, region=None, key=None, keyid=None, profile=None):
'''
.. versionadded:: 2016.11.0
Ensure the SAML provider with the specified name is absent.
name (string)
The name of the SAML provider.
region (string)
Region to connect to.
key (string)
Secret key to be used.
keyid (string)
Access key to be used.
profile (dict)
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
provider = __salt__['boto_iam.list_saml_providers'](region=region,
key=key, keyid=keyid,
profile=profile)
if not provider:
ret['comment'] = 'SAML provider {0} is absent.'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'SAML provider {0} is set to be removed.'.format(name)
ret['result'] = None
return ret
deleted = __salt__['boto_iam.delete_saml_provider'](name, region=region,
key=key, keyid=keyid,
profile=profile)
if deleted is not False:
ret['comment'] = 'SAML provider {0} was deleted.'.format(name)
ret['changes']['old'] = name
return ret
ret['result'] = False
ret['comment'] = 'SAML provider {0} failed to be deleted.'.format(name)
return ret |
def redirect_to_assignment_override_for_group(self, group_id, assignment_id):
"""
Redirect to the assignment override for a group.
Responds with a redirect to the override for the given group, if any
(404 otherwise).
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - group_id
"""ID"""
path["group_id"] = group_id
# REQUIRED - PATH - assignment_id
"""ID"""
path["assignment_id"] = assignment_id
self.logger.debug("GET /api/v1/groups/{group_id}/assignments/{assignment_id}/override with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/groups/{group_id}/assignments/{assignment_id}/override".format(**path), data=data, params=params, no_data=True) | Redirect to the assignment override for a group.
Responds with a redirect to the override for the given group, if any
    (404 otherwise). | Below is the instruction that describes the task:
### Input:
Redirect to the assignment override for a group.
Responds with a redirect to the override for the given group, if any
(404 otherwise).
### Response:
def redirect_to_assignment_override_for_group(self, group_id, assignment_id):
"""
Redirect to the assignment override for a group.
Responds with a redirect to the override for the given group, if any
(404 otherwise).
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - group_id
"""ID"""
path["group_id"] = group_id
# REQUIRED - PATH - assignment_id
"""ID"""
path["assignment_id"] = assignment_id
self.logger.debug("GET /api/v1/groups/{group_id}/assignments/{assignment_id}/override with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/groups/{group_id}/assignments/{assignment_id}/override".format(**path), data=data, params=params, no_data=True) |
def mouse_click(self, widget, event=None):
"""Implements shift- and control-key handling features for mouse button press events explicit
The method is implements a fully defined mouse pattern to use shift- and control-key for multi-selection in a
TreeView and a ListStore as model. It avoid problems caused by special renderer types like the text combo
renderer by stopping the callback handler to continue with notifications.
:param Gtk.Object widget: Object which is the source of the event
:param Gtk.Event event: Event generated by mouse click
:rtype: bool
"""
if event.type == Gdk.EventType.BUTTON_PRESS:
pthinfo = self.tree_view.get_path_at_pos(int(event.x), int(event.y))
if not bool(event.get_state() & Gdk.ModifierType.CONTROL_MASK) \
and not bool(event.get_state() & Gdk.ModifierType.SHIFT_MASK) and \
event.type == Gdk.EventType.BUTTON_PRESS and event.get_button()[1] == 3:
if pthinfo is not None:
model, paths = self._tree_selection.get_selected_rows()
# print(paths)
if pthinfo[0] not in paths:
# self._logger.info("force single selection for right click")
self.tree_view.set_cursor(pthinfo[0])
self._last_path_selection = pthinfo[0]
else:
# self._logger.info("single- or multi-selection for right click")
pass
self.on_right_click_menu()
return True
if (bool(event.get_state() & Gdk.ModifierType.CONTROL_MASK) or \
bool(event.get_state() & Gdk.ModifierType.SHIFT_MASK)) and \
event.type == Gdk.EventType.BUTTON_PRESS and event.get_button()[1] == 3:
return True
if not bool(event.get_state() & Gdk.ModifierType.SHIFT_MASK) and event.get_button()[1] == 1:
if pthinfo is not None:
# self._logger.info("last select row {}".format(pthinfo[0]))
self._last_path_selection = pthinfo[0]
# else:
# self._logger.info("deselect rows")
# self.tree_selection.unselect_all()
if bool(event.get_state() & Gdk.ModifierType.SHIFT_MASK) and event.get_button()[1] == 1:
# self._logger.info("SHIFT adjust selection range")
model, paths = self._tree_selection.get_selected_rows()
# print(model, paths, pthinfo[0])
if paths and pthinfo and pthinfo[0]:
if self._last_path_selection[0] <= pthinfo[0][0]:
new_row_ids_selected = list(range(self._last_path_selection[0], pthinfo[0][0]+1))
else:
new_row_ids_selected = list(range(self._last_path_selection[0], pthinfo[0][0]-1, -1))
# self._logger.info("range to select {0}, {1}".format(new_row_ids_selected, model))
self._tree_selection.unselect_all()
for path in new_row_ids_selected:
self._tree_selection.select_path(path)
return True
else:
# self._logger.info("nothing selected {}".format(model))
if pthinfo and pthinfo[0]:
self._last_path_selection = pthinfo[0]
if bool(event.get_state() & Gdk.ModifierType.CONTROL_MASK) and event.get_button()[1] == 1:
# self._logger.info("CONTROL adjust selection range")
model, paths = self._tree_selection.get_selected_rows()
# print(model, paths, pthinfo[0])
if paths and pthinfo and pthinfo[0]:
if pthinfo[0] in paths:
self._tree_selection.unselect_path(pthinfo[0])
else:
self._tree_selection.select_path(pthinfo[0])
return True
elif pthinfo and pthinfo[0]:
self._tree_selection.select_path(pthinfo[0])
return True
elif event.type == Gdk.EventType._2BUTTON_PRESS:
        self._handle_double_click(event) | Implements explicit shift- and control-key handling for mouse button press events
    The method implements a fully defined mouse pattern to use the shift- and control-keys for multi-selection in a
    TreeView with a ListStore as model. It avoids problems caused by special renderer types like the text combo
    renderer by stopping the callback handler from continuing with notifications.
:param Gtk.Object widget: Object which is the source of the event
:param Gtk.Event event: Event generated by mouse click
    :rtype: bool | Below is the instruction that describes the task:
### Input:
Implements explicit shift- and control-key handling for mouse button press events
    The method implements a fully defined mouse pattern to use the shift- and control-keys for multi-selection in a
    TreeView with a ListStore as model. It avoids problems caused by special renderer types like the text combo
    renderer by stopping the callback handler from continuing with notifications.
:param Gtk.Object widget: Object which is the source of the event
:param Gtk.Event event: Event generated by mouse click
:rtype: bool
### Response:
def mouse_click(self, widget, event=None):
"""Implements shift- and control-key handling features for mouse button press events explicit
The method is implements a fully defined mouse pattern to use shift- and control-key for multi-selection in a
TreeView and a ListStore as model. It avoid problems caused by special renderer types like the text combo
renderer by stopping the callback handler to continue with notifications.
:param Gtk.Object widget: Object which is the source of the event
:param Gtk.Event event: Event generated by mouse click
:rtype: bool
"""
if event.type == Gdk.EventType.BUTTON_PRESS:
pthinfo = self.tree_view.get_path_at_pos(int(event.x), int(event.y))
if not bool(event.get_state() & Gdk.ModifierType.CONTROL_MASK) \
and not bool(event.get_state() & Gdk.ModifierType.SHIFT_MASK) and \
event.type == Gdk.EventType.BUTTON_PRESS and event.get_button()[1] == 3:
if pthinfo is not None:
model, paths = self._tree_selection.get_selected_rows()
# print(paths)
if pthinfo[0] not in paths:
# self._logger.info("force single selection for right click")
self.tree_view.set_cursor(pthinfo[0])
self._last_path_selection = pthinfo[0]
else:
# self._logger.info("single- or multi-selection for right click")
pass
self.on_right_click_menu()
return True
if (bool(event.get_state() & Gdk.ModifierType.CONTROL_MASK) or \
bool(event.get_state() & Gdk.ModifierType.SHIFT_MASK)) and \
event.type == Gdk.EventType.BUTTON_PRESS and event.get_button()[1] == 3:
return True
if not bool(event.get_state() & Gdk.ModifierType.SHIFT_MASK) and event.get_button()[1] == 1:
if pthinfo is not None:
# self._logger.info("last select row {}".format(pthinfo[0]))
self._last_path_selection = pthinfo[0]
# else:
# self._logger.info("deselect rows")
# self.tree_selection.unselect_all()
if bool(event.get_state() & Gdk.ModifierType.SHIFT_MASK) and event.get_button()[1] == 1:
# self._logger.info("SHIFT adjust selection range")
model, paths = self._tree_selection.get_selected_rows()
# print(model, paths, pthinfo[0])
if paths and pthinfo and pthinfo[0]:
if self._last_path_selection[0] <= pthinfo[0][0]:
new_row_ids_selected = list(range(self._last_path_selection[0], pthinfo[0][0]+1))
else:
new_row_ids_selected = list(range(self._last_path_selection[0], pthinfo[0][0]-1, -1))
# self._logger.info("range to select {0}, {1}".format(new_row_ids_selected, model))
self._tree_selection.unselect_all()
for path in new_row_ids_selected:
self._tree_selection.select_path(path)
return True
else:
# self._logger.info("nothing selected {}".format(model))
if pthinfo and pthinfo[0]:
self._last_path_selection = pthinfo[0]
if bool(event.get_state() & Gdk.ModifierType.CONTROL_MASK) and event.get_button()[1] == 1:
# self._logger.info("CONTROL adjust selection range")
model, paths = self._tree_selection.get_selected_rows()
# print(model, paths, pthinfo[0])
if paths and pthinfo and pthinfo[0]:
if pthinfo[0] in paths:
self._tree_selection.unselect_path(pthinfo[0])
else:
self._tree_selection.select_path(pthinfo[0])
return True
elif pthinfo and pthinfo[0]:
self._tree_selection.select_path(pthinfo[0])
return True
elif event.type == Gdk.EventType._2BUTTON_PRESS:
self._handle_double_click(event) |
def collection(self, collection_id):
"""Create a sub-collection underneath the current document.
Args:
collection_id (str): The sub-collection identifier (sometimes
referred to as the "kind").
Returns:
~.firestore_v1beta1.collection.CollectionReference: The
child collection.
"""
child_path = self._path + (collection_id,)
return self._client.collection(*child_path) | Create a sub-collection underneath the current document.
Args:
collection_id (str): The sub-collection identifier (sometimes
referred to as the "kind").
Returns:
~.firestore_v1beta1.collection.CollectionReference: The
            child collection. | Below is the instruction that describes the task:
### Input:
Create a sub-collection underneath the current document.
Args:
collection_id (str): The sub-collection identifier (sometimes
referred to as the "kind").
Returns:
~.firestore_v1beta1.collection.CollectionReference: The
child collection.
### Response:
def collection(self, collection_id):
"""Create a sub-collection underneath the current document.
Args:
collection_id (str): The sub-collection identifier (sometimes
referred to as the "kind").
Returns:
~.firestore_v1beta1.collection.CollectionReference: The
child collection.
"""
child_path = self._path + (collection_id,)
return self._client.collection(*child_path) |
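A short usage sketch, assuming a Firestore client and an existing document reference:

# doc is a DocumentReference, e.g. client.collection('users').document('alice')
prefs = doc.collection('preferences')  # refers to users/alice/preferences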
def shutdown(self, msg, args):
"""Causes the bot to gracefully shutdown."""
self.log.info("Received shutdown from %s", msg.user.username)
self._bot.runnable = False
return "Shutting down..." | Causes the bot to gracefully shutdown. | Below is the the instruction that describes the task:
### Input:
Causes the bot to gracefully shut down.
### Response:
def shutdown(self, msg, args):
"""Causes the bot to gracefully shutdown."""
self.log.info("Received shutdown from %s", msg.user.username)
self._bot.runnable = False
return "Shutting down..." |
def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names)) | Produce a report of imported names. | Below is the the instruction that describes the task:
### Input:
Produce a report of imported names.
### Response:
def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names)) |
def with_arg_count(self, count):
"""Set the last call to expect an exact argument count.
I.E.::
>>> auth = Fake('auth').provides('login').with_arg_count(2)
>>> auth.login('joe_user') # forgot password
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called with 1 arg(s) but expected 2
"""
exp = self._get_current_call()
exp.expected_arg_count = count
return self | Set the last call to expect an exact argument count.
I.E.::
>>> auth = Fake('auth').provides('login').with_arg_count(2)
>>> auth.login('joe_user') # forgot password
Traceback (most recent call last):
...
        AssertionError: fake:auth.login() was called with 1 arg(s) but expected 2 | Below is the instruction that describes the task:
### Input:
Set the last call to expect an exact argument count.
I.E.::
>>> auth = Fake('auth').provides('login').with_arg_count(2)
>>> auth.login('joe_user') # forgot password
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called with 1 arg(s) but expected 2
### Response:
def with_arg_count(self, count):
"""Set the last call to expect an exact argument count.
I.E.::
>>> auth = Fake('auth').provides('login').with_arg_count(2)
>>> auth.login('joe_user') # forgot password
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called with 1 arg(s) but expected 2
"""
exp = self._get_current_call()
exp.expected_arg_count = count
return self |
def trigger(cls, streams):
"""
Given a list of streams, collect all the stream parameters into
a dictionary and pass it to the union set of subscribers.
Passing multiple streams at once to trigger can be useful when a
subscriber may be set multiple times across streams but only
needs to be called once.
"""
# Union of stream contents
items = [stream.contents.items() for stream in set(streams)]
union = [kv for kvs in items for kv in kvs]
klist = [k for k, _ in union]
key_clashes = set([k for k in klist if klist.count(k) > 1])
if key_clashes:
clashes = []
dicts = [dict(kvs) for kvs in items]
for clash in key_clashes:
values = set(d[clash] for d in dicts if clash in d)
if len(values) > 1:
clashes.append((clash, values))
if clashes:
msg = ', '.join(['%r has values %r' % (k, v) for k, v in clashes])
print('Parameter value clashes where %s' % msg)
# Group subscribers by precedence while keeping the ordering
# within each group
subscriber_precedence = defaultdict(list)
for stream in streams:
stream._on_trigger()
for precedence, subscriber in stream._subscribers:
subscriber_precedence[precedence].append(subscriber)
sorted_subscribers = sorted(subscriber_precedence.items(), key=lambda x: x[0])
subscribers = util.unique_iterator([s for _, subscribers in sorted_subscribers
for s in subscribers])
with triggering_streams(streams):
for subscriber in subscribers:
subscriber(**dict(union))
for stream in streams:
with util.disable_constant(stream):
if stream.transient:
stream.reset() | Given a list of streams, collect all the stream parameters into
a dictionary and pass it to the union set of subscribers.
Passing multiple streams at once to trigger can be useful when a
subscriber may be set multiple times across streams but only
        needs to be called once. | Below is the instruction that describes the task:
### Input:
Given a list of streams, collect all the stream parameters into
a dictionary and pass it to the union set of subscribers.
Passing multiple streams at once to trigger can be useful when a
subscriber may be set multiple times across streams but only
needs to be called once.
### Response:
def trigger(cls, streams):
"""
Given a list of streams, collect all the stream parameters into
a dictionary and pass it to the union set of subscribers.
Passing multiple streams at once to trigger can be useful when a
subscriber may be set multiple times across streams but only
needs to be called once.
"""
# Union of stream contents
items = [stream.contents.items() for stream in set(streams)]
union = [kv for kvs in items for kv in kvs]
klist = [k for k, _ in union]
key_clashes = set([k for k in klist if klist.count(k) > 1])
if key_clashes:
clashes = []
dicts = [dict(kvs) for kvs in items]
for clash in key_clashes:
values = set(d[clash] for d in dicts if clash in d)
if len(values) > 1:
clashes.append((clash, values))
if clashes:
msg = ', '.join(['%r has values %r' % (k, v) for k, v in clashes])
print('Parameter value clashes where %s' % msg)
# Group subscribers by precedence while keeping the ordering
# within each group
subscriber_precedence = defaultdict(list)
for stream in streams:
stream._on_trigger()
for precedence, subscriber in stream._subscribers:
subscriber_precedence[precedence].append(subscriber)
sorted_subscribers = sorted(subscriber_precedence.items(), key=lambda x: x[0])
subscribers = util.unique_iterator([s for _, subscribers in sorted_subscribers
for s in subscribers])
with triggering_streams(streams):
for subscriber in subscribers:
subscriber(**dict(union))
for stream in streams:
with util.disable_constant(stream):
if stream.transient:
stream.reset() |
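A sketch of triggering two streams at once so each subscriber fires a single time (HoloViews-style usage; the exact stream classes and import path are assumptions):

from holoviews.streams import PointerXY, Selection1D, Stream  # assumed import path

xy = PointerXY(x=0.0, y=0.0)
sel = Selection1D(index=[])
xy.add_subscriber(lambda **contents: print('update:', contents))

Stream.trigger([xy, sel])  # each subscriber runs once with the union of contents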
def mdr_conditional_entropy(X, Y, labels, base=2):
"""Calculates the MDR conditional entropy, H(XY|labels), in the given base
MDR conditional entropy is calculated by combining variables X and Y into a single MDR model then calculating
the entropy of the resulting model's predictions conditional on the provided labels.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
base: integer (default: 2)
The base in which to calculate MDR conditional entropy
Returns
----------
mdr_conditional_entropy: float
The MDR conditional entropy calculated according to the equation H(XY|labels) = H(XY,labels) - H(labels)
"""
return conditional_entropy(_mdr_predict(X, Y, labels), labels, base=base) | Calculates the MDR conditional entropy, H(XY|labels), in the given base
MDR conditional entropy is calculated by combining variables X and Y into a single MDR model then calculating
the entropy of the resulting model's predictions conditional on the provided labels.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
base: integer (default: 2)
The base in which to calculate MDR conditional entropy
Returns
----------
mdr_conditional_entropy: float
        The MDR conditional entropy calculated according to the equation H(XY|labels) = H(XY,labels) - H(labels) | Below is the instruction that describes the task:
### Input:
Calculates the MDR conditional entropy, H(XY|labels), in the given base
MDR conditional entropy is calculated by combining variables X and Y into a single MDR model then calculating
the entropy of the resulting model's predictions conditional on the provided labels.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
base: integer (default: 2)
The base in which to calculate MDR conditional entropy
Returns
----------
mdr_conditional_entropy: float
The MDR conditional entropy calculated according to the equation H(XY|labels) = H(XY,labels) - H(labels)
### Response:
def mdr_conditional_entropy(X, Y, labels, base=2):
"""Calculates the MDR conditional entropy, H(XY|labels), in the given base
MDR conditional entropy is calculated by combining variables X and Y into a single MDR model then calculating
the entropy of the resulting model's predictions conditional on the provided labels.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
base: integer (default: 2)
The base in which to calculate MDR conditional entropy
Returns
----------
mdr_conditional_entropy: float
The MDR conditional entropy calculated according to the equation H(XY|labels) = H(XY,labels) - H(labels)
"""
return conditional_entropy(_mdr_predict(X, Y, labels), labels, base=base) |
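A toy usage sketch with made-up binary features and labels; it relies on the surrounding module's _mdr_predict and conditional_entropy helpers:

import numpy as np

X = np.array([0, 0, 1, 1, 0, 1, 0, 1])
Y = np.array([0, 1, 0, 1, 0, 1, 1, 0])
labels = np.array([0, 1, 1, 0, 0, 0, 1, 1])

print(mdr_conditional_entropy(X, Y, labels, base=2))  # entropy in bits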
def fragment6(pkt, fragSize):
"""
Performs fragmentation of an IPv6 packet. Provided packet ('pkt') must
already contain an IPv6ExtHdrFragment() class. 'fragSize' argument is the
expected maximum size of fragments (MTU). The list of packets is returned.
If packet does not contain an IPv6ExtHdrFragment class, it is returned in
result list.
"""
pkt = pkt.copy()
if IPv6ExtHdrFragment not in pkt:
# TODO : automatically add a fragment before upper Layer
# at the moment, we do nothing and return initial packet
# as single element of a list
return [pkt]
# If the payload is bigger than 65535, a Jumbo payload must be used, as
# an IPv6 packet can't be bigger than 65535 bytes.
if len(raw(pkt[IPv6ExtHdrFragment])) > 65535:
warning("An IPv6 packet can'be bigger than 65535, please use a Jumbo payload.") # noqa: E501
return []
s = raw(pkt) # for instantiation to get upper layer checksum right
if len(s) <= fragSize:
return [pkt]
# Fragmentable part : fake IPv6 for Fragmentable part length computation
fragPart = pkt[IPv6ExtHdrFragment].payload
tmp = raw(IPv6(src="::1", dst="::1") / fragPart)
fragPartLen = len(tmp) - 40 # basic IPv6 header length
fragPartStr = s[-fragPartLen:]
# Grab Next Header for use in Fragment Header
nh = pkt[IPv6ExtHdrFragment].nh
# Keep fragment header
fragHeader = pkt[IPv6ExtHdrFragment]
del fragHeader.payload # detach payload
# Unfragmentable Part
unfragPartLen = len(s) - fragPartLen - 8
unfragPart = pkt
del pkt[IPv6ExtHdrFragment].underlayer.payload # detach payload
# Cut the fragmentable part to fit fragSize. Inner fragments have
# a length that is an integer multiple of 8 octets. last Frag MTU
# can be anything below MTU
lastFragSize = fragSize - unfragPartLen - 8
innerFragSize = lastFragSize - (lastFragSize % 8)
if lastFragSize <= 0 or innerFragSize == 0:
warning("Provided fragment size value is too low. " +
"Should be more than %d" % (unfragPartLen + 8))
return [unfragPart / fragHeader / fragPart]
remain = fragPartStr
res = []
    fragOffset = 0  # offset, incremented during creation
fragId = random.randint(0, 0xffffffff) # random id ...
if fragHeader.id is not None: # ... except id provided by user
fragId = fragHeader.id
fragHeader.m = 1
fragHeader.id = fragId
fragHeader.nh = nh
# Main loop : cut, fit to FRAGSIZEs, fragOffset, Id ...
while True:
if (len(remain) > lastFragSize):
tmp = remain[:innerFragSize]
remain = remain[innerFragSize:]
fragHeader.offset = fragOffset # update offset
fragOffset += (innerFragSize // 8) # compute new one
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart / fragHeader / conf.raw_layer(load=tmp)
res.append(tempo)
else:
            fragHeader.offset = fragOffset  # update offset
fragHeader.m = 0
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart / fragHeader / conf.raw_layer(load=remain)
res.append(tempo)
break
return res | Performs fragmentation of an IPv6 packet. Provided packet ('pkt') must
already contain an IPv6ExtHdrFragment() class. 'fragSize' argument is the
expected maximum size of fragments (MTU). The list of packets is returned.
If packet does not contain an IPv6ExtHdrFragment class, it is returned in
    result list. | Below is the instruction that describes the task:
### Input:
Performs fragmentation of an IPv6 packet. Provided packet ('pkt') must
already contain an IPv6ExtHdrFragment() class. 'fragSize' argument is the
expected maximum size of fragments (MTU). The list of packets is returned.
If packet does not contain an IPv6ExtHdrFragment class, it is returned in
result list.
### Response:
def fragment6(pkt, fragSize):
"""
Performs fragmentation of an IPv6 packet. Provided packet ('pkt') must
already contain an IPv6ExtHdrFragment() class. 'fragSize' argument is the
expected maximum size of fragments (MTU). The list of packets is returned.
If packet does not contain an IPv6ExtHdrFragment class, it is returned in
result list.
"""
pkt = pkt.copy()
if IPv6ExtHdrFragment not in pkt:
# TODO : automatically add a fragment before upper Layer
# at the moment, we do nothing and return initial packet
# as single element of a list
return [pkt]
# If the payload is bigger than 65535, a Jumbo payload must be used, as
# an IPv6 packet can't be bigger than 65535 bytes.
if len(raw(pkt[IPv6ExtHdrFragment])) > 65535:
warning("An IPv6 packet can'be bigger than 65535, please use a Jumbo payload.") # noqa: E501
return []
s = raw(pkt) # for instantiation to get upper layer checksum right
if len(s) <= fragSize:
return [pkt]
# Fragmentable part : fake IPv6 for Fragmentable part length computation
fragPart = pkt[IPv6ExtHdrFragment].payload
tmp = raw(IPv6(src="::1", dst="::1") / fragPart)
fragPartLen = len(tmp) - 40 # basic IPv6 header length
fragPartStr = s[-fragPartLen:]
# Grab Next Header for use in Fragment Header
nh = pkt[IPv6ExtHdrFragment].nh
# Keep fragment header
fragHeader = pkt[IPv6ExtHdrFragment]
del fragHeader.payload # detach payload
# Unfragmentable Part
unfragPartLen = len(s) - fragPartLen - 8
unfragPart = pkt
del pkt[IPv6ExtHdrFragment].underlayer.payload # detach payload
# Cut the fragmentable part to fit fragSize. Inner fragments have
# a length that is an integer multiple of 8 octets. last Frag MTU
# can be anything below MTU
lastFragSize = fragSize - unfragPartLen - 8
innerFragSize = lastFragSize - (lastFragSize % 8)
if lastFragSize <= 0 or innerFragSize == 0:
warning("Provided fragment size value is too low. " +
"Should be more than %d" % (unfragPartLen + 8))
return [unfragPart / fragHeader / fragPart]
remain = fragPartStr
res = []
    fragOffset = 0  # offset, incremented during creation
fragId = random.randint(0, 0xffffffff) # random id ...
if fragHeader.id is not None: # ... except id provided by user
fragId = fragHeader.id
fragHeader.m = 1
fragHeader.id = fragId
fragHeader.nh = nh
# Main loop : cut, fit to FRAGSIZEs, fragOffset, Id ...
while True:
if (len(remain) > lastFragSize):
tmp = remain[:innerFragSize]
remain = remain[innerFragSize:]
fragHeader.offset = fragOffset # update offset
fragOffset += (innerFragSize // 8) # compute new one
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart / fragHeader / conf.raw_layer(load=tmp)
res.append(tempo)
else:
            fragHeader.offset = fragOffset  # update offset
fragHeader.m = 0
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart / fragHeader / conf.raw_layer(load=remain)
res.append(tempo)
break
return res |
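A usage sketch with Scapy (the destination address uses a documentation prefix; 1280 bytes is the IPv6 minimum MTU):

from scapy.all import IPv6, IPv6ExtHdrFragment, ICMPv6EchoRequest, raw

pkt = (IPv6(dst='2001:db8::1') / IPv6ExtHdrFragment() /
       ICMPv6EchoRequest(data='A' * 3000))
frags = fragment6(pkt, 1280)
print([len(raw(f)) for f in frags])  # every fragment fits within 1280 bytes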
def add_to_results(self, data, label, results):
"""
responsible for updating the running `results` variable with the
data from this queryset/serializer combo
"""
raise NotImplementedError(
'{} must specify how to add data to the running results tally '
'by overriding the `add_to_results` method.'.format(
self.__class__.__name__
)
) | responsible for updating the running `results` variable with the
        data from this queryset/serializer combo | Below is the instruction that describes the task:
### Input:
responsible for updating the running `results` variable with the
data from this queryset/serializer combo
### Response:
def add_to_results(self, data, label, results):
"""
responsible for updating the running `results` variable with the
data from this queryset/serializer combo
"""
raise NotImplementedError(
'{} must specify how to add data to the running results tally '
'by overriding the `add_to_results` method.'.format(
self.__class__.__name__
)
) |
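A minimal concrete override, sketching one reasonable shape for the shared results tally (the class names are hypothetical):

class UserAndGroupSearchView(AggregateSearchViewBase):  # hypothetical base class
    def add_to_results(self, data, label, results):
        # Keep each serializer's output under its own label.
        results[label] = data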
def MeetsConditions(knowledge_base, source):
"""Check conditions on the source."""
source_conditions_met = True
os_conditions = ConvertSupportedOSToConditions(source)
if os_conditions:
source.conditions.append(os_conditions)
for condition in source.conditions:
source_conditions_met &= artifact_utils.CheckCondition(
condition, knowledge_base)
    return source_conditions_met | Check conditions on the source. | Below is the instruction that describes the task:
### Input:
Check conditions on the source.
### Response:
def MeetsConditions(knowledge_base, source):
"""Check conditions on the source."""
source_conditions_met = True
os_conditions = ConvertSupportedOSToConditions(source)
if os_conditions:
source.conditions.append(os_conditions)
for condition in source.conditions:
source_conditions_met &= artifact_utils.CheckCondition(
condition, knowledge_base)
return source_conditions_met |
def voxel_count(dset,p=None,positive_only=False,mask=None,ROI=None):
''' returns the number of non-zero voxels
:p: threshold the dataset at the given *p*-value, then count
:positive_only: only count positive values
:mask: count within the given mask
:ROI: only use the ROI with the given value (or list of values) within the mask
if ROI is 'all' then return the voxel count of each ROI
as a dictionary
'''
if p:
dset = nl.thresh(dset,p,positive_only)
else:
if positive_only:
dset = nl.calc(dset,'step(a)')
count = 0
devnull = open(os.devnull,"w")
if mask:
cmd = ['3dROIstats','-1Dformat','-nomeanout','-nobriklab', '-nzvoxels']
cmd += ['-mask',str(mask),str(dset)]
out = subprocess.check_output(cmd,stderr=devnull).split('\n')
if len(out)<4:
return 0
rois = [int(x.replace('NZcount_','')) for x in out[1].strip()[1:].split()]
counts = [int(x.replace('NZcount_','')) for x in out[3].strip().split()]
count_dict = None
if ROI==None:
ROI = rois
if ROI=='all':
count_dict = {}
ROI = rois
else:
if not isinstance(ROI,list):
ROI = [ROI]
for r in ROI:
if r in rois:
roi_count = counts[rois.index(r)]
if count_dict!=None:
count_dict[r] = roi_count
else:
count += roi_count
else:
cmd = ['3dBrickStat', '-slow', '-count', '-non-zero', str(dset)]
count = int(subprocess.check_output(cmd,stderr=devnull).strip())
if count_dict:
return count_dict
return count | returns the number of non-zero voxels
:p: threshold the dataset at the given *p*-value, then count
:positive_only: only count positive values
:mask: count within the given mask
:ROI: only use the ROI with the given value (or list of values) within the mask
if ROI is 'all' then return the voxel count of each ROI
        as a dictionary | Below is the instruction that describes the task:
### Input:
returns the number of non-zero voxels
:p: threshold the dataset at the given *p*-value, then count
:positive_only: only count positive values
:mask: count within the given mask
:ROI: only use the ROI with the given value (or list of values) within the mask
if ROI is 'all' then return the voxel count of each ROI
as a dictionary
### Response:
def voxel_count(dset,p=None,positive_only=False,mask=None,ROI=None):
''' returns the number of non-zero voxels
:p: threshold the dataset at the given *p*-value, then count
:positive_only: only count positive values
:mask: count within the given mask
:ROI: only use the ROI with the given value (or list of values) within the mask
if ROI is 'all' then return the voxel count of each ROI
as a dictionary
'''
if p:
dset = nl.thresh(dset,p,positive_only)
else:
if positive_only:
dset = nl.calc(dset,'step(a)')
count = 0
devnull = open(os.devnull,"w")
if mask:
cmd = ['3dROIstats','-1Dformat','-nomeanout','-nobriklab', '-nzvoxels']
cmd += ['-mask',str(mask),str(dset)]
out = subprocess.check_output(cmd,stderr=devnull).split('\n')
if len(out)<4:
return 0
rois = [int(x.replace('NZcount_','')) for x in out[1].strip()[1:].split()]
counts = [int(x.replace('NZcount_','')) for x in out[3].strip().split()]
count_dict = None
if ROI==None:
ROI = rois
if ROI=='all':
count_dict = {}
ROI = rois
else:
if not isinstance(ROI,list):
ROI = [ROI]
for r in ROI:
if r in rois:
roi_count = counts[rois.index(r)]
if count_dict!=None:
count_dict[r] = roi_count
else:
count += roi_count
else:
cmd = ['3dBrickStat', '-slow', '-count', '-non-zero', str(dset)]
count = int(subprocess.check_output(cmd,stderr=devnull).strip())
if count_dict:
return count_dict
return count |
def poll_integration_information_for_waiting_integration_alerts():
"""poll_integration_information_for_waiting_integration_alerts."""
if not polling_integration_alerts:
return
logger.debug("Polling information for waiting integration alerts")
for integration_alert in polling_integration_alerts:
configured_integration = integration_alert.configured_integration
integration = configured_integration.integration
polling_duration = integration.polling_duration
if get_current_datetime_utc() - integration_alert.send_time > polling_duration:
logger.debug("Polling duration expired for integration alert %s", integration_alert)
integration_alert.status = IntegrationAlertStatuses.ERROR_POLLING.name
else:
integration_alert.status = IntegrationAlertStatuses.IN_POLLING.name
            poll_integration_alert_data(integration_alert) | poll_integration_information_for_waiting_integration_alerts. | Below is the instruction that describes the task:
### Input:
poll_integration_information_for_waiting_integration_alerts.
### Response:
def poll_integration_information_for_waiting_integration_alerts():
"""poll_integration_information_for_waiting_integration_alerts."""
if not polling_integration_alerts:
return
logger.debug("Polling information for waiting integration alerts")
for integration_alert in polling_integration_alerts:
configured_integration = integration_alert.configured_integration
integration = configured_integration.integration
polling_duration = integration.polling_duration
if get_current_datetime_utc() - integration_alert.send_time > polling_duration:
logger.debug("Polling duration expired for integration alert %s", integration_alert)
integration_alert.status = IntegrationAlertStatuses.ERROR_POLLING.name
else:
integration_alert.status = IntegrationAlertStatuses.IN_POLLING.name
poll_integration_alert_data(integration_alert) |
def _get_bucket_region(self, bucket_name):
"""
Get region based on the bucket name.
:param bucket_name: Bucket name for which region will be fetched.
:return: Region of bucket name.
"""
# Region set in constructor, return right here.
if self._region:
return self._region
# get bucket location for Amazon S3.
region = 'us-east-1' # default to US standard.
if bucket_name in self._region_map:
region = self._region_map[bucket_name]
else:
region = self._get_bucket_location(bucket_name)
self._region_map[bucket_name] = region
# Success.
return region | Get region based on the bucket name.
:param bucket_name: Bucket name for which region will be fetched.
    :return: Region of bucket name. | Below is the instruction that describes the task:
### Input:
Get region based on the bucket name.
:param bucket_name: Bucket name for which region will be fetched.
:return: Region of bucket name.
### Response:
def _get_bucket_region(self, bucket_name):
"""
Get region based on the bucket name.
:param bucket_name: Bucket name for which region will be fetched.
:return: Region of bucket name.
"""
# Region set in constructor, return right here.
if self._region:
return self._region
# get bucket location for Amazon S3.
region = 'us-east-1' # default to US standard.
if bucket_name in self._region_map:
region = self._region_map[bucket_name]
else:
region = self._get_bucket_location(bucket_name)
self._region_map[bucket_name] = region
# Success.
return region |
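The method above is a lazy per-bucket cache around a region lookup. The same pattern in isolation, with the lookup stubbed out (nothing here uses the real S3 client):

class RegionCache:
    def __init__(self, lookup, default=None):
        self._lookup = lookup    # called once per unknown bucket
        self._default = default  # a fixed region short-circuits everything
        self._map = {}

    def get(self, bucket):
        if self._default:
            return self._default
        if bucket not in self._map:  # first miss populates the cache
            self._map[bucket] = self._lookup(bucket)
        return self._map[bucket]

cache = RegionCache(lookup=lambda b: "eu-west-1")
print(cache.get("photos"), cache.get("photos"))  # lookup runs only once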
def snapshots():
'''
List current description snapshots.
CLI Example:
.. code-block:: bash
salt myminion inspector.snapshots
'''
try:
return _("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile'])).db.list()
except InspectorSnapshotException as err:
raise CommandExecutionError(err)
except Exception as err:
log.error(_get_error_message(err))
raise Exception(err) | List current description snapshots.
CLI Example:
.. code-block:: bash
    salt myminion inspector.snapshots | Below is the instruction that describes the task:
### Input:
List current description snapshots.
CLI Example:
.. code-block:: bash
salt myminion inspector.snapshots
### Response:
def snapshots():
'''
List current description snapshots.
CLI Example:
.. code-block:: bash
salt myminion inspector.snapshots
'''
try:
return _("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile'])).db.list()
except InspectorSnapshotException as err:
raise CommandExecutionError(err)
except Exception as err:
log.error(_get_error_message(err))
raise Exception(err) |
def htmlFormat(output, pathParts = (), statDict = None, query = None):
"""Formats as HTML, writing to the given object."""
statDict = statDict or scales.getStats()
if query:
statDict = runQuery(statDict, query)
  _htmlRenderDict(pathParts, statDict, output) | Formats as HTML, writing to the given object. | Below is the instruction that describes the task:
### Input:
Formats as HTML, writing to the given object.
### Response:
def htmlFormat(output, pathParts = (), statDict = None, query = None):
"""Formats as HTML, writing to the given object."""
statDict = statDict or scales.getStats()
if query:
statDict = runQuery(statDict, query)
_htmlRenderDict(pathParts, statDict, output) |
def area_uri(self, area_uuid):
"""
Return the URI for an Upload Area
:param area_uuid: UUID of area for which we want URI
:return: Upload Area URI object
:rtype: UploadAreaURI
:raises UploadException: if area does not exist
"""
if area_uuid not in self.areas:
raise UploadException("I don't know about area {uuid}".format(uuid=area_uuid))
return UploadAreaURI(self._config.upload.areas[area_uuid]['uri']) | Return the URI for an Upload Area
:param area_uuid: UUID of area for which we want URI
:return: Upload Area URI object
:rtype: UploadAreaURI
    :raises UploadException: if area does not exist | Below is the instruction that describes the task:
### Input:
Return the URI for an Upload Area
:param area_uuid: UUID of area for which we want URI
:return: Upload Area URI object
:rtype: UploadAreaURI
:raises UploadException: if area does not exist
### Response:
def area_uri(self, area_uuid):
"""
Return the URI for an Upload Area
:param area_uuid: UUID of area for which we want URI
:return: Upload Area URI object
:rtype: UploadAreaURI
:raises UploadException: if area does not exist
"""
if area_uuid not in self.areas:
raise UploadException("I don't know about area {uuid}".format(uuid=area_uuid))
return UploadAreaURI(self._config.upload.areas[area_uuid]['uri']) |
def kill_log_monitor(self, check_alive=True):
"""Kill the log monitor.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
"""
self._kill_process_type(
ray_constants.PROCESS_TYPE_LOG_MONITOR, check_alive=check_alive) | Kill the log monitor.
Args:
check_alive (bool): Raise an exception if the process was already
        dead. | Below is the instruction that describes the task:
### Input:
Kill the log monitor.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
### Response:
def kill_log_monitor(self, check_alive=True):
"""Kill the log monitor.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
"""
self._kill_process_type(
ray_constants.PROCESS_TYPE_LOG_MONITOR, check_alive=check_alive) |
def write_temp_file(text=""):
"""Create a new temporary file and write some initial text to it.
:param text: the text to write to the temp file
:type text: str
:returns: the file name of the newly created temp file
:rtype: str
"""
with NamedTemporaryFile(mode='w+t', suffix='.yml', delete=False) \
as tempfile:
tempfile.write(text)
return tempfile.name | Create a new temporary file and write some initial text to it.
:param text: the text to write to the temp file
:type text: str
:returns: the file name of the newly created temp file
    :rtype: str | Below is the instruction that describes the task:
### Input:
Create a new temporary file and write some initial text to it.
:param text: the text to write to the temp file
:type text: str
:returns: the file name of the newly created temp file
:rtype: str
### Response:
def write_temp_file(text=""):
"""Create a new temporary file and write some initial text to it.
:param text: the text to write to the temp file
:type text: str
:returns: the file name of the newly created temp file
:rtype: str
"""
with NamedTemporaryFile(mode='w+t', suffix='.yml', delete=False) \
as tempfile:
tempfile.write(text)
return tempfile.name |
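write_temp_file is self-contained, so the same idea can be checked directly (a runnable sketch; note that delete=False leaves cleanup to the caller):

import os
from tempfile import NamedTemporaryFile

with NamedTemporaryFile(mode='w+t', suffix='.yml', delete=False) as tf:
    tf.write("key: value")
    name = tf.name
with open(name) as f:
    print(f.read())  # -> key: value
os.remove(name)      # caller owns cleanup because delete=False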
def create_aaaa_record(self, name, values, ttl=60, weight=None, region=None,
set_identifier=None):
"""
Creates an AAAA record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:keyword int weight: *For weighted record sets only*. Among resource record
sets that have the same combination of DNS name and type, a value
that determines what portion of traffic for the current resource
record set is routed to the associated location. Ranges from 0-255.
:keyword str region: *For latency-based record sets*. The Amazon EC2 region
where the resource that is specified in this resource record set
resides.
:keyword str set_identifier: *For weighted and latency resource record
sets only*. An identifier that differentiates among multiple
resource record sets that have the same combination of DNS name
and type. 1-128 chars.
:rtype: tuple
:returns: A tuple in the form of ``(rrset, change_info)``, where
``rrset`` is the newly created AAAAResourceRecordSet instance.
"""
self._halt_if_already_deleted()
# Grab the params/kwargs here for brevity's sake.
values = locals()
del values['self']
return self._add_record(AAAAResourceRecordSet, **values) | Creates an AAAA record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:keyword int weight: *For weighted record sets only*. Among resource record
sets that have the same combination of DNS name and type, a value
that determines what portion of traffic for the current resource
record set is routed to the associated location. Ranges from 0-255.
:keyword str region: *For latency-based record sets*. The Amazon EC2 region
where the resource that is specified in this resource record set
resides.
:keyword str set_identifier: *For weighted and latency resource record
sets only*. An identifier that differentiates among multiple
resource record sets that have the same combination of DNS name
and type. 1-128 chars.
:rtype: tuple
:returns: A tuple in the form of ``(rrset, change_info)``, where
        ``rrset`` is the newly created AAAAResourceRecordSet instance. | Below is the instruction that describes the task:
### Input:
Creates an AAAA record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:keyword int weight: *For weighted record sets only*. Among resource record
sets that have the same combination of DNS name and type, a value
that determines what portion of traffic for the current resource
record set is routed to the associated location. Ranges from 0-255.
:keyword str region: *For latency-based record sets*. The Amazon EC2 region
where the resource that is specified in this resource record set
resides.
:keyword str set_identifier: *For weighted and latency resource record
sets only*. An identifier that differentiates among multiple
resource record sets that have the same combination of DNS name
and type. 1-128 chars.
:rtype: tuple
:returns: A tuple in the form of ``(rrset, change_info)``, where
``rrset`` is the newly created AAAAResourceRecordSet instance.
### Response:
def create_aaaa_record(self, name, values, ttl=60, weight=None, region=None,
set_identifier=None):
"""
Creates an AAAA record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:keyword int weight: *For weighted record sets only*. Among resource record
sets that have the same combination of DNS name and type, a value
that determines what portion of traffic for the current resource
record set is routed to the associated location. Ranges from 0-255.
:keyword str region: *For latency-based record sets*. The Amazon EC2 region
where the resource that is specified in this resource record set
resides.
:keyword str set_identifier: *For weighted and latency resource record
sets only*. An identifier that differentiates among multiple
resource record sets that have the same combination of DNS name
and type. 1-128 chars.
:rtype: tuple
:returns: A tuple in the form of ``(rrset, change_info)``, where
``rrset`` is the newly created AAAAResourceRecordSet instance.
"""
self._halt_if_already_deleted()
# Grab the params/kwargs here for brevity's sake.
values = locals()
del values['self']
return self._add_record(AAAAResourceRecordSet, **values) |
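The values = locals(); del values['self'] idiom above forwards every parameter of the method as keyword arguments. The pattern in isolation (record kind and helper invented for illustration):

def _add_record(kind, **kwargs):
    return kind, kwargs

class Zone:
    def create_record(self, name, values, ttl=60, weight=None):
        params = locals()    # snapshot of all arguments, self included
        del params['self']   # drop self before forwarding
        return _add_record('A', **params)

print(Zone().create_record('example.org.', ['192.0.2.1']))
# ('A', {'name': 'example.org.', 'values': ['192.0.2.1'], 'ttl': 60, 'weight': None})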
def get_graph_by_ids(self, network_ids: List[int]) -> BELGraph:
"""Get a combine BEL Graph from a list of network identifiers."""
if len(network_ids) == 1:
return self.get_graph_by_id(network_ids[0])
log.debug('getting graph by identifiers: %s', network_ids)
graphs = self.get_graphs_by_ids(network_ids)
log.debug('getting union of graphs: %s', network_ids)
rv = union(graphs)
    return rv | Get a combined BEL Graph from a list of network identifiers. | Below is the instruction that describes the task:
### Input:
Get a combined BEL Graph from a list of network identifiers.
### Response:
def get_graph_by_ids(self, network_ids: List[int]) -> BELGraph:
"""Get a combine BEL Graph from a list of network identifiers."""
if len(network_ids) == 1:
return self.get_graph_by_id(network_ids[0])
log.debug('getting graph by identifiers: %s', network_ids)
graphs = self.get_graphs_by_ids(network_ids)
log.debug('getting union of graphs: %s', network_ids)
rv = union(graphs)
return rv |
def kremove(self, key, value=None):
"""Removes the given key/value from all elements.
If value is not specified, the whole key is removed.
If value is not None and the key is present but with a
different value, or if the key is not present, silently passes.
"""
for item in self:
if value is None:
# Just pop the key if present,
# otherwise return None
# (shortcut to ignore the exception)
item.pop(key, None)
else:
try:
# Use the key as a set
item[key].remove(value)
# If the set contains a single element
# just store the latter
if len(item[key]) == 1:
item[key] = item[key].pop()
except KeyError:
# This happens when the item
# does not contain the key
pass
except AttributeError:
# This happens when the key is not a set
# and shall be removed only if values match
if item[key] == value:
item.pop(key) | Removes the given key/value from all elements.
If value is not specified, the whole key is removed.
If value is not None and the key is present but with a
    different value, or if the key is not present, silently passes. | Below is the instruction that describes the task:
### Input:
Removes the given key/value from all elements.
If value is not specified, the whole key is removed.
If value is not None and the key is present but with a
different value, or if the key is not present, silently passes.
### Response:
def kremove(self, key, value=None):
"""Removes the given key/value from all elements.
If value is not specified, the whole key is removed.
If value is not None and the key is present but with a
different value, or if the key is not present, silently passes.
"""
for item in self:
if value is None:
# Just pop the key if present,
# otherwise return None
# (shortcut to ignore the exception)
item.pop(key, None)
else:
try:
# Use the key as a set
item[key].remove(value)
# If the set contains a single element
# just store the latter
if len(item[key]) == 1:
item[key] = item[key].pop()
except KeyError:
# This happens when the item
# does not contain the key
pass
except AttributeError:
# This happens when the key is not a set
# and shall be removed only if values match
if item[key] == value:
item.pop(key) |
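A runnable illustration of kremove's set-versus-scalar handling, inlined over invented data:

items = [{'tag': {'a', 'b'}}, {'tag': 'a'}, {'other': 1}]
for item in items:
    try:
        item['tag'].remove('b')              # works only when the value is a set
        if len(item['tag']) == 1:
            item['tag'] = item['tag'].pop()  # {'a'} collapses to 'a'
    except KeyError:
        pass                                 # key (or set member) absent: nothing to do
    except AttributeError:
        if item['tag'] == 'b':               # scalar value: drop only on exact match
            item.pop('tag')
print(items)  # [{'tag': 'a'}, {'tag': 'a'}, {'other': 1}]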
def sasl_mechanism(name, secure, preference = 50):
"""Class decorator generator for `ClientAuthenticator` or
`ServerAuthenticator` subclasses. Adds the class to the pyxmpp.sasl
mechanism registry.
:Parameters:
- `name`: SASL mechanism name
    - `secure`: if the mechanism can be considered secure - `True`
if it can be used over plain-text channel
- `preference`: mechanism preference level (the higher the better)
:Types:
- `name`: `unicode`
- `secure`: `bool`
- `preference`: `int`
"""
# pylint: disable-msg=W0212
def decorator(klass):
"""The decorator."""
klass._pyxmpp_sasl_secure = secure
klass._pyxmpp_sasl_preference = preference
if issubclass(klass, ClientAuthenticator):
_register_client_authenticator(klass, name)
elif issubclass(klass, ServerAuthenticator):
_register_server_authenticator(klass, name)
else:
raise TypeError("Not a ClientAuthenticator"
" or ServerAuthenticator class")
return klass
return decorator | Class decorator generator for `ClientAuthenticator` or
`ServerAuthenticator` subclasses. Adds the class to the pyxmpp.sasl
mechanism registry.
:Parameters:
- `name`: SASL mechanism name
    - `secure`: if the mechanism can be considered secure - `True`
if it can be used over plain-text channel
- `preference`: mechanism preference level (the higher the better)
:Types:
- `name`: `unicode`
- `secure`: `bool`
    - `preference`: `int` | Below is the instruction that describes the task:
### Input:
Class decorator generator for `ClientAuthenticator` or
`ServerAuthenticator` subclasses. Adds the class to the pyxmpp.sasl
mechanism registry.
:Parameters:
- `name`: SASL mechanism name
    - `secure`: if the mechanism can be considered secure - `True`
if it can be used over plain-text channel
- `preference`: mechanism preference level (the higher the better)
:Types:
- `name`: `unicode`
- `secure`: `bool`
- `preference`: `int`
### Response:
def sasl_mechanism(name, secure, preference = 50):
"""Class decorator generator for `ClientAuthenticator` or
`ServerAuthenticator` subclasses. Adds the class to the pyxmpp.sasl
mechanism registry.
:Parameters:
- `name`: SASL mechanism name
    - `secure`: if the mechanism can be considered secure - `True`
if it can be used over plain-text channel
- `preference`: mechanism preference level (the higher the better)
:Types:
- `name`: `unicode`
- `secure`: `bool`
- `preference`: `int`
"""
# pylint: disable-msg=W0212
def decorator(klass):
"""The decorator."""
klass._pyxmpp_sasl_secure = secure
klass._pyxmpp_sasl_preference = preference
if issubclass(klass, ClientAuthenticator):
_register_client_authenticator(klass, name)
elif issubclass(klass, ServerAuthenticator):
_register_server_authenticator(klass, name)
else:
raise TypeError("Not a ClientAuthenticator"
" or ServerAuthenticator class")
return klass
return decorator |
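sasl_mechanism is a parameterized class-registry decorator. A minimal standalone version of the pattern (registry, names, and metadata invented):

_REGISTRY = {}

def mechanism(name, secure, preference=50):
    """Register a class under `name`, stamping metadata on it."""
    def decorator(klass):
        klass._secure = secure
        klass._preference = preference
        _REGISTRY[name] = klass
        return klass
    return decorator

@mechanism("PLAIN", secure=False, preference=10)
class Plain:
    pass

print(_REGISTRY["PLAIN"]._preference)  # 10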
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories | Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
    `list` object containing ids of Show HN stories. | Below is the instruction that describes the task:
### Input:
Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
### Response:
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories |
def burstColumn(self, column, columnMatchingSegments, prevActiveCells,
prevWinnerCells, learn):
"""
Activates all of the cells in an unpredicted active column, chooses a winner
cell, and, if learning is turned on, learns on one segment, growing a new
segment if necessary.
@param column (int)
Index of bursting column.
@param columnMatchingSegments (iter)
Matching segments in this column, or None if there aren't any.
@param prevActiveCells (list)
Active cells in `t-1`.
@param prevWinnerCells (list)
Winner cells in `t-1`.
@param learn (bool)
Whether or not learning is enabled.
@return (tuple) Contains:
`cells` (iter),
`winnerCell` (int),
"""
start = self.cellsPerColumn * column
# Strip out destroyed cells before passing along to base _burstColumn()
cellsForColumn = [cellIdx
for cellIdx
in xrange(start, start + self.cellsPerColumn)
if cellIdx not in self.deadCells]
return self._burstColumn(
self.connections, self._random, self.lastUsedIterationForSegment, column,
columnMatchingSegments, prevActiveCells, prevWinnerCells, cellsForColumn,
self.numActivePotentialSynapsesForSegment, self.iteration,
self.maxNewSynapseCount, self.initialPermanence, self.permanenceIncrement,
self.permanenceDecrement, self.maxSegmentsPerCell,
self.maxSynapsesPerSegment, learn) | Activates all of the cells in an unpredicted active column, chooses a winner
cell, and, if learning is turned on, learns on one segment, growing a new
segment if necessary.
@param column (int)
Index of bursting column.
@param columnMatchingSegments (iter)
Matching segments in this column, or None if there aren't any.
@param prevActiveCells (list)
Active cells in `t-1`.
@param prevWinnerCells (list)
Winner cells in `t-1`.
@param learn (bool)
Whether or not learning is enabled.
@return (tuple) Contains:
`cells` (iter),
    `winnerCell` (int), | Below is the instruction that describes the task:
### Input:
Activates all of the cells in an unpredicted active column, chooses a winner
cell, and, if learning is turned on, learns on one segment, growing a new
segment if necessary.
@param column (int)
Index of bursting column.
@param columnMatchingSegments (iter)
Matching segments in this column, or None if there aren't any.
@param prevActiveCells (list)
Active cells in `t-1`.
@param prevWinnerCells (list)
Winner cells in `t-1`.
@param learn (bool)
Whether or not learning is enabled.
@return (tuple) Contains:
`cells` (iter),
`winnerCell` (int),
### Response:
def burstColumn(self, column, columnMatchingSegments, prevActiveCells,
prevWinnerCells, learn):
"""
Activates all of the cells in an unpredicted active column, chooses a winner
cell, and, if learning is turned on, learns on one segment, growing a new
segment if necessary.
@param column (int)
Index of bursting column.
@param columnMatchingSegments (iter)
Matching segments in this column, or None if there aren't any.
@param prevActiveCells (list)
Active cells in `t-1`.
@param prevWinnerCells (list)
Winner cells in `t-1`.
@param learn (bool)
Whether or not learning is enabled.
@return (tuple) Contains:
`cells` (iter),
`winnerCell` (int),
"""
start = self.cellsPerColumn * column
# Strip out destroyed cells before passing along to base _burstColumn()
cellsForColumn = [cellIdx
for cellIdx
in xrange(start, start + self.cellsPerColumn)
if cellIdx not in self.deadCells]
return self._burstColumn(
self.connections, self._random, self.lastUsedIterationForSegment, column,
columnMatchingSegments, prevActiveCells, prevWinnerCells, cellsForColumn,
self.numActivePotentialSynapsesForSegment, self.iteration,
self.maxNewSynapseCount, self.initialPermanence, self.permanenceIncrement,
self.permanenceDecrement, self.maxSegmentsPerCell,
self.maxSynapsesPerSegment, learn) |
def get_provider(vm_=None):
'''
Extract the provider name from vm
'''
if vm_ is None:
provider = __active_provider_name__ or 'ec2'
else:
provider = vm_.get('provider', 'ec2')
if ':' in provider:
prov_comps = provider.split(':')
provider = prov_comps[0]
    return provider | Extract the provider name from vm | Below is the instruction that describes the task:
### Input:
Extract the provider name from vm
### Response:
def get_provider(vm_=None):
'''
Extract the provider name from vm
'''
if vm_ is None:
provider = __active_provider_name__ or 'ec2'
else:
provider = vm_.get('provider', 'ec2')
if ':' in provider:
prov_comps = provider.split(':')
provider = prov_comps[0]
return provider |
def add_note(self, note):
"""Add a note to the selected tracks.
Everything container.Track supports in __add__ is accepted.
"""
for n in self.selected_tracks:
self.tracks[n] + note | Add a note to the selected tracks.
    Everything container.Track supports in __add__ is accepted. | Below is the instruction that describes the task:
### Input:
Add a note to the selected tracks.
Everything container.Track supports in __add__ is accepted.
### Response:
def add_note(self, note):
"""Add a note to the selected tracks.
Everything container.Track supports in __add__ is accepted.
"""
for n in self.selected_tracks:
self.tracks[n] + note |
def getTaskInfo(self, task_id, **kwargs):
"""
Load all information about a task and return a custom Task class.
Calls "getTaskInfo" XML-RPC (with request=True to get the full
information.)
:param task_id: ``int``, for example 12345
:returns: deferred that when fired returns a Task (Munch, dict-like)
object representing this Koji task, or none if no task was
found.
"""
kwargs['request'] = True
taskinfo = yield self.call('getTaskInfo', task_id, **kwargs)
task = Task.fromDict(taskinfo)
if task:
task.connection = self
defer.returnValue(task) | Load all information about a task and return a custom Task class.
Calls "getTaskInfo" XML-RPC (with request=True to get the full
information.)
:param task_id: ``int``, for example 12345
:returns: deferred that when fired returns a Task (Munch, dict-like)
object representing this Koji task, or none if no task was
        found. | Below is the instruction that describes the task:
### Input:
Load all information about a task and return a custom Task class.
Calls "getTaskInfo" XML-RPC (with request=True to get the full
information.)
:param task_id: ``int``, for example 12345
:returns: deferred that when fired returns a Task (Munch, dict-like)
object representing this Koji task, or none if no task was
found.
### Response:
def getTaskInfo(self, task_id, **kwargs):
"""
Load all information about a task and return a custom Task class.
Calls "getTaskInfo" XML-RPC (with request=True to get the full
information.)
:param task_id: ``int``, for example 12345
:returns: deferred that when fired returns a Task (Munch, dict-like)
object representing this Koji task, or none if no task was
found.
"""
kwargs['request'] = True
taskinfo = yield self.call('getTaskInfo', task_id, **kwargs)
task = Task.fromDict(taskinfo)
if task:
task.connection = self
defer.returnValue(task) |
def attr(*args, **kwargs):
'''
Set attributes on the current active tag context
'''
ctx = dom_tag._with_contexts[_get_thread_context()]
if ctx and ctx[-1]:
dicts = args + (kwargs,)
for d in dicts:
for attr, value in d.items():
ctx[-1].tag.set_attribute(*dom_tag.clean_pair(attr, value))
else:
        raise ValueError('not in a tag context') | Set attributes on the current active tag context | Below is the instruction that describes the task:
### Input:
Set attributes on the current active tag context
### Response:
def attr(*args, **kwargs):
'''
Set attributes on the current active tag context
'''
ctx = dom_tag._with_contexts[_get_thread_context()]
if ctx and ctx[-1]:
dicts = args + (kwargs,)
for d in dicts:
for attr, value in d.items():
ctx[-1].tag.set_attribute(*dom_tag.clean_pair(attr, value))
else:
raise ValueError('not in a tag context') |
def write_summary_cnts_goobjs(self, goobjs):
"""Write summary of level and depth counts for active GO Terms."""
cnts = self.get_cnts_levels_depths_recs(goobjs)
    self._write_summary_cnts(cnts) | Write summary of level and depth counts for active GO Terms. | Below is the instruction that describes the task:
### Input:
Write summary of level and depth counts for active GO Terms.
### Response:
def write_summary_cnts_goobjs(self, goobjs):
"""Write summary of level and depth counts for active GO Terms."""
cnts = self.get_cnts_levels_depths_recs(goobjs)
self._write_summary_cnts(cnts) |
def edit_message_live_location(latitude, longitude,
chat_id=None, message_id=None, inline_message_id=None, reply_markup=None,
**kwargs):
"""
Use this method to edit live location messages sent by the bot or via the bot (for inline bots).
A location can be edited until its live_period expires or editing is explicitly disabled by a call
to stopMessageLiveLocation.
On success, if the edited message was sent by the bot, the edited Message is returned, otherwise True is returned.
:param latitude: Latitude of location.
:param longitude: Longitude of location.
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param message_id: Required if inline_message_id is not specified. Identifier of the sent message
:param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message
:param reply_markup: Additional interface options. A JSON-serialized object for a
custom reply keyboard, instructions to hide keyboard or to
force a reply from the user.
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int or str
:type latitude: float
:type longitude: float
:type message_id: Integer
:type inline_message_id: string
:type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply
:returns: On success, if the edited message was sent by the bot, the edited Message is returned, otherwise True is returned.
:rtype: TelegramBotRPCRequest or Bool
"""
if not chat_id and not message_id and not inline_message_id:
raise ValueError("Must specify chat_id and message_id or inline_message_id")
if (chat_id and not message_id) or (not chat_id and message_id):
raise ValueError("Must specify chat_id and message_id together")
# required args
params = dict(
latitude=latitude,
longitude=longitude
)
# optional args
params.update(
_clean_params(
chat_id=chat_id,
message_id=message_id,
inline_message_id=inline_message_id,
reply_markup=reply_markup,
)
)
return TelegramBotRPCRequest('editMessageLiveLocation', params=params, on_result=Message.from_result, **kwargs) | Use this method to edit live location messages sent by the bot or via the bot (for inline bots).
A location can be edited until its live_period expires or editing is explicitly disabled by a call
to stopMessageLiveLocation.
On success, if the edited message was sent by the bot, the edited Message is returned, otherwise True is returned.
:param latitude: Latitude of location.
:param longitude: Longitude of location.
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param message_id: Required if inline_message_id is not specified. Identifier of the sent message
:param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message
:param reply_markup: Additional interface options. A JSON-serialized object for a
custom reply keyboard, instructions to hide keyboard or to
force a reply from the user.
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int or str
:type latitude: float
:type longitude: float
:type message_id: Integer
:type inline_message_id: string
:type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply
:returns: On success, if the edited message was sent by the bot, the edited Message is returned, otherwise True is returned.
    :rtype: TelegramBotRPCRequest or Bool | Below is the instruction that describes the task:
### Input:
Use this method to edit live location messages sent by the bot or via the bot (for inline bots).
A location can be edited until its live_period expires or editing is explicitly disabled by a call
to stopMessageLiveLocation.
On success, if the edited message was sent by the bot, the edited Message is returned, otherwise True is returned.
:param latitude: Latitude of location.
:param longitude: Longitude of location.
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param message_id: Required if inline_message_id is not specified. Identifier of the sent message
:param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message
:param reply_markup: Additional interface options. A JSON-serialized object for a
custom reply keyboard, instructions to hide keyboard or to
force a reply from the user.
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int or str
:type latitude: float
:type longitude: float
:type message_id: Integer
:type inline_message_id: string
:type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply
:returns: On success, if the edited message was sent by the bot, the edited Message is returned, otherwise True is returned.
:rtype: TelegramBotRPCRequest or Bool
### Response:
def edit_message_live_location(latitude, longitude,
chat_id=None, message_id=None, inline_message_id=None, reply_markup=None,
**kwargs):
"""
Use this method to edit live location messages sent by the bot or via the bot (for inline bots).
A location can be edited until its live_period expires or editing is explicitly disabled by a call
to stopMessageLiveLocation.
On success, if the edited message was sent by the bot, the edited Message is returned, otherwise True is returned.
:param latitude: Latitude of location.
:param longitude: Longitude of location.
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param message_id: Required if inline_message_id is not specified. Identifier of the sent message
:param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message
:param reply_markup: Additional interface options. A JSON-serialized object for a
custom reply keyboard, instructions to hide keyboard or to
force a reply from the user.
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int or str
:type latitude: float
:type longitude: float
:type message_id: Integer
:type inline_message_id: string
:type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply
:returns: On success, if the edited message was sent by the bot, the edited Message is returned, otherwise True is returned.
:rtype: TelegramBotRPCRequest or Bool
"""
if not chat_id and not message_id and not inline_message_id:
raise ValueError("Must specify chat_id and message_id or inline_message_id")
if (chat_id and not message_id) or (not chat_id and message_id):
raise ValueError("Must specify chat_id and message_id together")
# required args
params = dict(
latitude=latitude,
longitude=longitude
)
# optional args
params.update(
_clean_params(
chat_id=chat_id,
message_id=message_id,
inline_message_id=inline_message_id,
reply_markup=reply_markup,
)
)
return TelegramBotRPCRequest('editMessageLiveLocation', params=params, on_result=Message.from_result, **kwargs) |
def destroy_iam(app='', env='dev', **_):
"""Destroy IAM Resources.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment, i.e. dev, stage, prod.
Returns:
True upon successful completion.
"""
session = boto3.Session(profile_name=env)
client = session.client('iam')
generated = get_details(env=env, app=app)
generated_iam = generated.iam()
app_details = collections.namedtuple('AppDetails', generated_iam.keys())
details = app_details(**generated_iam)
LOG.debug('Application details: %s', details)
resource_action(
client,
action='remove_user_from_group',
log_format='Removed user from group: %(UserName)s ~> %(GroupName)s',
GroupName=details.group,
UserName=details.user)
resource_action(client, action='delete_user', log_format='Destroyed user: %(UserName)s', UserName=details.user)
resource_action(client, action='delete_group', log_format='Destroyed group: %(GroupName)s', GroupName=details.group)
resource_action(
client,
action='remove_role_from_instance_profile',
log_format='Destroyed Instance Profile from Role: '
'%(InstanceProfileName)s ~> %(RoleName)s',
InstanceProfileName=details.profile,
RoleName=details.role)
resource_action(
client,
action='delete_instance_profile',
log_format='Destroyed Instance Profile: %(InstanceProfileName)s',
InstanceProfileName=details.profile)
role_policies = []
try:
role_policies = resource_action(
client,
action='list_role_policies',
log_format='Found Role Policies for %(RoleName)s.',
RoleName=details.role)['PolicyNames']
except TypeError:
LOG.info('Role %s not found.', details.role)
for policy in role_policies:
resource_action(
client,
action='delete_role_policy',
log_format='Removed Inline Policy from Role: '
'%(PolicyName)s ~> %(RoleName)s',
RoleName=details.role,
PolicyName=policy)
attached_role_policies = []
try:
attached_role_policies = resource_action(
client,
action='list_attached_role_policies',
            log_format='Found attached Role Policies for %(RoleName)s.',
RoleName=details.role)['AttachedPolicies']
except TypeError:
LOG.info('Role %s not found.', details.role)
for policy in attached_role_policies:
resource_action(
client,
action='detach_role_policy',
log_format='Detached Policy from Role: '
'%(PolicyArn)s ~> %(RoleName)s',
RoleName=details.role,
PolicyArn=policy['PolicyArn'])
resource_action(client, action='delete_role', log_format='Destroyed Role: %(RoleName)s', RoleName=details.role) | Destroy IAM Resources.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment, i.e. dev, stage, prod.
Returns:
        True upon successful completion. | Below is the instruction that describes the task:
### Input:
Destroy IAM Resources.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment, i.e. dev, stage, prod.
Returns:
True upon successful completion.
### Response:
def destroy_iam(app='', env='dev', **_):
"""Destroy IAM Resources.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment, i.e. dev, stage, prod.
Returns:
True upon successful completion.
"""
session = boto3.Session(profile_name=env)
client = session.client('iam')
generated = get_details(env=env, app=app)
generated_iam = generated.iam()
app_details = collections.namedtuple('AppDetails', generated_iam.keys())
details = app_details(**generated_iam)
LOG.debug('Application details: %s', details)
resource_action(
client,
action='remove_user_from_group',
log_format='Removed user from group: %(UserName)s ~> %(GroupName)s',
GroupName=details.group,
UserName=details.user)
resource_action(client, action='delete_user', log_format='Destroyed user: %(UserName)s', UserName=details.user)
resource_action(client, action='delete_group', log_format='Destroyed group: %(GroupName)s', GroupName=details.group)
resource_action(
client,
action='remove_role_from_instance_profile',
log_format='Destroyed Instance Profile from Role: '
'%(InstanceProfileName)s ~> %(RoleName)s',
InstanceProfileName=details.profile,
RoleName=details.role)
resource_action(
client,
action='delete_instance_profile',
log_format='Destroyed Instance Profile: %(InstanceProfileName)s',
InstanceProfileName=details.profile)
role_policies = []
try:
role_policies = resource_action(
client,
action='list_role_policies',
log_format='Found Role Policies for %(RoleName)s.',
RoleName=details.role)['PolicyNames']
except TypeError:
LOG.info('Role %s not found.', details.role)
for policy in role_policies:
resource_action(
client,
action='delete_role_policy',
log_format='Removed Inline Policy from Role: '
'%(PolicyName)s ~> %(RoleName)s',
RoleName=details.role,
PolicyName=policy)
attached_role_policies = []
try:
attached_role_policies = resource_action(
client,
action='list_attached_role_policies',
            log_format='Found attached Role Policies for %(RoleName)s.',
RoleName=details.role)['AttachedPolicies']
except TypeError:
LOG.info('Role %s not found.', details.role)
for policy in attached_role_policies:
resource_action(
client,
action='detach_role_policy',
log_format='Detached Policy from Role: '
'%(PolicyArn)s ~> %(RoleName)s',
RoleName=details.role,
PolicyArn=policy['PolicyArn'])
resource_action(client, action='delete_role', log_format='Destroyed Role: %(RoleName)s', RoleName=details.role) |
def convertAndMake(converter, handler):
"""Convert with location."""
def convertAction(loc, value):
return handler(loc, converter(value))
    return convertAction | Convert with location. | Below is the instruction that describes the task:
### Input:
Convert with location.
### Response:
def convertAndMake(converter, handler):
"""Convert with location."""
def convertAction(loc, value):
return handler(loc, converter(value))
return convertAction |
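convertAndMake just composes a converter with a location-aware handler via a closure; a runnable demonstration (names lowercased for the sketch):

def convert_and_make(converter, handler):
    def convert_action(loc, value):
        return handler(loc, converter(value))
    return convert_action

action = convert_and_make(int, lambda loc, v: (loc, v * 2))
print(action(7, "21"))  # (7, 42)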
def _offset_to_min(utc_offset):
'''
Helper function that converts the utc offset string into number of minutes
offset. Input is in form "[+-]?HHMM". Example valid inputs are "+0500"
"-0300" and "0800". These would return -300, 180, 480 respectively.
'''
match = re.match(r"^([+-])?(\d\d)(\d\d)$", utc_offset)
if not match:
raise SaltInvocationError("Invalid UTC offset")
sign = -1 if match.group(1) == '-' else 1
hours_offset = int(match.group(2))
minutes_offset = int(match.group(3))
total_offset = sign * (hours_offset * 60 + minutes_offset)
return total_offset | Helper function that converts the utc offset string into number of minutes
offset. Input is in form "[+-]?HHMM". Example valid inputs are "+0500"
"-0300" and "0800". These would return -300, 180, 480 respectively. | Below is the the instruction that describes the task:
### Input:
Helper function that converts the utc offset string into number of minutes
offset. Input is in form "[+-]?HHMM". Example valid inputs are "+0500"
"-0300" and "0800". These would return -300, 180, 480 respectively.
### Response:
def _offset_to_min(utc_offset):
'''
Helper function that converts the utc offset string into number of minutes
offset. Input is in form "[+-]?HHMM". Example valid inputs are "+0500"
"-0300" and "0800". These would return -300, 180, 480 respectively.
'''
match = re.match(r"^([+-])?(\d\d)(\d\d)$", utc_offset)
if not match:
raise SaltInvocationError("Invalid UTC offset")
sign = -1 if match.group(1) == '-' else 1
hours_offset = int(match.group(2))
minutes_offset = int(match.group(3))
total_offset = sign * (hours_offset * 60 + minutes_offset)
return total_offset |
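A standalone sketch of the same parsing, with a plain ValueError standing in for SaltInvocationError:

import re

def offset_to_min(utc_offset):
    match = re.match(r"^([+-])?(\d\d)(\d\d)$", utc_offset)
    if not match:
        raise ValueError("Invalid UTC offset")
    sign = -1 if match.group(1) == '-' else 1
    return sign * (int(match.group(2)) * 60 + int(match.group(3)))

print(offset_to_min("+0500"), offset_to_min("-0300"), offset_to_min("0800"))
# 300 -180 480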
def resume(self, trigger_duration=0):
"""Resumes pulse capture after an optional trigger pulse."""
if trigger_duration != 0:
self._mq.send("t%d" % trigger_duration, True, type=1)
else:
self._mq.send("r", True, type=1)
    self._paused = False | Resumes pulse capture after an optional trigger pulse. | Below is the instruction that describes the task:
### Input:
Resumes pulse capture after an optional trigger pulse.
### Response:
def resume(self, trigger_duration=0):
"""Resumes pulse capture after an optional trigger pulse."""
if trigger_duration != 0:
self._mq.send("t%d" % trigger_duration, True, type=1)
else:
self._mq.send("r", True, type=1)
self._paused = False |
def add(self, data, name=None):
''' Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used
'''
if name is None:
n = len(self.data)
while "Series %d"%n in self.data:
n += 1
name = "Series %d"%n
self.data[name] = data
return name | Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
If not supplied, generate a name of the form "Series ####"
Returns:
        str: the column name used | Below is the instruction that describes the task:
### Input:
Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used
### Response:
def add(self, data, name=None):
''' Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used
'''
if name is None:
n = len(self.data)
while "Series %d"%n in self.data:
n += 1
name = "Series %d"%n
self.data[name] = data
return name |
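The naming loop in add probes for the first unused "Series N" key; the same idea standalone:

def next_series_name(data):
    n = len(data)
    while "Series %d" % n in data:
        n += 1
    return "Series %d" % n

data = {"Series 0": [1], "Series 1": [2]}
print(next_series_name(data))  # Series 2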
def iterate_presentation_files(path=None, excludes=None, includes=None):
"""Iterates the repository presentation files relative to 'path',
not including themes. Note that 'includes' take priority."""
# Defaults
if includes is None:
includes = []
if excludes is None:
excludes = []
# Transform glob patterns to regular expressions
includes_pattern = r'|'.join([fnmatch.translate(x) for x in includes]) or r'$.'
excludes_pattern = r'|'.join([fnmatch.translate(x) for x in excludes]) or r'$.'
includes_re = re.compile(includes_pattern)
excludes_re = re.compile(excludes_pattern)
def included(root, name):
"""Returns True if the specified file is a presentation file."""
full_path = os.path.join(root, name)
# Explicitly included files takes priority
if includes_re.match(full_path):
return True
# Ignore special and excluded files
return (not specials_re.match(name)
and not excludes_re.match(full_path))
# Get a filtered list of paths to be built
for root, dirs, files in os.walk(path):
dirs[:] = [d for d in dirs if included(root, d)]
files = [f for f in files if included(root, f)]
for f in files:
yield os.path.relpath(os.path.join(root, f), path) | Iterates the repository presentation files relative to 'path',
    not including themes. Note that 'includes' take priority. | Below is the instruction that describes the task:
### Input:
Iterates the repository presentation files relative to 'path',
not including themes. Note that 'includes' take priority.
### Response:
def iterate_presentation_files(path=None, excludes=None, includes=None):
"""Iterates the repository presentation files relative to 'path',
not including themes. Note that 'includes' take priority."""
# Defaults
if includes is None:
includes = []
if excludes is None:
excludes = []
# Transform glob patterns to regular expressions
includes_pattern = r'|'.join([fnmatch.translate(x) for x in includes]) or r'$.'
excludes_pattern = r'|'.join([fnmatch.translate(x) for x in excludes]) or r'$.'
includes_re = re.compile(includes_pattern)
excludes_re = re.compile(excludes_pattern)
def included(root, name):
"""Returns True if the specified file is a presentation file."""
full_path = os.path.join(root, name)
# Explicitly included files takes priority
if includes_re.match(full_path):
return True
# Ignore special and excluded files
return (not specials_re.match(name)
and not excludes_re.match(full_path))
# Get a filtered list of paths to be built
for root, dirs, files in os.walk(path):
dirs[:] = [d for d in dirs if included(root, d)]
files = [f for f in files if included(root, f)]
for f in files:
yield os.path.relpath(os.path.join(root, f), path) |
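The include/exclude machinery above ORs glob patterns into one regex through fnmatch.translate; the trick in isolation:

import fnmatch
import re

patterns = ["*.md", "slides/*"]
combined = r"|".join(fnmatch.translate(p) for p in patterns) or r"$."  # r"$." matches nothing
rx = re.compile(combined)
print(bool(rx.match("intro.md")), bool(rx.match("notes.txt")))  # True False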
def prop_symbols(x):
"Return a list of all propositional symbols in x."
if not isinstance(x, Expr):
return []
elif is_prop_symbol(x.op):
return [x]
else:
return list(set(symbol for arg in x.args
                    for symbol in prop_symbols(arg))) | Return a list of all propositional symbols in x. | Below is the instruction that describes the task:
### Input:
Return a list of all propositional symbols in x.
### Response:
def prop_symbols(x):
"Return a list of all propositional symbols in x."
if not isinstance(x, Expr):
return []
elif is_prop_symbol(x.op):
return [x]
else:
return list(set(symbol for arg in x.args
for symbol in prop_symbols(arg))) |
def getPaths(urlOrPaths):
'''
Determines if the given URL in urlOrPaths is a URL or a file or directory. If it's
    a directory, it walks the directory and then finds all file paths in it, and adds them
too. If it's a file, it adds it to the paths. If it's a URL it just adds it to the path.
:param urlOrPaths: the url or path to be scanned
:return: ``list`` of paths
'''
if isinstance(urlOrPaths, basestring):
#FIXME: basestring is undefined
urlOrPaths = [urlOrPaths] # do not recursively walk over letters of a single path which can include "/"
paths = []
for eachUrlOrPaths in urlOrPaths:
if os.path.isdir(eachUrlOrPaths):
for root, directories, filenames in walk(eachUrlOrPaths):
for filename in filenames:
paths.append(os.path.join(root,filename))
else:
paths.append(eachUrlOrPaths)
return paths | Determines if the given URL in urlOrPaths is a URL or a file or directory. If it's
    a directory, it walks the directory and then finds all file paths in it, and adds them
too. If it's a file, it adds it to the paths. If it's a URL it just adds it to the path.
:param urlOrPaths: the url or path to be scanned
    :return: ``list`` of paths | Below is the instruction that describes the task:
### Input:
Determines if the given URL in urlOrPaths is a URL or a file or directory. If it's
a directory, it walks the directory and then finds all file paths in it, and adds them
too. If it's a file, it adds it to the paths. If it's a URL it just adds it to the path.
:param urlOrPaths: the url or path to be scanned
:return: ``list`` of paths
### Response:
def getPaths(urlOrPaths):
'''
Determines if the given URL in urlOrPaths is a URL or a file or directory. If it's
    a directory, it walks the directory and then finds all file paths in it, and adds them
too. If it's a file, it adds it to the paths. If it's a URL it just adds it to the path.
:param urlOrPaths: the url or path to be scanned
:return: ``list`` of paths
'''
if isinstance(urlOrPaths, basestring):
#FIXME: basestring is undefined
urlOrPaths = [urlOrPaths] # do not recursively walk over letters of a single path which can include "/"
paths = []
for eachUrlOrPaths in urlOrPaths:
if os.path.isdir(eachUrlOrPaths):
for root, directories, filenames in walk(eachUrlOrPaths):
for filename in filenames:
paths.append(os.path.join(root,filename))
else:
paths.append(eachUrlOrPaths)
return paths |
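A minimal standalone walk of the same shape as getPaths (URLs and plain files are simply passed through, as in the original):

import os

def expand_paths(items):
    paths = []
    for item in items:
        if os.path.isdir(item):
            for root, _dirs, files in os.walk(item):
                paths.extend(os.path.join(root, f) for f in files)
        else:
            paths.append(item)  # plain file or URL: keep as-is
    return paths

print(expand_paths(["not-a-dir.txt"]))  # ['not-a-dir.txt']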
def read_lua_file(dotted_module, path=None, context=None):
'''Load lua script from the stdnet/lib/lua directory'''
path = path or DEFAULT_LUA_PATH
bits = dotted_module.split('.')
bits[-1] += '.lua'
name = os.path.join(path, *bits)
with open(name) as f:
data = f.read()
if context:
data = data.format(context)
    return data | Load lua script from the stdnet/lib/lua directory | Below is the instruction that describes the task:
### Input:
Load lua script from the stdnet/lib/lua directory
### Response:
def read_lua_file(dotted_module, path=None, context=None):
'''Load lua script from the stdnet/lib/lua directory'''
path = path or DEFAULT_LUA_PATH
bits = dotted_module.split('.')
bits[-1] += '.lua'
name = os.path.join(path, *bits)
with open(name) as f:
data = f.read()
if context:
data = data.format(context)
return data |
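The dotted-module-to-path mapping in read_lua_file, demonstrated without touching the filesystem (base directory invented):

import os

def lua_path(dotted, base="lua"):
    bits = dotted.split('.')
    bits[-1] += '.lua'
    return os.path.join(base, *bits)

print(lua_path("tabletools.odm"))  # lua/tabletools/odm.lua on POSIX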
def view_show(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/views#show-view"
api_path = "/api/v2/views/{id}.json"
api_path = api_path.format(id=id)
    return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/views#show-view | Below is the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/core/views#show-view
### Response:
def view_show(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/views#show-view"
api_path = "/api/v2/views/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) |
def _keep_this(self, name):
"""Return True if there are to be no modifications to name."""
for keep_name in self.keep:
if name == keep_name:
return True
        return False | Return True if there are to be no modifications to name. | Below is the instruction that describes the task:
### Input:
Return True if there are to be no modifications to name.
### Response:
def _keep_this(self, name):
"""Return True if there are to be no modifications to name."""
for keep_name in self.keep:
if name == keep_name:
return True
return False |
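As a design note, the loop in _keep_this is behaviorally a membership test; an equivalent one-liner sketch:

def keep_this(keep, name):
    return name in keep

print(keep_this(["a", "b"], "a"), keep_this(["a", "b"], "c"))  # True False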
def make_caption(self, caption):
"""Adds/Substitutes the table's caption."""
if not hasattr(self, "caption"):
self(caption=Caption())
    return self.caption.empty()(caption) | Adds/Substitutes the table's caption. | Below is the instruction that describes the task:
### Input:
Adds/Substitutes the table's caption.
### Response:
def make_caption(self, caption):
"""Adds/Substitutes the table's caption."""
if not hasattr(self, "caption"):
self(caption=Caption())
return self.caption.empty()(caption) |
def send_location(self, peer: Peer, latitude: float, longitude: float, reply: int=None, on_success: callable=None,
reply_markup: botapi.ReplyMarkup=None):
"""
Send location to peer.
:param peer: Peer to send message to.
:param latitude: Latitude of the location.
:param longitude: Longitude of the location.
:param reply: Message object or message_id to reply to.
:param on_success: Callback to call when call is complete.
:type reply: int or Message
"""
pass | Send location to peer.
:param peer: Peer to send message to.
:param latitude: Latitude of the location.
:param longitude: Longitude of the location.
:param reply: Message object or message_id to reply to.
:param on_success: Callback to call when call is complete.
:type reply: int or Message | Below is the instruction that describes the task:
### Input:
Send location to peer.
:param peer: Peer to send message to.
:param latitude: Latitude of the location.
:param longitude: Longitude of the location.
:param reply: Message object or message_id to reply to.
:param on_success: Callback to call when call is complete.
:type reply: int or Message
### Response:
def send_location(self, peer: Peer, latitude: float, longitude: float, reply: int=None, on_success: callable=None,
reply_markup: botapi.ReplyMarkup=None):
"""
Send location to peer.
:param peer: Peer to send message to.
:param latitude: Latitude of the location.
:param longitude: Longitude of the location.
:param reply: Message object or message_id to reply to.
:param on_success: Callback to call when call is complete.
:type reply: int or Message
"""
pass |
def tisbod(ref, body, et):
"""
Return a 6x6 matrix that transforms states in inertial coordinates to
states in body-equator-and-prime-meridian coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tisbod_c.html
:param ref: ID of inertial reference frame to transform from.
:type ref: str
:param body: ID code of body.
:type body: int
:param et: Epoch of transformation.
:type et: float
:return: Transformation (state), inertial to prime meridian.
:rtype: 6x6-Element Array of floats
"""
ref = stypes.stringToCharP(ref)
body = ctypes.c_int(body)
et = ctypes.c_double(et)
retmatrix = stypes.emptyDoubleMatrix(x=6, y=6)
libspice.tisbod_c(ref, body, et, retmatrix)
return stypes.cMatrixToNumpy(retmatrix) | Return a 6x6 matrix that transforms states in inertial coordinates to
states in body-equator-and-prime-meridian coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tisbod_c.html
:param ref: ID of inertial reference frame to transform from.
:type ref: str
:param body: ID code of body.
:type body: int
:param et: Epoch of transformation.
:type et: float
:return: Transformation (state), inertial to prime meridian.
:rtype: 6x6-Element Array of floats | Below is the instruction that describes the task:
### Input:
Return a 6x6 matrix that transforms states in inertial coordinates to
states in body-equator-and-prime-meridian coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tisbod_c.html
:param ref: ID of inertial reference frame to transform from.
:type ref: str
:param body: ID code of body.
:type body: int
:param et: Epoch of transformation.
:type et: float
:return: Transformation (state), inertial to prime meridian.
:rtype: 6x6-Element Array of floats
### Response:
def tisbod(ref, body, et):
"""
Return a 6x6 matrix that transforms states in inertial coordinates to
states in body-equator-and-prime-meridian coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tisbod_c.html
:param ref: ID of inertial reference frame to transform from.
:type ref: str
:param body: ID code of body.
:type body: int
:param et: Epoch of transformation.
:type et: float
:return: Transformation (state), inertial to prime meridian.
:rtype: 6x6-Element Array of floats
"""
ref = stypes.stringToCharP(ref)
body = ctypes.c_int(body)
et = ctypes.c_double(et)
retmatrix = stypes.emptyDoubleMatrix(x=6, y=6)
libspice.tisbod_c(ref, body, et, retmatrix)
return stypes.cMatrixToNumpy(retmatrix) |
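A SpiceyPy usage sketch; it assumes the required kernels (an LSK and a PCK) are already furnished and that state_j2000 is a 6-element state vector on hand:
    et = spiceypy.str2et('2019-01-01T00:00:00')         # epoch in ephemeris seconds
    xform = tisbod('J2000', 399, et)                    # 6x6 map: J2000 -> Earth body-fixed
    state_bf = spiceypy.mxvg(xform, state_j2000, 6, 6)  # rotate the state into the body frame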
def load(obj, env=None, silent=None, key=None):
"""Reads and loads in to "settings" a single key or all keys from vault
:param obj: the settings instance
:param env: settings env default='DYNACONF'
:param silent: if errors should raise
:param key: if defined load a single key, else load all in env
:return: None
"""
client = get_client(obj)
env_list = _get_env_list(obj, env)
for env in env_list:
path = "/".join([obj.VAULT_PATH_FOR_DYNACONF, env]).replace("//", "/")
data = client.read(path)
if data:
# There seems to be a data dict within a data dict,
# extract the inner data
data = data.get("data", {}).get("data", {})
try:
if data and key:
value = parse_conf_data(data.get(key), tomlfy=True)
if value:
obj.logger.debug(
"vault_loader: loading by key: %s:%s (%s:%s)",
key,
"****",
IDENTIFIER,
path,
)
obj.set(key, value)
elif data:
obj.logger.debug(
"vault_loader: loading: %s (%s:%s)",
list(data.keys()),
IDENTIFIER,
path,
)
obj.update(data, loader_identifier=IDENTIFIER, tomlfy=True)
except Exception as e:
if silent:
if hasattr(obj, "logger"):
obj.logger.error(str(e))
return False
            raise | Reads and loads into "settings" a single key or all keys from vault
:param obj: the settings instance
:param env: settings env default='DYNACONF'
:param silent: if errors should raise
:param key: if defined load a single key, else load all in env
:return: None | Below is the instruction that describes the task:
### Input:
Reads and loads into "settings" a single key or all keys from vault
:param obj: the settings instance
:param env: settings env default='DYNACONF'
:param silent: if errors should raise
:param key: if defined load a single key, else load all in env
:return: None
### Response:
def load(obj, env=None, silent=None, key=None):
"""Reads and loads in to "settings" a single key or all keys from vault
:param obj: the settings instance
:param env: settings env default='DYNACONF'
:param silent: if errors should raise
:param key: if defined load a single key, else load all in env
:return: None
"""
client = get_client(obj)
env_list = _get_env_list(obj, env)
for env in env_list:
path = "/".join([obj.VAULT_PATH_FOR_DYNACONF, env]).replace("//", "/")
data = client.read(path)
if data:
# There seems to be a data dict within a data dict,
# extract the inner data
data = data.get("data", {}).get("data", {})
try:
if data and key:
value = parse_conf_data(data.get(key), tomlfy=True)
if value:
obj.logger.debug(
"vault_loader: loading by key: %s:%s (%s:%s)",
key,
"****",
IDENTIFIER,
path,
)
obj.set(key, value)
elif data:
obj.logger.debug(
"vault_loader: loading: %s (%s:%s)",
list(data.keys()),
IDENTIFIER,
path,
)
obj.update(data, loader_identifier=IDENTIFIER, tomlfy=True)
except Exception as e:
if silent:
if hasattr(obj, "logger"):
obj.logger.error(str(e))
return False
raise |
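A hedged sketch of driving this loader; `settings` stands for a Dynaconf instance already configured with the VAULT_* options:
    load(settings)                                   # load every key for the active envs
    load(settings, env='production')                 # restrict to a single env
    load(settings, key='DATABASE_URL', silent=True)  # one key; log errors instead of raising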
def __copyfile(source, destination):
"""Copy data and mode bits ("cp source destination").
The destination may be a directory.
Args:
source (str): Source file (file to copy).
destination (str): Destination file or directory (where to copy).
Returns:
bool: True if the operation is successful, False otherwise.
"""
logger.info("copyfile: %s -> %s" % (source, destination))
try:
__create_destdir(destination)
shutil.copy(source, destination)
return True
except Exception as e:
logger.error(
"copyfile: %s -> %s failed! Error: %s", source, destination, e
)
return False | Copy data and mode bits ("cp source destination").
The destination may be a directory.
Args:
source (str): Source file (file to copy).
destination (str): Destination file or directory (where to copy).
Returns:
bool: True if the operation is successful, False otherwise. | Below is the instruction that describes the task:
### Input:
Copy data and mode bits ("cp source destination").
The destination may be a directory.
Args:
source (str): Source file (file to copy).
destination (str): Destination file or directory (where to copy).
Returns:
bool: True if the operation is successful, False otherwise.
### Response:
def __copyfile(source, destination):
"""Copy data and mode bits ("cp source destination").
The destination may be a directory.
Args:
source (str): Source file (file to copy).
destination (str): Destination file or directory (where to copy).
Returns:
bool: True if the operation is successful, False otherwise.
"""
logger.info("copyfile: %s -> %s" % (source, destination))
try:
__create_destdir(destination)
shutil.copy(source, destination)
return True
except Exception as e:
logger.error(
"copyfile: %s -> %s failed! Error: %s", source, destination, e
)
return False |
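For illustration (the double-underscore name suggests module-internal use, so the call assumes code in the same module; both paths are made up):
    if __copyfile('/tmp/report.csv', '/var/backups/'):  # destination may be a directory
        logger.info('backup complete')                  # failures were already logged above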
def find_session(self, session_name):
"""Finds guest sessions by their friendly name and returns an interface
array with all found guest sessions.
in session_name of type str
The session's friendly name to find. Wildcards like ? and * are allowed.
return sessions of type :class:`IGuestSession`
Array with all guest sessions found matching the name specified.
"""
if not isinstance(session_name, basestring):
raise TypeError("session_name can only be an instance of type basestring")
sessions = self._call("findSession",
in_p=[session_name])
sessions = [IGuestSession(a) for a in sessions]
return sessions | Finds guest sessions by their friendly name and returns an interface
array with all found guest sessions.
in session_name of type str
The session's friendly name to find. Wildcards like ? and * are allowed.
return sessions of type :class:`IGuestSession`
Array with all guest sessions found matching the name specified. | Below is the instruction that describes the task:
### Input:
Finds guest sessions by their friendly name and returns an interface
array with all found guest sessions.
in session_name of type str
The session's friendly name to find. Wildcards like ? and * are allowed.
return sessions of type :class:`IGuestSession`
Array with all guest sessions found matching the name specified.
### Response:
def find_session(self, session_name):
"""Finds guest sessions by their friendly name and returns an interface
array with all found guest sessions.
in session_name of type str
The session's friendly name to find. Wildcards like ? and * are allowed.
return sessions of type :class:`IGuestSession`
Array with all guest sessions found matching the name specified.
"""
if not isinstance(session_name, basestring):
raise TypeError("session_name can only be an instance of type basestring")
sessions = self._call("findSession",
in_p=[session_name])
sessions = [IGuestSession(a) for a in sessions]
return sessions |
def get_posix(self, i):
"""Get POSIX."""
index = i.index
value = ['[']
try:
c = next(i)
if c != ':':
raise ValueError('Not a valid property!')
else:
value.append(c)
c = next(i)
if c == '^':
value.append(c)
c = next(i)
while c != ':':
if c not in _PROPERTY:
raise ValueError('Not a valid property!')
if c not in _PROPERTY_STRIP:
value.append(c)
c = next(i)
value.append(c)
c = next(i)
if c != ']' or not value:
raise ValueError('Unmatched ]')
value.append(c)
except Exception:
i.rewind(i.index - index)
value = []
    return ''.join(value) if value else None | Get POSIX. | Below is the instruction that describes the task:
### Input:
Get POSIX.
### Response:
def get_posix(self, i):
"""Get POSIX."""
index = i.index
value = ['[']
try:
c = next(i)
if c != ':':
raise ValueError('Not a valid property!')
else:
value.append(c)
c = next(i)
if c == '^':
value.append(c)
c = next(i)
while c != ':':
if c not in _PROPERTY:
raise ValueError('Not a valid property!')
if c not in _PROPERTY_STRIP:
value.append(c)
c = next(i)
value.append(c)
c = next(i)
if c != ']' or not value:
raise ValueError('Unmatched ]')
value.append(c)
except Exception:
i.rewind(i.index - index)
value = []
return ''.join(value) if value else None |
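A behavioral sketch; the parser's real rewindable character iterator lives elsewhere in this module, so a minimal stand-in is assumed here:
    class _Chars:
        # minimal stand-in for the parser's rewindable iterator (assumption)
        def __init__(self, text):
            self.text, self.index = text, 0
        def __next__(self):
            c = self.text[self.index]
            self.index += 1
            return c
        def rewind(self, n):
            self.index -= n

    i = _Chars(':alpha:]')             # positioned just past the opening '['
    parser.get_posix(i)                # -> '[:alpha:]' (parser instance assumed)
    parser.get_posix(_Chars(':oops'))  # -> None; the iterator is rewound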
def pad_sentences(sentences, padding_word="</s>"):
"""Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
"""
sequence_length = max(len(x) for x in sentences)
padded_sentences = []
for i, sentence in enumerate(sentences):
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences | Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences. | Below is the instruction that describes the task:
### Input:
Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
### Response:
def pad_sentences(sentences, padding_word="</s>"):
"""Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
"""
sequence_length = max(len(x) for x in sentences)
padded_sentences = []
for i, sentence in enumerate(sentences):
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences |
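A quick illustration of the padding behavior:
    sentences = [['the', 'cat'], ['a', 'dog', 'barked']]
    pad_sentences(sentences)
    # -> [['the', 'cat', '</s>'], ['a', 'dog', 'barked']]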
def EMetaclass(cls):
"""Class decorator for creating PyEcore metaclass."""
superclass = cls.__bases__
if not issubclass(cls, EObject):
sclasslist = list(superclass)
if object in superclass:
index = sclasslist.index(object)
sclasslist.insert(index, EObject)
sclasslist.remove(object)
else:
sclasslist.insert(0, EObject)
superclass = tuple(sclasslist)
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
    return MetaEClass(cls.__name__, superclass, orig_vars) | Class decorator for creating PyEcore metaclass. | Below is the instruction that describes the task:
### Input:
Class decorator for creating PyEcore metaclass.
### Response:
def EMetaclass(cls):
"""Class decorator for creating PyEcore metaclass."""
superclass = cls.__bases__
if not issubclass(cls, EObject):
sclasslist = list(superclass)
if object in superclass:
index = sclasslist.index(object)
sclasslist.insert(index, EObject)
sclasslist.remove(object)
else:
sclasslist.insert(0, EObject)
superclass = tuple(sclasslist)
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return MetaEClass(cls.__name__, superclass, orig_vars) |
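A usage sketch in the style of the pyecore documentation; it assumes EAttribute and EString are imported from pyecore.ecore:
    @EMetaclass
    class Person(object):
        name = EAttribute(eType=EString)
        def __init__(self, name=''):
            self.name = name

    p = Person('Alice')
    p.eClass  # the generated EClass is now reachable, as on any EObject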
def export_dse_home_in_dse_env_sh(self):
'''
Due to the way CCM lays out files, separating the repository
from the node(s) confs, the `dse-env.sh` script of each node
needs to have its DSE_HOME var set and exported. Since DSE
4.5.x, the stock `dse-env.sh` file includes a commented-out
place to do exactly this, intended for installers.
Basically: read in the file, write it back out and add the two
lines.
'sstableloader' is an example of a node script that depends on
this, when used in a CCM-built cluster.
'''
with open(self.get_bin_dir() + "/dse-env.sh", "r") as dse_env_sh:
buf = dse_env_sh.readlines()
with open(self.get_bin_dir() + "/dse-env.sh", "w") as out_file:
for line in buf:
out_file.write(line)
if line == "# This is here so the installer can force set DSE_HOME\n":
out_file.write("DSE_HOME=" + self.get_install_dir() + "\nexport DSE_HOME\n") | Due to the way CCM lays out files, separating the repository
from the node(s) confs, the `dse-env.sh` script of each node
needs to have its DSE_HOME var set and exported. Since DSE
4.5.x, the stock `dse-env.sh` file includes a commented-out
place to do exactly this, intended for installers.
Basically: read in the file, write it back out and add the two
lines.
'sstableloader' is an example of a node script that depends on
this, when used in a CCM-built cluster. | Below is the instruction that describes the task:
### Input:
Due to the way CCM lays out files, separating the repository
from the node(s) confs, the `dse-env.sh` script of each node
needs to have its DSE_HOME var set and exported. Since DSE
4.5.x, the stock `dse-env.sh` file includes a commented-out
place to do exactly this, intended for installers.
Basically: read in the file, write it back out and add the two
lines.
'sstableloader' is an example of a node script that depends on
this, when used in a CCM-built cluster.
### Response:
def export_dse_home_in_dse_env_sh(self):
'''
Due to the way CCM lays out files, separating the repository
from the node(s) confs, the `dse-env.sh` script of each node
needs to have its DSE_HOME var set and exported. Since DSE
4.5.x, the stock `dse-env.sh` file includes a commented-out
place to do exactly this, intended for installers.
Basically: read in the file, write it back out and add the two
lines.
'sstableloader' is an example of a node script that depends on
this, when used in a CCM-built cluster.
'''
with open(self.get_bin_dir() + "/dse-env.sh", "r") as dse_env_sh:
buf = dse_env_sh.readlines()
with open(self.get_bin_dir() + "/dse-env.sh", "w") as out_file:
for line in buf:
out_file.write(line)
if line == "# This is here so the installer can force set DSE_HOME\n":
out_file.write("DSE_HOME=" + self.get_install_dir() + "\nexport DSE_HOME\n") |
def postprocess(self):
"""Submit a postprocessing script after collation"""
assert self.postscript
envmod.setup()
envmod.module('load', 'pbs')
cmd = 'qsub {script}'.format(script=self.postscript)
cmd = shlex.split(cmd)
rc = sp.call(cmd)
    assert rc == 0, 'Postprocessing script submission failed.' | Submit a postprocessing script after collation | Below is the instruction that describes the task:
### Input:
Submit a postprocessing script after collation
### Response:
def postprocess(self):
"""Submit a postprocessing script after collation"""
assert self.postscript
envmod.setup()
envmod.module('load', 'pbs')
cmd = 'qsub {script}'.format(script=self.postscript)
cmd = shlex.split(cmd)
rc = sp.call(cmd)
assert rc == 0, 'Postprocessing script submission failed.' |
def check_support_ucannet(cls, hw_info_ex):
"""
Checks whether the module supports the usage of USB-CANnetwork driver.
:param HardwareInfoEx hw_info_ex:
Extended hardware information structure (see method :meth:`get_hardware_info`).
:return: True when the module does support the usage of the USB-CANnetwork driver, otherwise False.
:rtype: bool
"""
return cls.check_is_systec(hw_info_ex) and \
cls.check_version_is_equal_or_higher(hw_info_ex.m_dwFwVersionEx, 3, 8) | Checks whether the module supports the usage of USB-CANnetwork driver.
:param HardwareInfoEx hw_info_ex:
Extended hardware information structure (see method :meth:`get_hardware_info`).
:return: True when the module does support the usage of the USB-CANnetwork driver, otherwise False.
:rtype: bool | Below is the instruction that describes the task:
### Input:
Checks whether the module supports the usage of USB-CANnetwork driver.
:param HardwareInfoEx hw_info_ex:
Extended hardware information structure (see method :meth:`get_hardware_info`).
:return: True when the module does support the usage of the USB-CANnetwork driver, otherwise False.
:rtype: bool
### Response:
def check_support_ucannet(cls, hw_info_ex):
"""
Checks whether the module supports the usage of USB-CANnetwork driver.
:param HardwareInfoEx hw_info_ex:
Extended hardware information structure (see method :meth:`get_hardware_info`).
:return: True when the module does support the usage of the USB-CANnetwork driver, otherwise False.
:rtype: bool
"""
return cls.check_is_systec(hw_info_ex) and \
cls.check_version_is_equal_or_higher(hw_info_ex.m_dwFwVersionEx, 3, 8) |
def img2ascii(img_path, ascii_path, ascii_char="*", pad=0):
"""Convert an image to ascii art text.
Suppose we have an image like that:
.. image:: images/rabbit.png
:align: left
Put some codes::
>>> from weatherlab.math.img2waveform import img2ascii
>>> img2ascii(r"testdata\img2waveform\rabbit.png",
... r"testdata\img2waveform\asciiart.txt", pad=0)
Then you will see this in asciiart.txt::
******
*** *** ****
** ** *********
** ** *** ***
** * ** **
** ** ** **
** * *** *
* ** ** **
** * ** **
** * ** *
* ** ** *
** ** * **
** * ** **
* * ** **
* ** * **
** ** ** **
** * ** **
** * * **
** * ** *
** * ** *
* ** ** *
* ** * *
* ** ** *
* ** ** *
** ** ** **
** * ** **
** * * **
** * * **
** * * **
* * ** **
* * ** *
** * ** *
** * ** *
** ** ** **
* ** ** **
* ** ** **
** ** ** *
** ** ** **
* ** ** **
** ** ** *
** ******* *
** ******* **
** **
** *
** **
*** *
**** ***
*** ***
** ****
** ***
** ***
** **
** **
* **
** **
** **
** **
** **
** **
** **
** **
* **
* **
** *
** **
* **
* **
** *
** *
** **
** **
** **
** **
** ** **
** *** *** **
* **** **** **
* *** **** **
** ** ** *
** *
** *
* **
** **
** *
* **
** **
** **
** *
** **
** **
** **
** *** ** **
** ****** ***
*** ****** **
*** * *** ***
*** ***
*** ***
**** ****
******** *******
*** ********** ******** ***
** *** ************ ********** *** * ***
** * **** *********************** *** ** ***
** * ** **** ** ******* * *** ***** ***
**** * * ***** ********** * **** * * ** **
*** * * ** * ******************************* * *** * **
** ***** * *** ********** ** ** ********** *** ** ***
** * ***** ** * ***** ** ** ***** * * ** * **
*** *** ************ ** ****** ** * * ** ** ** * ** ***
** ******* * * ** ** ** **** * ** * ** * **** **
** *** *** ******* ****** * ** * *** ***** *** ** ***** ** **
** * * ***** ************************************ * **** * **
*** ** ** *********************************************** *** ***
*** ** ****************************************** **** ** ** **
**** ** ** ******************************************** ** * **
** ****** ** ******************************************** ** * ***
** ***** *********************************************** ** ****
* *** ****************************** **************** *********
** ** *************************************** * * * ***** *
** ** ********************************************** *** *
* ** ** *********************************** ******* ** *
** ** ***************************************** *** ** *
*** ** * ********************************************** ** **
****** ************************************************ ** ***
**** *********************************************** ********
** *********************************************** ****
*** ** ******************************************* **
*** ** ***** ****** * * * * * ******** *** ** ** ***
*** * * **** **** **** * ** ** * *** ** ***
**** * * ** **** * *** ******** * *** *****
***** ** ** ** ** *** ** *** *****
******* * * ** * ** ********
*************** * *******************
****************************** ***
*** ********* **
** * **
** * **
** * **
** * **
** * **
** ** **
** ****** * ** *********
*************************************
**********
:param img_path: the image file path
:type img_path: str
:param ascii_path: the output ascii text file path
:type ascii_path: str
:param pad: how many spaces are filled in between two pixels
:type pad: int
"""
if len(ascii_char) != 1:
raise Exception("ascii_char has to be single character.")
image = Image.open(img_path).convert("L")
matrix = np.array(image)
# you can customize the gray scale fix behavior to fit color image
matrix[np.where(matrix >= 128)] = 255
matrix[np.where(matrix < 128)] = 0
lines = list()
for vector in matrix:
line = list()
for i in vector:
line.append(" " * pad)
if i:
line.append(" ")
else:
line.append(ascii_char)
lines.append("".join(line))
with open(ascii_path, "w") as f:
f.write("\n".join(lines)) | Convert an image to ascii art text.
Suppose we have an image like that:
.. image:: images/rabbit.png
:align: left
Put some codes::
>>> from weatherlab.math.img2waveform import img2ascii
>>> img2ascii(r"testdata\img2waveform\rabbit.png",
... r"testdata\img2waveform\asciiart.txt", pad=0)
Then you will see this in asciiart.txt::
******
*** *** ****
** ** *********
** ** *** ***
** * ** **
** ** ** **
** * *** *
* ** ** **
** * ** **
** * ** *
* ** ** *
** ** * **
** * ** **
* * ** **
* ** * **
** ** ** **
** * ** **
** * * **
** * ** *
** * ** *
* ** ** *
* ** * *
* ** ** *
* ** ** *
** ** ** **
** * ** **
** * * **
** * * **
** * * **
* * ** **
* * ** *
** * ** *
** * ** *
** ** ** **
* ** ** **
* ** ** **
** ** ** *
** ** ** **
* ** ** **
** ** ** *
** ******* *
** ******* **
** **
** *
** **
*** *
**** ***
*** ***
** ****
** ***
** ***
** **
** **
* **
** **
** **
** **
** **
** **
** **
** **
* **
* **
** *
** **
* **
* **
** *
** *
** **
** **
** **
** **
** ** **
** *** *** **
* **** **** **
* *** **** **
** ** ** *
** *
** *
* **
** **
** *
* **
** **
** **
** *
** **
** **
** **
** *** ** **
** ****** ***
*** ****** **
*** * *** ***
*** ***
*** ***
**** ****
******** *******
*** ********** ******** ***
** *** ************ ********** *** * ***
** * **** *********************** *** ** ***
** * ** **** ** ******* * *** ***** ***
**** * * ***** ********** * **** * * ** **
*** * * ** * ******************************* * *** * **
** ***** * *** ********** ** ** ********** *** ** ***
** * ***** ** * ***** ** ** ***** * * ** * **
*** *** ************ ** ****** ** * * ** ** ** * ** ***
** ******* * * ** ** ** **** * ** * ** * **** **
** *** *** ******* ****** * ** * *** ***** *** ** ***** ** **
** * * ***** ************************************ * **** * **
*** ** ** *********************************************** *** ***
*** ** ****************************************** **** ** ** **
**** ** ** ******************************************** ** * **
** ****** ** ******************************************** ** * ***
** ***** *********************************************** ** ****
* *** ****************************** **************** *********
** ** *************************************** * * * ***** *
** ** ********************************************** *** *
* ** ** *********************************** ******* ** *
** ** ***************************************** *** ** *
*** ** * ********************************************** ** **
****** ************************************************ ** ***
**** *********************************************** ********
** *********************************************** ****
*** ** ******************************************* **
*** ** ***** ****** * * * * * ******** *** ** ** ***
*** * * **** **** **** * ** ** * *** ** ***
**** * * ** **** * *** ******** * *** *****
***** ** ** ** ** *** ** *** *****
******* * * ** * ** ********
*************** * *******************
****************************** ***
*** ********* **
** * **
** * **
** * **
** * **
** * **
** ** **
** ****** * ** *********
*************************************
**********
:param img_path: the image file path
:type img_path: str
:param ascii_path: the output ascii text file path
:type ascii_path: str
:param pad: how many spaces are filled in between two pixels
:type pad: int | Below is the instruction that describes the task:
### Input:
Convert an image to ascii art text.
Suppose we have an image like that:
.. image:: images/rabbit.png
:align: left
Put some codes::
>>> from weatherlab.math.img2waveform import img2ascii
>>> img2ascii(r"testdata\img2waveform\rabbit.png",
... r"testdata\img2waveform\asciiart.txt", pad=0)
Then you will see this in asciiart.txt::
******
*** *** ****
** ** *********
** ** *** ***
** * ** **
** ** ** **
** * *** *
* ** ** **
** * ** **
** * ** *
* ** ** *
** ** * **
** * ** **
* * ** **
* ** * **
** ** ** **
** * ** **
** * * **
** * ** *
** * ** *
* ** ** *
* ** * *
* ** ** *
* ** ** *
** ** ** **
** * ** **
** * * **
** * * **
** * * **
* * ** **
* * ** *
** * ** *
** * ** *
** ** ** **
* ** ** **
* ** ** **
** ** ** *
** ** ** **
* ** ** **
** ** ** *
** ******* *
** ******* **
** **
** *
** **
*** *
**** ***
*** ***
** ****
** ***
** ***
** **
** **
* **
** **
** **
** **
** **
** **
** **
** **
* **
* **
** *
** **
* **
* **
** *
** *
** **
** **
** **
** **
** ** **
** *** *** **
* **** **** **
* *** **** **
** ** ** *
** *
** *
* **
** **
** *
* **
** **
** **
** *
** **
** **
** **
** *** ** **
** ****** ***
*** ****** **
*** * *** ***
*** ***
*** ***
**** ****
******** *******
*** ********** ******** ***
** *** ************ ********** *** * ***
** * **** *********************** *** ** ***
** * ** **** ** ******* * *** ***** ***
**** * * ***** ********** * **** * * ** **
*** * * ** * ******************************* * *** * **
** ***** * *** ********** ** ** ********** *** ** ***
** * ***** ** * ***** ** ** ***** * * ** * **
*** *** ************ ** ****** ** * * ** ** ** * ** ***
** ******* * * ** ** ** **** * ** * ** * **** **
** *** *** ******* ****** * ** * *** ***** *** ** ***** ** **
** * * ***** ************************************ * **** * **
*** ** ** *********************************************** *** ***
*** ** ****************************************** **** ** ** **
**** ** ** ******************************************** ** * **
** ****** ** ******************************************** ** * ***
** ***** *********************************************** ** ****
* *** ****************************** **************** *********
** ** *************************************** * * * ***** *
** ** ********************************************** *** *
* ** ** *********************************** ******* ** *
** ** ***************************************** *** ** *
*** ** * ********************************************** ** **
****** ************************************************ ** ***
**** *********************************************** ********
** *********************************************** ****
*** ** ******************************************* **
*** ** ***** ****** * * * * * ******** *** ** ** ***
*** * * **** **** **** * ** ** * *** ** ***
**** * * ** **** * *** ******** * *** *****
***** ** ** ** ** *** ** *** *****
******* * * ** * ** ********
*************** * *******************
****************************** ***
*** ********* **
** * **
** * **
** * **
** * **
** * **
** ** **
** ****** * ** *********
*************************************
**********
:param img_path: the image file path
:type img_path: str
:param ascii_path: the output ascii text file path
:type ascii_path: str
:param pad: how many spaces are filled in between two pixels
:type pad: int
### Response:
def img2ascii(img_path, ascii_path, ascii_char="*", pad=0):
"""Convert an image to ascii art text.
Suppose we have an image like that:
.. image:: images/rabbit.png
:align: left
Put some codes::
>>> from weatherlab.math.img2waveform import img2ascii
>>> img2ascii(r"testdata\img2waveform\rabbit.png",
... r"testdata\img2waveform\asciiart.txt", pad=0)
Then you will see this in asciiart.txt::
******
*** *** ****
** ** *********
** ** *** ***
** * ** **
** ** ** **
** * *** *
* ** ** **
** * ** **
** * ** *
* ** ** *
** ** * **
** * ** **
* * ** **
* ** * **
** ** ** **
** * ** **
** * * **
** * ** *
** * ** *
* ** ** *
* ** * *
* ** ** *
* ** ** *
** ** ** **
** * ** **
** * * **
** * * **
** * * **
* * ** **
* * ** *
** * ** *
** * ** *
** ** ** **
* ** ** **
* ** ** **
** ** ** *
** ** ** **
* ** ** **
** ** ** *
** ******* *
** ******* **
** **
** *
** **
*** *
**** ***
*** ***
** ****
** ***
** ***
** **
** **
* **
** **
** **
** **
** **
** **
** **
** **
* **
* **
** *
** **
* **
* **
** *
** *
** **
** **
** **
** **
** ** **
** *** *** **
* **** **** **
* *** **** **
** ** ** *
** *
** *
* **
** **
** *
* **
** **
** **
** *
** **
** **
** **
** *** ** **
** ****** ***
*** ****** **
*** * *** ***
*** ***
*** ***
**** ****
******** *******
*** ********** ******** ***
** *** ************ ********** *** * ***
** * **** *********************** *** ** ***
** * ** **** ** ******* * *** ***** ***
**** * * ***** ********** * **** * * ** **
*** * * ** * ******************************* * *** * **
** ***** * *** ********** ** ** ********** *** ** ***
** * ***** ** * ***** ** ** ***** * * ** * **
*** *** ************ ** ****** ** * * ** ** ** * ** ***
** ******* * * ** ** ** **** * ** * ** * **** **
** *** *** ******* ****** * ** * *** ***** *** ** ***** ** **
** * * ***** ************************************ * **** * **
*** ** ** *********************************************** *** ***
*** ** ****************************************** **** ** ** **
**** ** ** ******************************************** ** * **
** ****** ** ******************************************** ** * ***
** ***** *********************************************** ** ****
* *** ****************************** **************** *********
** ** *************************************** * * * ***** *
** ** ********************************************** *** *
* ** ** *********************************** ******* ** *
** ** ***************************************** *** ** *
*** ** * ********************************************** ** **
****** ************************************************ ** ***
**** *********************************************** ********
** *********************************************** ****
*** ** ******************************************* **
*** ** ***** ****** * * * * * ******** *** ** ** ***
*** * * **** **** **** * ** ** * *** ** ***
**** * * ** **** * *** ******** * *** *****
***** ** ** ** ** *** ** *** *****
******* * * ** * ** ********
*************** * *******************
****************************** ***
*** ********* **
** * **
** * **
** * **
** * **
** * **
** ** **
** ****** * ** *********
*************************************
**********
:param img_path: the image file path
:type img_path: str
:param ascii_path: the output ascii text file path
:type ascii_path: str
:param pad: how many spaces are filled in between two pixels
:type pad: int
"""
if len(ascii_char) != 1:
raise Exception("ascii_char has to be single character.")
image = Image.open(img_path).convert("L")
matrix = np.array(image)
# you can customize the gray scale fix behavior to fit color image
matrix[np.where(matrix >= 128)] = 255
matrix[np.where(matrix < 128)] = 0
lines = list()
for vector in matrix:
line = list()
for i in vector:
line.append(" " * pad)
if i:
line.append(" ")
else:
line.append(ascii_char)
lines.append("".join(line))
with open(ascii_path, "w") as f:
f.write("\n".join(lines)) |
def verifies( self, hash, signature ):
"""Verify that signature is a valid signature of hash.
Return True if the signature is valid.
"""
# From X9.62 J.3.1.
G = self.generator
n = G.order()
r = signature.r
s = signature.s
if r < 1 or r > n-1: return False
if s < 1 or s > n-1: return False
c = numbertheory.inverse_mod( s, n )
u1 = ( hash * c ) % n
u2 = ( r * c ) % n
xy = u1 * G + u2 * self.point
v = xy.x() % n
return v == r | Verify that signature is a valid signature of hash.
Return True if the signature is valid. | Below is the instruction that describes the task:
### Input:
Verify that signature is a valid signature of hash.
Return True if the signature is valid.
### Response:
def verifies( self, hash, signature ):
"""Verify that signature is a valid signature of hash.
Return True if the signature is valid.
"""
# From X9.62 J.3.1.
G = self.generator
n = G.order()
r = signature.r
s = signature.s
if r < 1 or r > n-1: return False
if s < 1 or s > n-1: return False
c = numbertheory.inverse_mod( s, n )
u1 = ( hash * c ) % n
u2 = ( r * c ) % n
xy = u1 * G + u2 * self.point
v = xy.x() % n
return v == r |
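A hedged usage sketch in the style of the ecdsa module this method comes from; the key material, digest, and signature values are assumed to exist already:
    pub_key = Public_key(generator_192, generator_192 * secret_multiplier)  # assumed setup
    sig = Signature(r, s)                     # r, s produced by the signer
    if pub_key.verifies(digest_as_int, sig):  # digest reduced to an integer
        print('signature is valid')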
def _detect_start_end(true_values):
"""From ndarray of bool values, return intervals of True values.
Parameters
----------
true_values : ndarray (dtype='bool')
array with bool values
Returns
-------
ndarray (dtype='int')
N x 2 matrix with starting and ending times.
"""
neg = zeros((1), dtype='bool')
int_values = asarray(concatenate((neg, true_values[:-1], neg)),
dtype='int')
# must discard last value to avoid axis out of bounds
cross_threshold = diff(int_values)
event_starts = where(cross_threshold == 1)[0]
event_ends = where(cross_threshold == -1)[0]
if len(event_starts):
events = vstack((event_starts, event_ends)).T
else:
events = None
return events | From ndarray of bool values, return intervals of True values.
Parameters
----------
true_values : ndarray (dtype='bool')
array with bool values
Returns
-------
ndarray (dtype='int')
N x 2 matrix with starting and ending times. | Below is the instruction that describes the task:
### Input:
From ndarray of bool values, return intervals of True values.
Parameters
----------
true_values : ndarray (dtype='bool')
array with bool values
Returns
-------
ndarray (dtype='int')
N x 2 matrix with starting and ending times.
### Response:
def _detect_start_end(true_values):
"""From ndarray of bool values, return intervals of True values.
Parameters
----------
true_values : ndarray (dtype='bool')
array with bool values
Returns
-------
ndarray (dtype='int')
N x 2 matrix with starting and ending times.
"""
neg = zeros((1), dtype='bool')
int_values = asarray(concatenate((neg, true_values[:-1], neg)),
dtype='int')
# must discard last value to avoid axis out of bounds
cross_threshold = diff(int_values)
event_starts = where(cross_threshold == 1)[0]
event_ends = where(cross_threshold == -1)[0]
if len(event_starts):
events = vstack((event_starts, event_ends)).T
else:
events = None
return events |
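A worked example; intervals are half-open [start, end), and an all-False input returns None:
    true_values = np.array([False, True, True, False, False])
    _detect_start_end(true_values)
    # -> array([[1, 3]]): one run of True samples starting at 1, ending before 3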
def isPythonFile(filename):
"""Return True if filename points to a Python file."""
if filename.endswith('.py'):
return True
# Avoid obvious Emacs backup files
if filename.endswith("~"):
return False
max_bytes = 128
try:
with open(filename, 'rb') as f:
text = f.read(max_bytes)
if not text:
return False
except IOError:
return False
first_line = text.splitlines()[0]
    return PYTHON_SHEBANG_REGEX.match(first_line) | Return True if filename points to a Python file. | Below is the instruction that describes the task:
### Input:
Return True if filename points to a Python file.
### Response:
def isPythonFile(filename):
"""Return True if filename points to a Python file."""
if filename.endswith('.py'):
return True
# Avoid obvious Emacs backup files
if filename.endswith("~"):
return False
max_bytes = 128
try:
with open(filename, 'rb') as f:
text = f.read(max_bytes)
if not text:
return False
except IOError:
return False
first_line = text.splitlines()[0]
return PYTHON_SHEBANG_REGEX.match(first_line) |
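Usage is straightforward; the shebang branch depends on the module-level PYTHON_SHEBANG_REGEX, which is defined elsewhere:
    isPythonFile('tool.py')   # True: matched by extension
    isPythonFile('tool.py~')  # False: treated as an Emacs backup file
    isPythonFile('runner')    # True only if its first line matches the shebang regex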
def reftrack_status_data(rt, role):
"""Return the data for the status
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the status
:rtype: depending on role
:raises: None
"""
status = rt.status()
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
if status:
return status
else:
return "Not in scene!" | Return the data for the status
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the status
:rtype: depending on role
:raises: None | Below is the instruction that describes the task:
### Input:
Return the data for the status
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the status
:rtype: depending on role
:raises: None
### Response:
def reftrack_status_data(rt, role):
"""Return the data for the status
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the status
:rtype: depending on role
:raises: None
"""
status = rt.status()
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
if status:
return status
else:
return "Not in scene!" |
def needs_encode(obj):
'''
>>> from re import compile
>>> atomics = (True, 1, 1.0, '', None, compile(''), datetime.now(), b'')
>>> any(needs_encode(i) for i in atomics)
False
>>> needs_encode([1, 2, 3])
False
>>> needs_encode([])
False
>>> needs_encode([1, [2, 3]])
False
>>> needs_encode({})
False
>>> needs_encode({'1': {'2': 3}})
False
>>> needs_encode({'1': [2]})
False
>>> needs_encode(b'1')
False
Objects that don't round trip need encoding::
>>> needs_encode(tuple())
True
>>> needs_encode(set())
True
>>> needs_encode([1, [set()]])
True
>>> needs_encode({'1': {'2': set()}})
True
Mongo rejects dicts with non-string keys so they need encoding too::
>>> needs_encode({1: 2})
True
>>> needs_encode({'1': {None: True}})
True
'''
obtype = type(obj)
if obtype in atomic_types:
return False
if obtype is list:
return any(needs_encode(i) for i in obj)
if obtype is dict:
return any(type(k) not in valid_key_types or needs_encode(v)
for (k, v) in obj.items())
return True | >>> from re import compile
>>> atomics = (True, 1, 1.0, '', None, compile(''), datetime.now(), b'')
>>> any(needs_encode(i) for i in atomics)
False
>>> needs_encode([1, 2, 3])
False
>>> needs_encode([])
False
>>> needs_encode([1, [2, 3]])
False
>>> needs_encode({})
False
>>> needs_encode({'1': {'2': 3}})
False
>>> needs_encode({'1': [2]})
False
>>> needs_encode(b'1')
False
Objects that don't round trip need encoding::
>>> needs_encode(tuple())
True
>>> needs_encode(set())
True
>>> needs_encode([1, [set()]])
True
>>> needs_encode({'1': {'2': set()}})
True
Mongo rejects dicts with non-string keys so they need encoding too::
>>> needs_encode({1: 2})
True
>>> needs_encode({'1': {None: True}})
True | Below is the instruction that describes the task:
### Input:
>>> from re import compile
>>> atomics = (True, 1, 1.0, '', None, compile(''), datetime.now(), b'')
>>> any(needs_encode(i) for i in atomics)
False
>>> needs_encode([1, 2, 3])
False
>>> needs_encode([])
False
>>> needs_encode([1, [2, 3]])
False
>>> needs_encode({})
False
>>> needs_encode({'1': {'2': 3}})
False
>>> needs_encode({'1': [2]})
False
>>> needs_encode(b'1')
False
Objects that don't round trip need encoding::
>>> needs_encode(tuple())
True
>>> needs_encode(set())
True
>>> needs_encode([1, [set()]])
True
>>> needs_encode({'1': {'2': set()}})
True
Mongo rejects dicts with non-string keys so they need encoding too::
>>> needs_encode({1: 2})
True
>>> needs_encode({'1': {None: True}})
True
### Response:
def needs_encode(obj):
'''
>>> from re import compile
>>> atomics = (True, 1, 1.0, '', None, compile(''), datetime.now(), b'')
>>> any(needs_encode(i) for i in atomics)
False
>>> needs_encode([1, 2, 3])
False
>>> needs_encode([])
False
>>> needs_encode([1, [2, 3]])
False
>>> needs_encode({})
False
>>> needs_encode({'1': {'2': 3}})
False
>>> needs_encode({'1': [2]})
False
>>> needs_encode(b'1')
False
Objects that don't round trip need encoding::
>>> needs_encode(tuple())
True
>>> needs_encode(set())
True
>>> needs_encode([1, [set()]])
True
>>> needs_encode({'1': {'2': set()}})
True
Mongo rejects dicts with non-string keys so they need encoding too::
>>> needs_encode({1: 2})
True
>>> needs_encode({'1': {None: True}})
True
'''
obtype = type(obj)
if obtype in atomic_types:
return False
if obtype is list:
return any(needs_encode(i) for i in obj)
if obtype is dict:
return any(type(k) not in valid_key_types or needs_encode(v)
for (k, v) in obj.items())
return True |