code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k) |
---|---|---|
def _parse_description(html_chunk):
"""
Parse description of the book.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
str/None: Description as string or None if not found.
"""
description_tag = html_chunk.match(
["div", {"class": "kniha_detail_text"}],
"p"
)
if not description_tag:
return None
description = get_first_content(description_tag)
description = description.replace("<br />", "\n")
description = description.replace("<br/>", "\n")
return dhtmlparser.removeTags(description).strip() | Parse description of the book.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
str/None: Description as string or None if not found. | Below is the instruction that describes the task:
### Input:
Parse description of the book.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
str/None: Description as string or None if not found.
### Response:
def _parse_description(html_chunk):
"""
Parse description of the book.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
str/None: Description as string or None if not found.
"""
description_tag = html_chunk.match(
["div", {"class": "kniha_detail_text"}],
"p"
)
if not description_tag:
return None
description = get_first_content(description_tag)
description = description.replace("<br />", "\n")
description = description.replace("<br/>", "\n")
return dhtmlparser.removeTags(description).strip() |
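A minimal usage sketch for the function above, assuming dhtmlparser is installed; the sample HTML and the expected result are illustrative only (get_first_content comes from the same module as _parse_description).
# Hypothetical illustration: feed a parsed detail page to _parse_description().
import dhtmlparser
page_html = '<div class="kniha_detail_text"><p>First line.<br />Second line.</p></div>'
html_chunk = dhtmlparser.parseString(page_html)
print(_parse_description(html_chunk))  # roughly "First line.\nSecond line.", or None if missing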
def context_value(name):
"""
Returns an effect that drops the current value, and replaces it with
the value from the context with the given name.
"""
def context_value(_value, context, **_params):
return defer.succeed(context[name])
return context_value | Returns an effect that drops the current value, and replaces it with
the value from the context with the given name. | Below is the instruction that describes the task:
### Input:
Returns an effect that drops the current value, and replaces it with
the value from the context with the given name.
### Response:
def context_value(name):
"""
Returns an effect that drops the current value, and replaces it with
the value from the context with the given name.
"""
def context_value(_value, context, **_params):
return defer.succeed(context[name])
return context_value |
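A short sketch of the effect returned above, assuming Twisted's defer module as imported by the surrounding code; the context key is illustrative.
effect = context_value("user_id")
d = effect(None, {"user_id": 42})  # Deferred that fires with 42; the incoming value is discarded
d.addCallback(print)               # prints 42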
def any_slug_field(field, **kwargs):
"""
Return random value for SlugField
>>> result = any_field(models.SlugField())
>>> type(result)
<type 'str'>
>>> from django.core.validators import slug_re
>>> re.match(slug_re, result) is not None
True
"""
letters = ascii_letters + digits + '_-'
return xunit.any_string(letters = letters, max_length = field.max_length) | Return random value for SlugField
>>> result = any_field(models.SlugField())
>>> type(result)
<type 'str'>
>>> from django.core.validators import slug_re
>>> re.match(slug_re, result) is not None
True | Below is the instruction that describes the task:
### Input:
Return random value for SlugField
>>> result = any_field(models.SlugField())
>>> type(result)
<type 'str'>
>>> from django.core.validators import slug_re
>>> re.match(slug_re, result) is not None
True
### Response:
def any_slug_field(field, **kwargs):
"""
Return random value for SlugField
>>> result = any_field(models.SlugField())
>>> type(result)
<type 'str'>
>>> from django.core.validators import slug_re
>>> re.match(slug_re, result) is not None
True
"""
letters = ascii_letters + digits + '_-'
return xunit.any_string(letters = letters, max_length = field.max_length) |
def close(self):
"""Cleanly shutdown the connection to RabbitMQ
:raises: sprockets.mixins.amqp.ConnectionStateError
"""
if not self.closable:
LOGGER.warning('Closed called while %s', self.state_description)
raise ConnectionStateError(self.state_description)
self.state = self.STATE_CLOSING
LOGGER.info('Closing RabbitMQ connection')
self.connection.close() | Cleanly shutdown the connection to RabbitMQ
:raises: sprockets.mixins.amqp.ConnectionStateError | Below is the instruction that describes the task:
### Input:
Cleanly shutdown the connection to RabbitMQ
:raises: sprockets.mixins.amqp.ConnectionStateError
### Response:
def close(self):
"""Cleanly shutdown the connection to RabbitMQ
:raises: sprockets.mixins.amqp.ConnectionStateError
"""
if not self.closable:
LOGGER.warning('Closed called while %s', self.state_description)
raise ConnectionStateError(self.state_description)
self.state = self.STATE_CLOSING
LOGGER.info('Closing RabbitMQ connection')
self.connection.close() |
def _backup_pb_tqdm(self, dirs):
"""Create a backup with a tqdm progress bar."""
with ZipFile(self.zip_filename, 'w') as backup_zip:
for path in tqdm(dirs, desc='Writing Zip Files', total=len(dirs)):
backup_zip.write(path, path[len(self.source):len(path)]) | Create a backup with a tqdm progress bar. | Below is the instruction that describes the task:
### Input:
Create a backup with a tqdm progress bar.
### Response:
def _backup_pb_tqdm(self, dirs):
"""Create a backup with a tqdm progress bar."""
with ZipFile(self.zip_filename, 'w') as backup_zip:
for path in tqdm(dirs, desc='Writing Zip Files', total=len(dirs)):
backup_zip.write(path, path[len(self.source):len(path)]) |
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(NetstatCollector, self).get_default_config()
config.update({
'path': 'netstat',
})
return config | Returns the default collector settings | Below is the instruction that describes the task:
### Input:
Returns the default collector settings
### Response:
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(NetstatCollector, self).get_default_config()
config.update({
'path': 'netstat',
})
return config |
def discover(scope, loglevel, capture):
"Discover systems using WS-Discovery"
if loglevel:
level = getattr(logging, loglevel, None)
if not level:
print("Invalid log level '%s'" % loglevel)
return
logger.setLevel(level)
run(scope=scope, capture=capture) | Discover systems using WS-Discovery | Below is the instruction that describes the task:
### Input:
Discover systems using WS-Discovery
### Response:
def discover(scope, loglevel, capture):
"Discover systems using WS-Discovery"
if loglevel:
level = getattr(logging, loglevel, None)
if not level:
print("Invalid log level '%s'" % loglevel)
return
logger.setLevel(level)
run(scope=scope, capture=capture) |
def _ReadStreamDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads a stream data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
StreamDefinition: stream data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
if is_member:
supported_definition_values = (
self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_MEMBER_DATA_TYPE)
else:
supported_definition_values = (
self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_DATA_TYPE)
return self._ReadElementSequenceDataTypeDefinition(
definitions_registry, definition_values, data_types.StreamDefinition,
definition_name, supported_definition_values) | Reads a stream data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
StreamDefinition: stream data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect. | Below is the instruction that describes the task:
### Input:
Reads a stream data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
StreamDefinition: stream data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
### Response:
def _ReadStreamDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads a stream data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
StreamDefinition: stream data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
if is_member:
supported_definition_values = (
self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_MEMBER_DATA_TYPE)
else:
supported_definition_values = (
self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_DATA_TYPE)
return self._ReadElementSequenceDataTypeDefinition(
definitions_registry, definition_values, data_types.StreamDefinition,
definition_name, supported_definition_values) |
def setParent(self,parent):
"""
Sets the parent of this bone for all entities.
Note that this method must be called before many other methods to ensure internal state has been initialized.
This method also registers this bone as a child of its parent.
"""
self.parent = parent
self.parent.child_bones[self.name]=self | Sets the parent of this bone for all entities.
Note that this method must be called before many other methods to ensure internal state has been initialized.
This method also registers this bone as a child of its parent. | Below is the instruction that describes the task:
### Input:
Sets the parent of this bone for all entities.
Note that this method must be called before many other methods to ensure internal state has been initialized.
This method also registers this bone as a child of its parent.
### Response:
def setParent(self,parent):
"""
Sets the parent of this bone for all entities.
Note that this method must be called before many other methods to ensure internal state has been initialized.
This method also registers this bone as a child of its parent.
"""
self.parent = parent
self.parent.child_bones[self.name]=self |
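A hypothetical stand-in class to show the bookkeeping that setParent performs; the real bone class carries much more state.
class _Bone:
    def __init__(self, name):
        self.name = name
        self.parent = None
        self.child_bones = {}

    def setParent(self, parent):
        # Same two assignments as in the method above.
        self.parent = parent
        self.parent.child_bones[self.name] = self

root = _Bone("root")
arm = _Bone("arm")
arm.setParent(root)
assert arm.parent is root and root.child_bones["arm"] is arm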
async def open(self, wait_for_completion=True):
"""Open window.
Parameters:
* wait_for_completion: If set, function will return
after device has reached target position.
"""
await self.set_position(
position=Position(position_percent=0),
wait_for_completion=wait_for_completion) | Open window.
Parameters:
* wait_for_completion: If set, function will return
after device has reached target position. | Below is the instruction that describes the task:
### Input:
Open window.
Parameters:
* wait_for_completion: If set, function will return
after device has reached target position.
### Response:
async def open(self, wait_for_completion=True):
"""Open window.
Parameters:
* wait_for_completion: If set, function will return
after device has reached target position.
"""
await self.set_position(
position=Position(position_percent=0),
wait_for_completion=wait_for_completion) |
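A hedged asyncio sketch of calling the coroutine above; `window` stands for an already-initialised device object exposing open(), whose construction is library-specific and omitted here.
import asyncio

async def open_without_waiting(window):
    # Send the command and return immediately instead of waiting for the
    # device to reach the fully open position.
    await window.open(wait_for_completion=False)

# asyncio.run(open_without_waiting(window))  # with a previously created `window`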
def add(self, cell, overwrite_duplicate=False):
"""
Add one or more cells to the library.
Parameters
----------
cell : ``Cell`` of list of ``Cell``
Cells to be included in the library.
overwrite_duplicate : bool
If True an existing cell with the same name in the library
will be overwritten.
Returns
-------
out : ``GdsLibrary``
This object.
"""
if isinstance(cell, Cell):
if (not overwrite_duplicate and cell.name in self.cell_dict and
self.cell_dict[cell.name] is not cell):
raise ValueError("[GDSPY] cell named {0} already present in "
"library.".format(cell.name))
self.cell_dict[cell.name] = cell
else:
for c in cell:
if (not overwrite_duplicate and c.name in self.cell_dict and
self.cell_dict[c.name] is not c):
raise ValueError("[GDSPY] cell named {0} already present "
"in library.".format(c.name))
self.cell_dict[c.name] = c
return self | Add one or more cells to the library.
Parameters
----------
cell : ``Cell`` of list of ``Cell``
Cells to be included in the library.
overwrite_duplicate : bool
If True an existing cell with the same name in the library
will be overwritten.
Returns
-------
out : ``GdsLibrary``
This object. | Below is the instruction that describes the task:
### Input:
Add one or more cells to the library.
Parameters
----------
cell : ``Cell`` of list of ``Cell``
Cells to be included in the library.
overwrite_duplicate : bool
If True an existing cell with the same name in the library
will be overwritten.
Returns
-------
out : ``GdsLibrary``
This object.
### Response:
def add(self, cell, overwrite_duplicate=False):
"""
Add one or more cells to the library.
Parameters
----------
cell : ``Cell`` of list of ``Cell``
Cells to be included in the library.
overwrite_duplicate : bool
If True an existing cell with the same name in the library
will be overwritten.
Returns
-------
out : ``GdsLibrary``
This object.
"""
if isinstance(cell, Cell):
if (not overwrite_duplicate and cell.name in self.cell_dict and
self.cell_dict[cell.name] is not cell):
raise ValueError("[GDSPY] cell named {0} already present in "
"library.".format(cell.name))
self.cell_dict[cell.name] = cell
else:
for c in cell:
if (not overwrite_duplicate and c.name in self.cell_dict and
self.cell_dict[c.name] is not c):
raise ValueError("[GDSPY] cell named {0} already present "
"in library.".format(c.name))
self.cell_dict[c.name] = c
return self |
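A brief gdspy usage sketch; the cell names are placeholders and constructor details may vary between gdspy versions.
import gdspy

lib = gdspy.GdsLibrary()
top = gdspy.Cell("TOP")                     # cell to register in the library
lib.add(top)                                # add a single cell
# lib.add([top, other], overwrite_duplicate=True)  # or a list, replacing name clashes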
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
"""Extracts relevant Airport entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
"""
if 'RememberedNetworks' not in match:
return
for wifi in match['RememberedNetworks']:
ssid = wifi.get('SSIDString', 'UNKNOWN_SSID')
security_type = wifi.get('SecurityType', 'UNKNOWN_SECURITY_TYPE')
event_data = plist_event.PlistTimeEventData()
event_data.desc = (
'[WiFi] Connected to network: <{0:s}> using security {1:s}').format(
ssid, security_type)
event_data.key = 'item'
event_data.root = '/RememberedNetworks'
datetime_value = wifi.get('LastConnected', None)
if datetime_value:
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
else:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
parser_mediator.ProduceEventWithEventData(event, event_data) | Extracts relevant Airport entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS. | Below is the instruction that describes the task:
### Input:
Extracts relevant Airport entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
### Response:
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
"""Extracts relevant Airport entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
"""
if 'RememberedNetworks' not in match:
return
for wifi in match['RememberedNetworks']:
ssid = wifi.get('SSIDString', 'UNKNOWN_SSID')
security_type = wifi.get('SecurityType', 'UNKNOWN_SECURITY_TYPE')
event_data = plist_event.PlistTimeEventData()
event_data.desc = (
'[WiFi] Connected to network: <{0:s}> using security {1:s}').format(
ssid, security_type)
event_data.key = 'item'
event_data.root = '/RememberedNetworks'
datetime_value = wifi.get('LastConnected', None)
if datetime_value:
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
else:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
parser_mediator.ProduceEventWithEventData(event, event_data) |
def in_collision_other(self, other_manager,
return_names=False, return_data=False):
"""
Check if any object from this manager collides with any object
from another manager.
Parameters
-------------------
other_manager : CollisionManager
Another collision manager object
return_names : bool
If true, a set is returned containing the names
of all pairs of objects in collision.
return_data : bool
If true, a list of ContactData is returned as well
Returns
-------------
is_collision : bool
True if a collision occurred between any pair of objects
and False otherwise
names : set of 2-tup
The set of pairwise collisions. Each tuple
contains two names (first from this manager,
second from the other_manager) indicating
that the two corresponding objects are in collision.
contacts : list of ContactData
All contacts detected
"""
cdata = fcl.CollisionData()
if return_names or return_data:
cdata = fcl.CollisionData(
request=fcl.CollisionRequest(
num_max_contacts=100000,
enable_contact=True))
self._manager.collide(other_manager._manager,
cdata,
fcl.defaultCollisionCallback)
result = cdata.result.is_collision
objs_in_collision = set()
contact_data = []
if return_names or return_data:
for contact in cdata.result.contacts:
reverse = False
names = (self._extract_name(contact.o1),
other_manager._extract_name(contact.o2))
if names[0] is None:
names = (self._extract_name(contact.o2),
other_manager._extract_name(contact.o1))
reverse = True
if return_names:
objs_in_collision.add(names)
if return_data:
if reverse:
names = reversed(names)
contact_data.append(ContactData(names, contact))
if return_names and return_data:
return result, objs_in_collision, contact_data
elif return_names:
return result, objs_in_collision
elif return_data:
return result, contact_data
else:
return result | Check if any object from this manager collides with any object
from another manager.
Parameters
-------------------
other_manager : CollisionManager
Another collision manager object
return_names : bool
If true, a set is returned containing the names
of all pairs of objects in collision.
return_data : bool
If true, a list of ContactData is returned as well
Returns
-------------
is_collision : bool
True if a collision occurred between any pair of objects
and False otherwise
names : set of 2-tup
The set of pairwise collisions. Each tuple
contains two names (first from this manager,
second from the other_manager) indicating
that the two corresponding objects are in collision.
contacts : list of ContactData
All contacts detected | Below is the instruction that describes the task:
### Input:
Check if any object from this manager collides with any object
from another manager.
Parameters
-------------------
other_manager : CollisionManager
Another collision manager object
return_names : bool
If true, a set is returned containing the names
of all pairs of objects in collision.
return_data : bool
If true, a list of ContactData is returned as well
Returns
-------------
is_collision : bool
True if a collision occurred between any pair of objects
and False otherwise
names : set of 2-tup
The set of pairwise collisions. Each tuple
contains two names (first from this manager,
second from the other_manager) indicating
that the two corresponding objects are in collision.
contacts : list of ContactData
All contacts detected
### Response:
def in_collision_other(self, other_manager,
return_names=False, return_data=False):
"""
Check if any object from this manager collides with any object
from another manager.
Parameters
-------------------
other_manager : CollisionManager
Another collision manager object
return_names : bool
If true, a set is returned containing the names
of all pairs of objects in collision.
return_data : bool
If true, a list of ContactData is returned as well
Returns
-------------
is_collision : bool
True if a collision occurred between any pair of objects
and False otherwise
names : set of 2-tup
The set of pairwise collisions. Each tuple
contains two names (first from this manager,
second from the other_manager) indicating
that the two corresponding objects are in collision.
contacts : list of ContactData
All contacts detected
"""
cdata = fcl.CollisionData()
if return_names or return_data:
cdata = fcl.CollisionData(
request=fcl.CollisionRequest(
num_max_contacts=100000,
enable_contact=True))
self._manager.collide(other_manager._manager,
cdata,
fcl.defaultCollisionCallback)
result = cdata.result.is_collision
objs_in_collision = set()
contact_data = []
if return_names or return_data:
for contact in cdata.result.contacts:
reverse = False
names = (self._extract_name(contact.o1),
other_manager._extract_name(contact.o2))
if names[0] is None:
names = (self._extract_name(contact.o2),
other_manager._extract_name(contact.o1))
reverse = True
if return_names:
objs_in_collision.add(names)
if return_data:
if reverse:
names = reversed(names)
contact_data.append(ContactData(names, contact))
if return_names and return_data:
return result, objs_in_collision, contact_data
elif return_names:
return result, objs_in_collision
elif return_data:
return result, contact_data
else:
return result |
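A sketch with trimesh's CollisionManager (python-fcl must be installed); the meshes and object names are illustrative.
import trimesh

m1 = trimesh.collision.CollisionManager()
m2 = trimesh.collision.CollisionManager()
m1.add_object("box_a", trimesh.creation.box())
m2.add_object("box_b", trimesh.creation.box())   # both boxes sit at the origin, so they overlap

hit, names = m1.in_collision_other(m2, return_names=True)
# hit is True and names contains the pair ("box_a", "box_b")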
async def close_room(self, room, namespace=None):
"""Close a room.
The only difference with the :func:`socketio.Server.close_room` method
is that when the ``namespace`` argument is not given the namespace
associated with the class is used.
Note: this method is a coroutine.
"""
return await self.server.close_room(
room, namespace=namespace or self.namespace) | Close a room.
The only difference with the :func:`socketio.Server.close_room` method
is that when the ``namespace`` argument is not given the namespace
associated with the class is used.
Note: this method is a coroutine. | Below is the instruction that describes the task:
### Input:
Close a room.
The only difference with the :func:`socketio.Server.close_room` method
is that when the ``namespace`` argument is not given the namespace
associated with the class is used.
Note: this method is a coroutine.
### Response:
async def close_room(self, room, namespace=None):
"""Close a room.
The only difference with the :func:`socketio.Server.close_room` method
is that when the ``namespace`` argument is not given the namespace
associated with the class is used.
Note: this method is a coroutine.
"""
return await self.server.close_room(
room, namespace=namespace or self.namespace) |
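A hedged python-socketio sketch showing the call from a class-based namespace, where the namespace argument is filled in implicitly; event and room names are placeholders.
import socketio

sio = socketio.AsyncServer()

class ChatNamespace(socketio.AsyncNamespace):
    async def on_end_chat(self, sid, data):
        # Closes the room within this namespace ('/chat').
        await self.close_room(data["room"])

sio.register_namespace(ChatNamespace("/chat"))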
def detect(self, volume_system, vstype='detect'):
"""Finds and mounts all volumes based on mmls."""
try:
cmd = ['mmls']
if volume_system.parent.offset:
cmd.extend(['-o', str(volume_system.parent.offset // volume_system.disk.block_size)])
if vstype in ('dos', 'mac', 'bsd', 'sun', 'gpt'):
cmd.extend(['-t', vstype])
cmd.append(volume_system.parent.get_raw_path())
output = _util.check_output_(cmd, stderr=subprocess.STDOUT)
volume_system.volume_source = 'multi'
except Exception as e:
# some bug in sleuthkit makes detection sometimes difficult, so we hack around it:
if hasattr(e, 'output') and "(GPT or DOS at 0)" in e.output.decode() and vstype != 'gpt':
volume_system.vstype = 'gpt'
# noinspection PyBroadException
try:
logger.warning("Error in retrieving volume info: mmls couldn't decide between GPT and DOS, "
"choosing GPT for you. Use --vstype=dos to force DOS.", exc_info=True)
cmd = ['mmls', '-t', 'gpt', self.parent.get_raw_path()]
output = _util.check_output_(cmd, stderr=subprocess.STDOUT)
volume_system.volume_source = 'multi'
except Exception as e:
logger.exception("Failed executing mmls command")
raise SubsystemError(e)
else:
logger.exception("Failed executing mmls command")
raise SubsystemError(e)
output = output.split("Description", 1)[-1]
for line in output.splitlines():
if not line:
continue
# noinspection PyBroadException
try:
values = line.split(None, 5)
# sometimes there are only 5 elements available
description = ''
index, slot, start, end, length = values[0:5]
if len(values) > 5:
description = values[5]
volume = volume_system._make_subvolume(
index=self._format_index(volume_system, int(index[:-1])),
offset=int(start) * volume_system.disk.block_size,
size=int(length) * volume_system.disk.block_size
)
volume.info['fsdescription'] = description
except Exception:
logger.exception("Error while parsing mmls output")
continue
if slot.lower() == 'meta':
volume.flag = 'meta'
logger.info("Found meta volume: block offset: {0}, length: {1}".format(start, length))
elif slot.lower().startswith('-----'):
volume.flag = 'unalloc'
logger.info("Found unallocated space: block offset: {0}, length: {1}".format(start, length))
else:
volume.flag = 'alloc'
if ":" in slot:
volume.slot = _util.determine_slot(*slot.split(':'))
else:
volume.slot = _util.determine_slot(-1, slot)
volume_system._assign_disktype_data(volume)
logger.info("Found allocated {2}: block offset: {0}, length: {1} ".format(start, length,
volume.info['fsdescription']))
yield volume | Finds and mounts all volumes based on mmls. | Below is the instruction that describes the task:
### Input:
Finds and mounts all volumes based on mmls.
### Response:
def detect(self, volume_system, vstype='detect'):
"""Finds and mounts all volumes based on mmls."""
try:
cmd = ['mmls']
if volume_system.parent.offset:
cmd.extend(['-o', str(volume_system.parent.offset // volume_system.disk.block_size)])
if vstype in ('dos', 'mac', 'bsd', 'sun', 'gpt'):
cmd.extend(['-t', vstype])
cmd.append(volume_system.parent.get_raw_path())
output = _util.check_output_(cmd, stderr=subprocess.STDOUT)
volume_system.volume_source = 'multi'
except Exception as e:
# some bug in sleuthkit makes detection sometimes difficult, so we hack around it:
if hasattr(e, 'output') and "(GPT or DOS at 0)" in e.output.decode() and vstype != 'gpt':
volume_system.vstype = 'gpt'
# noinspection PyBroadException
try:
logger.warning("Error in retrieving volume info: mmls couldn't decide between GPT and DOS, "
"choosing GPT for you. Use --vstype=dos to force DOS.", exc_info=True)
cmd = ['mmls', '-t', 'gpt', self.parent.get_raw_path()]
output = _util.check_output_(cmd, stderr=subprocess.STDOUT)
volume_system.volume_source = 'multi'
except Exception as e:
logger.exception("Failed executing mmls command")
raise SubsystemError(e)
else:
logger.exception("Failed executing mmls command")
raise SubsystemError(e)
output = output.split("Description", 1)[-1]
for line in output.splitlines():
if not line:
continue
# noinspection PyBroadException
try:
values = line.split(None, 5)
# sometimes there are only 5 elements available
description = ''
index, slot, start, end, length = values[0:5]
if len(values) > 5:
description = values[5]
volume = volume_system._make_subvolume(
index=self._format_index(volume_system, int(index[:-1])),
offset=int(start) * volume_system.disk.block_size,
size=int(length) * volume_system.disk.block_size
)
volume.info['fsdescription'] = description
except Exception:
logger.exception("Error while parsing mmls output")
continue
if slot.lower() == 'meta':
volume.flag = 'meta'
logger.info("Found meta volume: block offset: {0}, length: {1}".format(start, length))
elif slot.lower().startswith('-----'):
volume.flag = 'unalloc'
logger.info("Found unallocated space: block offset: {0}, length: {1}".format(start, length))
else:
volume.flag = 'alloc'
if ":" in slot:
volume.slot = _util.determine_slot(*slot.split(':'))
else:
volume.slot = _util.determine_slot(-1, slot)
volume_system._assign_disktype_data(volume)
logger.info("Found allocated {2}: block offset: {0}, length: {1} ".format(start, length,
volume.info['fsdescription']))
yield volume |
def past_active(self):
"""
Weak verbs
I
>>> verb = WeakOldNorseVerb()
>>> verb.set_canonic_forms(["kalla", "kallaði", "kallaðinn"])
>>> verb.past_active()
['kallaða', 'kallaðir', 'kallaði', 'kölluðum', 'kölluðuð', 'kölluðu']
II
>>> verb = WeakOldNorseVerb()
>>> verb.set_canonic_forms(["mæla", "mælti", "mæltr"])
>>> verb.past_active()
['mælta', 'mæltir', 'mælti', 'mæltum', 'mæltuð', 'mæltu']
III
>>> verb = WeakOldNorseVerb()
>>> verb.set_canonic_forms(["telja", "taldi", "talinn"])
>>> verb.past_active()
['talda', 'taldir', 'taldi', 'töldum', 'tölduð', 'töldu']
IV
>>> verb = WeakOldNorseVerb()
>>> verb.set_canonic_forms(["vaka", "vakti", "vakat"])
>>> verb.past_active()
['vakta', 'vaktir', 'vakti', 'vöktum', 'vöktuð', 'vöktu']
:return:
"""
forms = []
stem = self.sfg3et[:-1]
forms.append(stem+"a")
forms.append(self.sfg3et+"r")
forms.append(self.sfg3et)
forms.append(apply_u_umlaut(stem)+"um")
forms.append(apply_u_umlaut(stem)+"uð")
forms.append(apply_u_umlaut(stem)+"u")
return forms | Weak verbs
I
>>> verb = WeakOldNorseVerb()
>>> verb.set_canonic_forms(["kalla", "kallaði", "kallaðinn"])
>>> verb.past_active()
['kallaða', 'kallaðir', 'kallaði', 'kölluðum', 'kölluðuð', 'kölluðu']
II
>>> verb = WeakOldNorseVerb()
>>> verb.set_canonic_forms(["mæla", "mælti", "mæltr"])
>>> verb.past_active()
['mælta', 'mæltir', 'mælti', 'mæltum', 'mæltuð', 'mæltu']
III
>>> verb = WeakOldNorseVerb()
>>> verb.set_canonic_forms(["telja", "taldi", "talinn"])
>>> verb.past_active()
['talda', 'taldir', 'taldi', 'töldum', 'tölduð', 'töldu']
IV
>>> verb = WeakOldNorseVerb()
>>> verb.set_canonic_forms(["vaka", "vakti", "vakat"])
>>> verb.past_active()
['vakta', 'vaktir', 'vakti', 'vöktum', 'vöktuð', 'vöktu']
:return: | Below is the instruction that describes the task:
### Input:
Weak verbs
I
>>> verb = WeakOldNorseVerb()
>>> verb.set_canonic_forms(["kalla", "kallaði", "kallaðinn"])
>>> verb.past_active()
['kallaða', 'kallaðir', 'kallaði', 'kölluðum', 'kölluðuð', 'kölluðu']
II
>>> verb = WeakOldNorseVerb()
>>> verb.set_canonic_forms(["mæla", "mælti", "mæltr"])
>>> verb.past_active()
['mælta', 'mæltir', 'mælti', 'mæltum', 'mæltuð', 'mæltu']
III
>>> verb = WeakOldNorseVerb()
>>> verb.set_canonic_forms(["telja", "taldi", "talinn"])
>>> verb.past_active()
['talda', 'taldir', 'taldi', 'töldum', 'tölduð', 'töldu']
IV
>>> verb = WeakOldNorseVerb()
>>> verb.set_canonic_forms(["vaka", "vakti", "vakat"])
>>> verb.past_active()
['vakta', 'vaktir', 'vakti', 'vöktum', 'vöktuð', 'vöktu']
:return:
### Response:
def past_active(self):
"""
Weak verbs
I
>>> verb = WeakOldNorseVerb()
>>> verb.set_canonic_forms(["kalla", "kallaði", "kallaðinn"])
>>> verb.past_active()
['kallaða', 'kallaðir', 'kallaði', 'kölluðum', 'kölluðuð', 'kölluðu']
II
>>> verb = WeakOldNorseVerb()
>>> verb.set_canonic_forms(["mæla", "mælti", "mæltr"])
>>> verb.past_active()
['mælta', 'mæltir', 'mælti', 'mæltum', 'mæltuð', 'mæltu']
III
>>> verb = WeakOldNorseVerb()
>>> verb.set_canonic_forms(["telja", "taldi", "talinn"])
>>> verb.past_active()
['talda', 'taldir', 'taldi', 'töldum', 'tölduð', 'töldu']
IV
>>> verb = WeakOldNorseVerb()
>>> verb.set_canonic_forms(["vaka", "vakti", "vakat"])
>>> verb.past_active()
['vakta', 'vaktir', 'vakti', 'vöktum', 'vöktuð', 'vöktu']
:return:
"""
forms = []
stem = self.sfg3et[:-1]
forms.append(stem+"a")
forms.append(self.sfg3et+"r")
forms.append(self.sfg3et)
forms.append(apply_u_umlaut(stem)+"um")
forms.append(apply_u_umlaut(stem)+"uð")
forms.append(apply_u_umlaut(stem)+"u")
return forms |
def connect(self):
"""
Starts up an authentication session for the client using cookie
authentication if necessary.
"""
if self.r_session:
self.session_logout()
if self.admin_party:
self._use_iam = False
self.r_session = ClientSession(
timeout=self._timeout
)
elif self._use_basic_auth:
self._use_iam = False
self.r_session = BasicSession(
self._user,
self._auth_token,
self.server_url,
timeout=self._timeout
)
elif self._use_iam:
self.r_session = IAMSession(
self._auth_token,
self.server_url,
auto_renew=self._auto_renew,
client_id=self._iam_client_id,
client_secret=self._iam_client_secret,
timeout=self._timeout
)
else:
self.r_session = CookieSession(
self._user,
self._auth_token,
self.server_url,
auto_renew=self._auto_renew,
timeout=self._timeout
)
# If a Transport Adapter was supplied add it to the session
if self.adapter is not None:
self.r_session.mount(self.server_url, self.adapter)
if self._client_user_header is not None:
self.r_session.headers.update(self._client_user_header)
self.session_login()
# Utilize an event hook to append to the response message
# using :func:`~cloudant.common_util.append_response_error_content`
self.r_session.hooks['response'].append(append_response_error_content) | Starts up an authentication session for the client using cookie
authentication if necessary. | Below is the instruction that describes the task:
### Input:
Starts up an authentication session for the client using cookie
authentication if necessary.
### Response:
def connect(self):
"""
Starts up an authentication session for the client using cookie
authentication if necessary.
"""
if self.r_session:
self.session_logout()
if self.admin_party:
self._use_iam = False
self.r_session = ClientSession(
timeout=self._timeout
)
elif self._use_basic_auth:
self._use_iam = False
self.r_session = BasicSession(
self._user,
self._auth_token,
self.server_url,
timeout=self._timeout
)
elif self._use_iam:
self.r_session = IAMSession(
self._auth_token,
self.server_url,
auto_renew=self._auto_renew,
client_id=self._iam_client_id,
client_secret=self._iam_client_secret,
timeout=self._timeout
)
else:
self.r_session = CookieSession(
self._user,
self._auth_token,
self.server_url,
auto_renew=self._auto_renew,
timeout=self._timeout
)
# If a Transport Adapter was supplied add it to the session
if self.adapter is not None:
self.r_session.mount(self.server_url, self.adapter)
if self._client_user_header is not None:
self.r_session.headers.update(self._client_user_header)
self.session_login()
# Utilize an event hook to append to the response message
# using :func:`~cloudant.common_util.append_response_error_content`
self.r_session.hooks['response'].append(append_response_error_content) |
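A hedged sketch of the usual client lifecycle around connect() in the cloudant library; the credentials and URL are placeholders.
from cloudant.client import Cloudant

client = Cloudant("USERNAME", "PASSWORD", url="https://account.cloudant.example")
client.connect()              # starts the authenticated session shown above
try:
    print(client.all_dbs())   # any authenticated request
finally:
    client.disconnect()       # ends the session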
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'limit') and self.limit is not None:
_dict['limit'] = self.limit
if hasattr(self, 'model') and self.model is not None:
_dict['model'] = self.model
return _dict | Return a json dictionary representing this model. | Below is the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'limit') and self.limit is not None:
_dict['limit'] = self.limit
if hasattr(self, 'model') and self.model is not None:
_dict['model'] = self.model
return _dict |
def add_member(self, member):
"""
Add a single member to the scope.
You may only edit the list of members if the pykechain credentials allow this.
:param member: single username to be added to the scope list of members
:type member: basestring
:raises APIError: when unable to update the scope member
"""
select_action = 'add_member'
self._update_scope_project_team(select_action=select_action, user=member, user_type='member') | Add a single member to the scope.
You may only edit the list of members if the pykechain credentials allow this.
:param member: single username to be added to the scope list of members
:type member: basestring
:raises APIError: when unable to update the scope member | Below is the instruction that describes the task:
### Input:
Add a single member to the scope.
You may only edit the list of members if the pykechain credentials allow this.
:param member: single username to be added to the scope list of members
:type member: basestring
:raises APIError: when unable to update the scope member
### Response:
def add_member(self, member):
"""
Add a single member to the scope.
You may only edit the list of members if the pykechain credentials allow this.
:param member: single username to be added to the scope list of members
:type member: basestring
:raises APIError: when unable to update the scope member
"""
select_action = 'add_member'
self._update_scope_project_team(select_action=select_action, user=member, user_type='member') |
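A pykechain usage sketch; the URL, credentials, scope name and username are placeholders, and the scope lookup call is illustrative.
from pykechain import Client

client = Client(url="https://kechain.example.com")
client.login(username="manager", password="secret")
project = client.scope("My project")       # retrieve a single Scope
project.add_member("some_username")        # requires permission to edit members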
def get_ways(self, way_id=None, **kwargs):
"""
Alias for get_elements() but filter the result by Way
:param way_id: The Id of the way
:type way_id: Integer
:return: List of elements
"""
return self.get_elements(Way, elem_id=way_id, **kwargs) | Alias for get_elements() but filter the result by Way
:param way_id: The Id of the way
:type way_id: Integer
:return: List of elements | Below is the instruction that describes the task:
### Input:
Alias for get_elements() but filter the result by Way
:param way_id: The Id of the way
:type way_id: Integer
:return: List of elements
### Response:
def get_ways(self, way_id=None, **kwargs):
"""
Alias for get_elements() but filter the result by Way
:param way_id: The Id of the way
:type way_id: Integer
:return: List of elements
"""
return self.get_elements(Way, elem_id=way_id, **kwargs) |
def set_mouse_handler_for_range(self, x_min, x_max, y_min, y_max, handler=None):
"""
Set mouse handler for a region.
"""
for x, y in product(range(x_min, x_max), range(y_min, y_max)):
self.mouse_handlers[x,y] = handler | Set mouse handler for a region. | Below is the instruction that describes the task:
### Input:
Set mouse handler for a region.
### Response:
def set_mouse_handler_for_range(self, x_min, x_max, y_min, y_max, handler=None):
"""
Set mouse handler for a region.
"""
for x, y in product(range(x_min, x_max), range(y_min, y_max)):
self.mouse_handlers[x,y] = handler |
def hydrate_struct(address_mapper, address):
"""Given an AddressMapper and an Address, resolve a Struct from a BUILD file.
Recursively collects any embedded addressables within the Struct, but will not walk into a
dependencies field, since those should be requested explicitly by rules.
"""
address_family = yield Get(AddressFamily, Dir(address.spec_path))
struct = address_family.addressables.get(address)
addresses = address_family.addressables
if not struct or address not in addresses:
_raise_did_you_mean(address_family, address.target_name)
# TODO: This is effectively: "get the BuildFileAddress for this Address".
# see https://github.com/pantsbuild/pants/issues/6657
address = next(build_address for build_address in addresses if build_address == address)
inline_dependencies = []
def maybe_append(outer_key, value):
if isinstance(value, six.string_types):
if outer_key != 'dependencies':
inline_dependencies.append(Address.parse(value,
relative_to=address.spec_path,
subproject_roots=address_mapper.subproject_roots))
elif isinstance(value, Struct):
collect_inline_dependencies(value)
def collect_inline_dependencies(item):
for key, value in sorted(item._asdict().items(), key=_key_func):
if not AddressableDescriptor.is_addressable(item, key):
continue
if isinstance(value, MutableMapping):
for _, v in sorted(value.items(), key=_key_func):
maybe_append(key, v)
elif isinstance(value, MutableSequence):
for v in value:
maybe_append(key, v)
else:
maybe_append(key, value)
# Recursively collect inline dependencies from the fields of the struct into `inline_dependencies`.
collect_inline_dependencies(struct)
# And then hydrate the inline dependencies.
hydrated_inline_dependencies = yield [Get(HydratedStruct, Address, a) for a in inline_dependencies]
dependencies = [d.value for d in hydrated_inline_dependencies]
def maybe_consume(outer_key, value):
if isinstance(value, six.string_types):
if outer_key == 'dependencies':
# Don't recurse into the dependencies field of a Struct, since those will be explicitly
# requested by tasks. But do ensure that their addresses are absolute, since we're
# about to lose the context in which they were declared.
value = Address.parse(value,
relative_to=address.spec_path,
subproject_roots=address_mapper.subproject_roots)
else:
value = dependencies[maybe_consume.idx]
maybe_consume.idx += 1
elif isinstance(value, Struct):
value = consume_dependencies(value)
return value
# NB: Some pythons throw an UnboundLocalError for `idx` if it is a simple local variable.
maybe_consume.idx = 0
# 'zip' the previously-requested dependencies back together as struct fields.
def consume_dependencies(item, args=None):
hydrated_args = args or {}
for key, value in sorted(item._asdict().items(), key=_key_func):
if not AddressableDescriptor.is_addressable(item, key):
hydrated_args[key] = value
continue
if isinstance(value, MutableMapping):
container_type = type(value)
hydrated_args[key] = container_type((k, maybe_consume(key, v))
for k, v in sorted(value.items(), key=_key_func))
elif isinstance(value, MutableSequence):
container_type = type(value)
hydrated_args[key] = container_type(maybe_consume(key, v) for v in value)
else:
hydrated_args[key] = maybe_consume(key, value)
return _hydrate(type(item), address.spec_path, **hydrated_args)
yield HydratedStruct(consume_dependencies(struct, args={'address': address})) | Given an AddressMapper and an Address, resolve a Struct from a BUILD file.
Recursively collects any embedded addressables within the Struct, but will not walk into a
dependencies field, since those should be requested explicitly by rules. | Below is the instruction that describes the task:
### Input:
Given an AddressMapper and an Address, resolve a Struct from a BUILD file.
Recursively collects any embedded addressables within the Struct, but will not walk into a
dependencies field, since those should be requested explicitly by rules.
### Response:
def hydrate_struct(address_mapper, address):
"""Given an AddressMapper and an Address, resolve a Struct from a BUILD file.
Recursively collects any embedded addressables within the Struct, but will not walk into a
dependencies field, since those should be requested explicitly by rules.
"""
address_family = yield Get(AddressFamily, Dir(address.spec_path))
struct = address_family.addressables.get(address)
addresses = address_family.addressables
if not struct or address not in addresses:
_raise_did_you_mean(address_family, address.target_name)
# TODO: This is effectively: "get the BuildFileAddress for this Address".
# see https://github.com/pantsbuild/pants/issues/6657
address = next(build_address for build_address in addresses if build_address == address)
inline_dependencies = []
def maybe_append(outer_key, value):
if isinstance(value, six.string_types):
if outer_key != 'dependencies':
inline_dependencies.append(Address.parse(value,
relative_to=address.spec_path,
subproject_roots=address_mapper.subproject_roots))
elif isinstance(value, Struct):
collect_inline_dependencies(value)
def collect_inline_dependencies(item):
for key, value in sorted(item._asdict().items(), key=_key_func):
if not AddressableDescriptor.is_addressable(item, key):
continue
if isinstance(value, MutableMapping):
for _, v in sorted(value.items(), key=_key_func):
maybe_append(key, v)
elif isinstance(value, MutableSequence):
for v in value:
maybe_append(key, v)
else:
maybe_append(key, value)
# Recursively collect inline dependencies from the fields of the struct into `inline_dependencies`.
collect_inline_dependencies(struct)
# And then hydrate the inline dependencies.
hydrated_inline_dependencies = yield [Get(HydratedStruct, Address, a) for a in inline_dependencies]
dependencies = [d.value for d in hydrated_inline_dependencies]
def maybe_consume(outer_key, value):
if isinstance(value, six.string_types):
if outer_key == 'dependencies':
# Don't recurse into the dependencies field of a Struct, since those will be explicitly
# requested by tasks. But do ensure that their addresses are absolute, since we're
# about to lose the context in which they were declared.
value = Address.parse(value,
relative_to=address.spec_path,
subproject_roots=address_mapper.subproject_roots)
else:
value = dependencies[maybe_consume.idx]
maybe_consume.idx += 1
elif isinstance(value, Struct):
value = consume_dependencies(value)
return value
# NB: Some pythons throw an UnboundLocalError for `idx` if it is a simple local variable.
maybe_consume.idx = 0
# 'zip' the previously-requested dependencies back together as struct fields.
def consume_dependencies(item, args=None):
hydrated_args = args or {}
for key, value in sorted(item._asdict().items(), key=_key_func):
if not AddressableDescriptor.is_addressable(item, key):
hydrated_args[key] = value
continue
if isinstance(value, MutableMapping):
container_type = type(value)
hydrated_args[key] = container_type((k, maybe_consume(key, v))
for k, v in sorted(value.items(), key=_key_func))
elif isinstance(value, MutableSequence):
container_type = type(value)
hydrated_args[key] = container_type(maybe_consume(key, v) for v in value)
else:
hydrated_args[key] = maybe_consume(key, value)
return _hydrate(type(item), address.spec_path, **hydrated_args)
yield HydratedStruct(consume_dependencies(struct, args={'address': address})) |
def print_value(value: Any, type_: GraphQLInputType) -> str:
"""Convenience function for printing a Python value"""
return print_ast(ast_from_value(value, type_)) | Convenience function for printing a Python value | Below is the instruction that describes the task:
### Input:
Convenience function for printing a Python value
### Response:
def print_value(value: Any, type_: GraphQLInputType) -> str:
"""Convenience function for printing a Python value"""
return print_ast(ast_from_value(value, type_)) |
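A small sketch with graphql-core scalar and list types; the exact literal text follows whatever print_ast produces for the generated AST.
from graphql import GraphQLInt, GraphQLList, GraphQLString

print(print_value("hello", GraphQLString))               # '"hello"'
print(print_value([1, 2, 3], GraphQLList(GraphQLInt)))   # '[1, 2, 3]'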
def shiftx_image2d_flux(image2d_orig, xoffset):
"""Resample 2D image using a shift in the x direction (flux is preserved).
Parameters
----------
image2d_orig : numpy array
2D image to be resampled.
xoffset : float
Offset to be applied.
Returns
-------
image2d_resampled : numpy array
Resampled 2D image.
"""
if image2d_orig.ndim == 1:
naxis1 = image2d_orig.size
elif image2d_orig.ndim == 2:
naxis2, naxis1 = image2d_orig.shape
else:
print('>>> image2d_orig.shape:', image2d_orig.shape)
raise ValueError('Unexpected number of dimensions')
return resample_image2d_flux(image2d_orig,
naxis1=naxis1,
cdelt1=1,
crval1=1,
crpix1=1,
coeff=[xoffset, 1]) | Resample 2D image using a shift in the x direction (flux is preserved).
Parameters
----------
image2d_orig : numpy array
2D image to be resampled.
xoffset : float
Offset to be applied.
Returns
-------
image2d_resampled : numpy array
Resampled 2D image. | Below is the instruction that describes the task:
### Input:
Resample 2D image using a shift in the x direction (flux is preserved).
Parameters
----------
image2d_orig : numpy array
2D image to be resampled.
xoffset : float
Offset to be applied.
Returns
-------
image2d_resampled : numpy array
Resampled 2D image.
### Response:
def shiftx_image2d_flux(image2d_orig, xoffset):
"""Resample 2D image using a shift in the x direction (flux is preserved).
Parameters
----------
image2d_orig : numpy array
2D image to be resampled.
xoffset : float
Offset to be applied.
Returns
-------
image2d_resampled : numpy array
Resampled 2D image.
"""
if image2d_orig.ndim == 1:
naxis1 = image2d_orig.size
elif image2d_orig.ndim == 2:
naxis2, naxis1 = image2d_orig.shape
else:
print('>>> image2d_orig.shape:', image2d_orig.shape)
raise ValueError('Unexpected number of dimensions')
return resample_image2d_flux(image2d_orig,
naxis1=naxis1,
cdelt1=1,
crval1=1,
crpix1=1,
coeff=[xoffset, 1]) |
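A minimal numpy sketch of calling the helper above; resample_image2d_flux is assumed to come from the same module.
import numpy as np

image2d = np.zeros((100, 200))
image2d[:, 100] = 1.0                                # a single bright column
shifted = shiftx_image2d_flux(image2d, xoffset=2.5)  # shift along x, flux preserved
print(image2d.sum(), shifted.sum())                  # the totals should agree closely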
def import_lsdinst(self, struct_data):
"""import from an lsdinst struct"""
self.name = struct_data['name']
self.automate = struct_data['data']['automate']
self.pan = struct_data['data']['pan']
if self.table is not None:
self.table.import_lsdinst(struct_data) | import from an lsdinst struct | Below is the instruction that describes the task:
### Input:
import from an lsdinst struct
### Response:
def import_lsdinst(self, struct_data):
"""import from an lsdinst struct"""
self.name = struct_data['name']
self.automate = struct_data['data']['automate']
self.pan = struct_data['data']['pan']
if self.table is not None:
self.table.import_lsdinst(struct_data) |
def has_neigh(tag_name, params=None, content=None, left=True):
"""
This function generates functions, which matches all tags with neighbours
defined by parameters.
Args:
tag_name (str): Tag has to have neighbour with this tagname.
params (dict): Tag has to have neighbour with this parameters.
params (str): Tag has to have neighbour with this content.
left (bool, default True): Tag has to have neigbour on the left, or
right (set to ``False``).
Returns:
bool: True for every matching tag.
Note:
This function can be used as parameter for ``.find()`` method in
HTMLElement.
"""
def has_neigh_closure(element):
if not element.parent \
or not (element.isTag() and not element.isEndTag()):
return False
# filter only visible tags/neighbours
childs = element.parent.childs
childs = filter(
lambda x: (x.isTag() and not x.isEndTag()) \
or x.getContent().strip() or x is element,
childs
)
if len(childs) <= 1:
return False
ioe = childs.index(element)
if left and ioe > 0:
return is_equal_tag(childs[ioe - 1], tag_name, params, content)
if not left and ioe + 1 < len(childs):
return is_equal_tag(childs[ioe + 1], tag_name, params, content)
return False
return has_neigh_closure | This function generates functions, which matches all tags with neighbours
defined by parameters.
Args:
tag_name (str): Tag has to have neighbour with this tagname.
params (dict): Tag has to have neighbour with this parameters.
params (str): Tag has to have neighbour with this content.
left (bool, default True): Tag has to have neigbour on the left, or
right (set to ``False``).
Returns:
bool: True for every matching tag.
Note:
This function can be used as parameter for ``.find()`` method in
HTMLElement. | Below is the instruction that describes the task:
### Input:
This function generates functions, which matches all tags with neighbours
defined by parameters.
Args:
tag_name (str): Tag has to have neighbour with this tagname.
params (dict): Tag has to have neighbour with this parameters.
params (str): Tag has to have neighbour with this content.
left (bool, default True): Tag has to have neigbour on the left, or
right (set to ``False``).
Returns:
bool: True for every matching tag.
Note:
This function can be used as parameter for ``.find()`` method in
HTMLElement.
### Response:
def has_neigh(tag_name, params=None, content=None, left=True):
"""
This function generates functions, which matches all tags with neighbours
defined by parameters.
Args:
tag_name (str): Tag has to have neighbour with this tagname.
params (dict): Tag has to have neighbour with this parameters.
params (str): Tag has to have neighbour with this content.
left (bool, default True): Tag has to have neigbour on the left, or
right (set to ``False``).
Returns:
bool: True for every matching tag.
Note:
This function can be used as parameter for ``.find()`` method in
HTMLElement.
"""
def has_neigh_closure(element):
if not element.parent \
or not (element.isTag() and not element.isEndTag()):
return False
# filter only visible tags/neighbours
childs = element.parent.childs
childs = filter(
lambda x: (x.isTag() and not x.isEndTag()) \
or x.getContent().strip() or x is element,
childs
)
if len(childs) <= 1:
return False
ioe = childs.index(element)
if left and ioe > 0:
return is_equal_tag(childs[ioe - 1], tag_name, params, content)
if not left and ioe + 1 < len(childs):
return is_equal_tag(childs[ioe + 1], tag_name, params, content)
return False
return has_neigh_closure |
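A sketch of plugging the generated matcher into dhtmlparser's find(); the sample HTML is illustrative and relies on find() accepting an fn= predicate, as the note above states.
import dhtmlparser

dom = dhtmlparser.parseString("<h1>Title</h1><p>lead</p><p>body</p>")
# Select <p> elements whose visible left neighbour is an <h1>.
lead_paragraphs = dom.find("p", fn=has_neigh("h1", left=True))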
def groupByKey(self, numPartitions=None):
"""
Return a new DStream by applying groupByKey on each RDD.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
return self.transform(lambda rdd: rdd.groupByKey(numPartitions)) | Return a new DStream by applying groupByKey on each RDD. | Below is the instruction that describes the task:
### Input:
Return a new DStream by applying groupByKey on each RDD.
### Response:
def groupByKey(self, numPartitions=None):
"""
Return a new DStream by applying groupByKey on each RDD.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
return self.transform(lambda rdd: rdd.groupByKey(numPartitions)) |
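A hedged PySpark Streaming sketch around groupByKey; the socket source, host and port are placeholders.
from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext(appName="GroupByKeyExample")
ssc = StreamingContext(sc, batchDuration=5)

lines = ssc.socketTextStream("localhost", 9999)             # placeholder source
pairs = lines.map(lambda line: (line.split(" ")[0], line))  # key by first word
grouped = pairs.groupByKey()                                # DStream of (key, iterable of values)
grouped.mapValues(list).pprint()

ssc.start()
ssc.awaitTermination()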
def update(self, columns=(), by=(), where=(), **kwds):
"""update from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.update('a*2',
... where='b > 20').show() # doctest: +NORMALIZE_WHITESPACE
a b
----
1 10
2 20
6 30
"""
return self._seu('update', columns, by, where, kwds) | update from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.update('a*2',
... where='b > 20').show() # doctest: +NORMALIZE_WHITESPACE
a b
----
1 10
2 20
6 30 | Below is the instruction that describes the task:
### Input:
update from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.update('a*2',
... where='b > 20').show() # doctest: +NORMALIZE_WHITESPACE
a b
----
1 10
2 20
6 30
### Response:
def update(self, columns=(), by=(), where=(), **kwds):
"""update from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.update('a*2',
... where='b > 20').show() # doctest: +NORMALIZE_WHITESPACE
a b
----
1 10
2 20
6 30
"""
return self._seu('update', columns, by, where, kwds) |
def _utc_year(self):
"""Return a fractional UTC year, for convenience when plotting.
An experiment, probably superseded by the ``J`` attribute below.
"""
d = self._utc_float() - 1721059.5
#d += offset
C = 365 * 100 + 24
d -= 365
d += d // C - d // (4 * C)
d += 365
# Y = d / C * 100
# print(Y)
K = 365 * 3 + 366
d -= (d + K*7//8) // K
# d -= d // 1461.0
return d / 365.0 | Return a fractional UTC year, for convenience when plotting.
An experiment, probably superseded by the ``J`` attribute below. | Below is the instruction that describes the task:
### Input:
Return a fractional UTC year, for convenience when plotting.
An experiment, probably superseded by the ``J`` attribute below.
### Response:
def _utc_year(self):
"""Return a fractional UTC year, for convenience when plotting.
An experiment, probably superseded by the ``J`` attribute below.
"""
d = self._utc_float() - 1721059.5
#d += offset
C = 365 * 100 + 24
d -= 365
d += d // C - d // (4 * C)
d += 365
# Y = d / C * 100
# print(Y)
K = 365 * 3 + 366
d -= (d + K*7//8) // K
# d -= d // 1461.0
return d / 365.0 |
def fetch_open_orders(self, limit: int) -> List[Order]:
"""Fetch latest open orders, must provide a limit."""
return self._fetch_orders_limit(self._open_orders, limit) | Fetch latest open orders, must provide a limit. | Below is the instruction that describes the task:
### Input:
Fetch latest open orders, must provide a limit.
### Response:
def fetch_open_orders(self, limit: int) -> List[Order]:
"""Fetch latest open orders, must provide a limit."""
return self._fetch_orders_limit(self._open_orders, limit) |
def _verified_version_from_id(version_id):
# type: (int) -> SerializationVersion
"""Load a message :class:`SerializationVersion` for the specified version ID.
:param int version_id: Message format version ID
:return: Message format version
:rtype: SerializationVersion
:raises NotSupportedError: if unsupported version ID is received
"""
try:
return SerializationVersion(version_id)
except ValueError as error:
raise NotSupportedError("Unsupported version {}".format(version_id), error) | Load a message :class:`SerializationVersion` for the specified version ID.
:param int version_id: Message format version ID
:return: Message format version
:rtype: SerializationVersion
:raises NotSupportedError: if unsupported version ID is received | Below is the instruction that describes the task:
### Input:
Load a message :class:`SerializationVersion` for the specified version ID.
:param int version_id: Message format version ID
:return: Message format version
:rtype: SerializationVersion
:raises NotSupportedError: if unsupported version ID is received
### Response:
def _verified_version_from_id(version_id):
# type: (int) -> SerializationVersion
"""Load a message :class:`SerializationVersion` for the specified version ID.
:param int version_id: Message format version ID
:return: Message format version
:rtype: SerializationVersion
:raises NotSupportedError: if unsupported version ID is received
"""
try:
return SerializationVersion(version_id)
except ValueError as error:
raise NotSupportedError("Unsupported version {}".format(version_id), error) |
def configure_stream(level='WARNING'):
"""Configure root logger using a standard stream handler.
Args:
level (string, optional): lowest level to log to the console
Returns:
logging.RootLogger: root logger instance with attached handler
"""
# get the root logger
root_logger = logging.getLogger()
# set the logger level to the same as will be used by the handler
root_logger.setLevel(level)
# customize formatter, align each column
template = "[%(asctime)s] %(name)-25s %(levelname)-8s %(message)s"
formatter = logging.Formatter(template)
# add a basic STDERR handler to the logger
console = logging.StreamHandler()
console.setLevel(level)
console.setFormatter(formatter)
root_logger.addHandler(console)
return root_logger | Configure root logger using a standard stream handler.
Args:
level (string, optional): lowest level to log to the console
Returns:
logging.RootLogger: root logger instance with attached handler | Below is the the instruction that describes the task:
### Input:
Configure root logger using a standard stream handler.
Args:
level (string, optional): lowest level to log to the console
Returns:
logging.RootLogger: root logger instance with attached handler
### Response:
def configure_stream(level='WARNING'):
"""Configure root logger using a standard stream handler.
Args:
level (string, optional): lowest level to log to the console
Returns:
logging.RootLogger: root logger instance with attached handler
"""
# get the root logger
root_logger = logging.getLogger()
# set the logger level to the same as will be used by the handler
root_logger.setLevel(level)
# customize formatter, align each column
template = "[%(asctime)s] %(name)-25s %(levelname)-8s %(message)s"
formatter = logging.Formatter(template)
# add a basic STDERR handler to the logger
console = logging.StreamHandler()
console.setLevel(level)
console.setFormatter(formatter)
root_logger.addHandler(console)
return root_logger |
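A minimal usage sketch, assuming configure_stream is importable from the module above ('my_app' is a hypothetical logger name):
import logging

configure_stream(level='DEBUG')                      # attach a STDERR handler to the root logger
logging.getLogger('my_app').debug('visible now')     # propagates to the root handler and is printed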
def commit_withdrawal(self, account_id, withdrawal_id, **params):
"""https://developers.coinbase.com/api/v2#commit-a-withdrawal"""
response = self._post(
'v2', 'accounts', account_id, 'withdrawals', withdrawal_id, 'commit',
data=params)
return self._make_api_object(response, Withdrawal) | https://developers.coinbase.com/api/v2#commit-a-withdrawal | Below is the the instruction that describes the task:
### Input:
https://developers.coinbase.com/api/v2#commit-a-withdrawal
### Response:
def commit_withdrawal(self, account_id, withdrawal_id, **params):
"""https://developers.coinbase.com/api/v2#commit-a-withdrawal"""
response = self._post(
'v2', 'accounts', account_id, 'withdrawals', withdrawal_id, 'commit',
data=params)
return self._make_api_object(response, Withdrawal) |
def lattice(prng, n_features, alpha, random_sign=False, low=0.3, high=0.7):
"""Returns the adjacency matrix for a lattice network.
The resulting network is a Toeplitz matrix with random values summing
between -1 and 1 and zeros along the diagonal.
The range of the values can be controlled via the parameters low and high.
If random_sign is false, all entries will be negative, otherwise their sign
will be modulated at random with probability 1/2.
Each row has maximum edges of np.ceil(alpha * n_features).
Parameters
-----------
n_features : int
alpha : float (0, 1)
The complexity / sparsity factor.
    random_sign : bool (default=False)
Randomly modulate each entry by 1 or -1 with probability of 1/2.
low : float (0, 1) (default=0.3)
Lower bound for np.random.RandomState.uniform before normalization.
high : float (0, 1) > low (default=0.7)
Upper bound for np.random.RandomState.uniform before normalization.
"""
degree = int(1 + np.round(alpha * n_features / 2.))
if random_sign:
sign_row = -1.0 * np.ones(degree) + 2 * (
prng.uniform(low=0, high=1, size=degree) > .5
)
else:
sign_row = -1.0 * np.ones(degree)
# in the *very unlikely* event that we draw a bad row that sums to zero
# (which is only possible when random_sign=True), we try again up to
# MAX_ATTEMPTS=5 times. If we are still unable to draw a good set of
# values something is probably wrong and we raise.
MAX_ATTEMPTS = 5
attempt = 0
row = np.zeros((n_features,))
while np.sum(row) == 0 and attempt < MAX_ATTEMPTS:
row = np.zeros((n_features,))
row[1 : 1 + degree] = sign_row * prng.uniform(low=low, high=high, size=degree)
attempt += 1
if np.sum(row) == 0:
raise Exception("InvalidLattice", "Rows sum to 0.")
return
# sum-normalize and keep signs
row /= np.abs(np.sum(row))
return sp.linalg.toeplitz(c=row, r=row) | Returns the adjacency matrix for a lattice network.
The resulting network is a Toeplitz matrix with random values summing
between -1 and 1 and zeros along the diagonal.
The range of the values can be controlled via the parameters low and high.
If random_sign is false, all entries will be negative, otherwise their sign
will be modulated at random with probability 1/2.
Each row has maximum edges of np.ceil(alpha * n_features).
Parameters
-----------
n_features : int
alpha : float (0, 1)
The complexity / sparsity factor.
random_sign : bool (default=False)
Randomly modulate each entry by 1 or -1 with probability of 1/2.
low : float (0, 1) (default=0.3)
Lower bound for np.random.RandomState.uniform before normalization.
high : float (0, 1) > low (default=0.7)
Upper bound for np.random.RandomState.uniform before normalization. | Below is the the instruction that describes the task:
### Input:
Returns the adjacency matrix for a lattice network.
The resulting network is a Toeplitz matrix with random values summing
between -1 and 1 and zeros along the diagonal.
The range of the values can be controlled via the parameters low and high.
If random_sign is false, all entries will be negative, otherwise their sign
will be modulated at random with probability 1/2.
Each row has maximum edges of np.ceil(alpha * n_features).
Parameters
-----------
n_features : int
alpha : float (0, 1)
The complexity / sparsity factor.
random_sign : bool (default=False)
Randomly modulate each entry by 1 or -1 with probability of 1/2.
low : float (0, 1) (default=0.3)
Lower bound for np.random.RandomState.uniform before normalization.
high : float (0, 1) > low (default=0.7)
Upper bound for np.random.RandomState.uniform before normalization.
### Response:
def lattice(prng, n_features, alpha, random_sign=False, low=0.3, high=0.7):
"""Returns the adjacency matrix for a lattice network.
The resulting network is a Toeplitz matrix with random values summing
between -1 and 1 and zeros along the diagonal.
The range of the values can be controlled via the parameters low and high.
If random_sign is false, all entries will be negative, otherwise their sign
will be modulated at random with probability 1/2.
Each row has maximum edges of np.ceil(alpha * n_features).
Parameters
-----------
n_features : int
alpha : float (0, 1)
The complexity / sparsity factor.
    random_sign : bool (default=False)
Randomly modulate each entry by 1 or -1 with probability of 1/2.
low : float (0, 1) (default=0.3)
Lower bound for np.random.RandomState.uniform before normalization.
high : float (0, 1) > low (default=0.7)
Upper bound for np.random.RandomState.uniform before normalization.
"""
degree = int(1 + np.round(alpha * n_features / 2.))
if random_sign:
sign_row = -1.0 * np.ones(degree) + 2 * (
prng.uniform(low=0, high=1, size=degree) > .5
)
else:
sign_row = -1.0 * np.ones(degree)
# in the *very unlikely* event that we draw a bad row that sums to zero
# (which is only possible when random_sign=True), we try again up to
# MAX_ATTEMPTS=5 times. If we are still unable to draw a good set of
# values something is probably wrong and we raise.
MAX_ATTEMPTS = 5
attempt = 0
row = np.zeros((n_features,))
while np.sum(row) == 0 and attempt < MAX_ATTEMPTS:
row = np.zeros((n_features,))
row[1 : 1 + degree] = sign_row * prng.uniform(low=low, high=high, size=degree)
attempt += 1
if np.sum(row) == 0:
raise Exception("InvalidLattice", "Rows sum to 0.")
return
# sum-normalize and keep signs
row /= np.abs(np.sum(row))
return sp.linalg.toeplitz(c=row, r=row) |
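An illustrative call with a seeded RandomState, assuming numpy and scipy are importable as in the module above (np / sp):
import numpy as np

prng = np.random.RandomState(0)           # seeded for reproducibility
adj = lattice(prng, n_features=6, alpha=0.3)
print(adj.shape)                          # (6, 6)
print(np.allclose(np.diag(adj), 0.0))     # True: zeros along the diagonal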
def unit_overlap(evaluated_model, reference_model):
"""
Computes unit overlap of two text documents. Documents
    have to be represented as TF models of non-empty documents.
:returns float:
0 <= overlap <= 1, where 0 means no match and 1 means
exactly the same.
"""
if not (isinstance(evaluated_model, TfModel) and isinstance(reference_model, TfModel)):
raise ValueError(
"Arguments has to be instances of 'sumy.models.TfDocumentModel'")
terms1 = frozenset(evaluated_model.terms)
terms2 = frozenset(reference_model.terms)
if not terms1 and not terms2:
raise ValueError(
"Documents can't be empty. Please pass the valid documents.")
common_terms_count = len(terms1 & terms2)
return common_terms_count / (len(terms1) + len(terms2) - common_terms_count) | Computes unit overlap of two text documents. Documents
have to be represented as TF models of non-empty documents.
:returns float:
0 <= overlap <= 1, where 0 means no match and 1 means
exactly the same. | Below is the the instruction that describes the task:
### Input:
Computes unit overlap of two text documents. Documents
have to be represented as TF models of non-empty documents.
:returns float:
0 <= overlap <= 1, where 0 means no match and 1 means
exactly the same.
### Response:
def unit_overlap(evaluated_model, reference_model):
"""
Computes unit overlap of two text documents. Documents
    have to be represented as TF models of non-empty documents.
:returns float:
0 <= overlap <= 1, where 0 means no match and 1 means
exactly the same.
"""
if not (isinstance(evaluated_model, TfModel) and isinstance(reference_model, TfModel)):
raise ValueError(
"Arguments has to be instances of 'sumy.models.TfDocumentModel'")
terms1 = frozenset(evaluated_model.terms)
terms2 = frozenset(reference_model.terms)
if not terms1 and not terms2:
raise ValueError(
"Documents can't be empty. Please pass the valid documents.")
common_terms_count = len(terms1 & terms2)
return common_terms_count / (len(terms1) + len(terms2) - common_terms_count) |
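The returned score is the Jaccard index over the two term sets; the same quantity on plain Python sets (illustrative only, not the sumy TfDocumentModel API):
terms1 = {"data", "model", "overlap"}
terms2 = {"model", "overlap", "score", "text"}
common = len(terms1 & terms2)                               # 2 shared terms
print(common / (len(terms1) + len(terms2) - common))        # 2 / 5 = 0.4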
async def get_data(self, url):
"""Get data from the TMDb API via :py:func:`aiohttp.get`.
Notes:
Updates configuration (if required) on successful requests.
Arguments:
url (:py:class:`str`): The endpoint URL and params.
Returns:
:py:class:`dict`: The parsed JSON result.
"""
logger.debug('making request to %r', url)
with aiohttp.ClientSession() as session:
async with session.get(url, headers=self.headers) as response:
body = json.loads((await response.read()).decode('utf-8'))
if response.status == HTTPStatus.OK:
if url != self.url_builder('configuration'):
await self._update_config()
return body
elif response.status == HTTPStatus.TOO_MANY_REQUESTS:
timeout = self.calculate_timeout(
response.headers['Retry-After'],
)
logger.warning(
'Request limit exceeded, waiting %s seconds',
timeout,
)
await asyncio.sleep(timeout)
return await self.get_data(url)
logger.warning(
'request failed %s: %r',
response.status,
body.get('status_message', '<no message>')
) | Get data from the TMDb API via :py:func:`aiohttp.get`.
Notes:
Updates configuration (if required) on successful requests.
Arguments:
url (:py:class:`str`): The endpoint URL and params.
Returns:
:py:class:`dict`: The parsed JSON result. | Below is the the instruction that describes the task:
### Input:
Get data from the TMDb API via :py:func:`aiohttp.get`.
Notes:
Updates configuration (if required) on successful requests.
Arguments:
url (:py:class:`str`): The endpoint URL and params.
Returns:
:py:class:`dict`: The parsed JSON result.
### Response:
async def get_data(self, url):
"""Get data from the TMDb API via :py:func:`aiohttp.get`.
Notes:
Updates configuration (if required) on successful requests.
Arguments:
url (:py:class:`str`): The endpoint URL and params.
Returns:
:py:class:`dict`: The parsed JSON result.
"""
logger.debug('making request to %r', url)
with aiohttp.ClientSession() as session:
async with session.get(url, headers=self.headers) as response:
body = json.loads((await response.read()).decode('utf-8'))
if response.status == HTTPStatus.OK:
if url != self.url_builder('configuration'):
await self._update_config()
return body
elif response.status == HTTPStatus.TOO_MANY_REQUESTS:
timeout = self.calculate_timeout(
response.headers['Retry-After'],
)
logger.warning(
'Request limit exceeded, waiting %s seconds',
timeout,
)
await asyncio.sleep(timeout)
return await self.get_data(url)
logger.warning(
'request failed %s: %r',
response.status,
body.get('status_message', '<no message>')
) |
def _RunCommand(self, command):
"""Runs the command."""
arguments = shlex.split(command)
process = subprocess.Popen(
arguments, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
universal_newlines=True)
if not process:
raise RuntimeError("Running: {0:s} failed.".format(command))
output, error = process.communicate()
if process.returncode != 0:
error = "\n".join(error.split("\n")[-5:])
raise RuntimeError("Running: {0:s} failed with error:\n{1:s}.".format(
command, error))
return output | Runs the command. | Below is the the instruction that describes the task:
### Input:
Runs the command.
### Response:
def _RunCommand(self, command):
"""Runs the command."""
arguments = shlex.split(command)
process = subprocess.Popen(
arguments, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
universal_newlines=True)
if not process:
raise RuntimeError("Running: {0:s} failed.".format(command))
output, error = process.communicate()
if process.returncode != 0:
error = "\n".join(error.split("\n")[-5:])
raise RuntimeError("Running: {0:s} failed with error:\n{1:s}.".format(
command, error))
return output |
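A standalone sketch of the same shlex + subprocess pattern outside the class, with a hypothetical command (assumes a POSIX 'echo' binary on PATH):
import shlex
import subprocess

arguments = shlex.split('echo "hello world"')
process = subprocess.Popen(
    arguments, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
    universal_newlines=True)
output, error = process.communicate()
print(process.returncode, output.strip())   # 0 hello world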
def main(sample_id, assembly_file, minsize):
"""Main executor of the process_mapping template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly: str
        Path to the fasta file generated by the assembler.
minsize: str
Min contig size to be considered a complete ORF
"""
logger.info("Starting assembly file processing")
warnings = []
fails = ""
# Parse the spades assembly file and perform the first filtering.
logger.info("Starting assembly parsing")
assembly_obj = Assembly(assembly_file, 0, 0,
sample_id, minsize)
if 'spades' in assembly_file:
assembler = "SPAdes"
else:
assembler = "MEGAHIT"
with open(".warnings", "w") as warn_fh:
t_80 = int(minsize) * 0.8
t_150 = int(minsize) * 1.5
# Check if assembly size of the first assembly is lower than 80% of the
# estimated genome size - DENV ORF has min 10k nt. If True, redo the filtering without the
# k-mer coverage filter
assembly_len = assembly_obj.get_assembly_length()
logger.debug("Checking assembly length: {}".format(assembly_len))
if assembly_obj.nORFs < 1:
warn_msg = "No complete ORFs found."
warn_fh.write(warn_msg)
fails = warn_msg
if assembly_len < t_80:
logger.warning("Assembly size ({}) smaller than the minimum "
"threshold of 80% of expected genome size. "
"Applying contig filters without the k-mer "
"coverage filter".format(assembly_len))
assembly_len = assembly_obj.get_assembly_length()
logger.debug("Checking updated assembly length: "
"{}".format(assembly_len))
if assembly_len < t_80:
warn_msg = "Assembly size smaller than the minimum" \
" threshold of 80% of expected genome size: {}".format(
assembly_len)
logger.warning(warn_msg)
warn_fh.write(warn_msg)
fails = warn_msg
if assembly_len > t_150:
warn_msg = "Assembly size ({}) larger than the maximum" \
" threshold of 150% of expected genome size.".format(
assembly_len)
logger.warning(warn_msg)
warn_fh.write(warn_msg)
fails = warn_msg
# Write json report
with open(".report.json", "w") as json_report:
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [
{"header": "Contigs ({})".format(assembler),
"value": len(assembly_obj.contigs),
"table": "assembly",
"columnBar": True},
{"header": "Assembled BP ({})".format(assembler),
"value": assembly_len,
"table": "assembly",
"columnBar": True},
{"header": "ORFs",
"value": assembly_obj.nORFs,
"table": "assembly",
"columnBar":False}
]
}],
}
if warnings:
json_dic["warnings"] = [{
"sample": sample_id,
"table": "assembly",
"value": warnings
}]
if fails:
json_dic["fail"] = [{
"sample": sample_id,
"table": "assembly",
"value": [fails]
}]
json_report.write(json.dumps(json_dic, separators=(",", ":")))
with open(".status", "w") as status_fh:
status_fh.write("pass") | Main executor of the process_mapping template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly: str
    Path to the fasta file generated by the assembler.
minsize: str
Min contig size to be considered a complete ORF | Below is the the instruction that describes the task:
### Input:
Main executor of the process_mapping template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly: str
    Path to the fasta file generated by the assembler.
minsize: str
Min contig size to be considered a complete ORF
### Response:
def main(sample_id, assembly_file, minsize):
"""Main executor of the process_mapping template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly: str
        Path to the fasta file generated by the assembler.
minsize: str
Min contig size to be considered a complete ORF
"""
logger.info("Starting assembly file processing")
warnings = []
fails = ""
# Parse the spades assembly file and perform the first filtering.
logger.info("Starting assembly parsing")
assembly_obj = Assembly(assembly_file, 0, 0,
sample_id, minsize)
if 'spades' in assembly_file:
assembler = "SPAdes"
else:
assembler = "MEGAHIT"
with open(".warnings", "w") as warn_fh:
t_80 = int(minsize) * 0.8
t_150 = int(minsize) * 1.5
# Check if assembly size of the first assembly is lower than 80% of the
# estimated genome size - DENV ORF has min 10k nt. If True, redo the filtering without the
# k-mer coverage filter
assembly_len = assembly_obj.get_assembly_length()
logger.debug("Checking assembly length: {}".format(assembly_len))
if assembly_obj.nORFs < 1:
warn_msg = "No complete ORFs found."
warn_fh.write(warn_msg)
fails = warn_msg
if assembly_len < t_80:
logger.warning("Assembly size ({}) smaller than the minimum "
"threshold of 80% of expected genome size. "
"Applying contig filters without the k-mer "
"coverage filter".format(assembly_len))
assembly_len = assembly_obj.get_assembly_length()
logger.debug("Checking updated assembly length: "
"{}".format(assembly_len))
if assembly_len < t_80:
warn_msg = "Assembly size smaller than the minimum" \
" threshold of 80% of expected genome size: {}".format(
assembly_len)
logger.warning(warn_msg)
warn_fh.write(warn_msg)
fails = warn_msg
if assembly_len > t_150:
warn_msg = "Assembly size ({}) larger than the maximum" \
" threshold of 150% of expected genome size.".format(
assembly_len)
logger.warning(warn_msg)
warn_fh.write(warn_msg)
fails = warn_msg
# Write json report
with open(".report.json", "w") as json_report:
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [
{"header": "Contigs ({})".format(assembler),
"value": len(assembly_obj.contigs),
"table": "assembly",
"columnBar": True},
{"header": "Assembled BP ({})".format(assembler),
"value": assembly_len,
"table": "assembly",
"columnBar": True},
{"header": "ORFs",
"value": assembly_obj.nORFs,
"table": "assembly",
"columnBar":False}
]
}],
}
if warnings:
json_dic["warnings"] = [{
"sample": sample_id,
"table": "assembly",
"value": warnings
}]
if fails:
json_dic["fail"] = [{
"sample": sample_id,
"table": "assembly",
"value": [fails]
}]
json_report.write(json.dumps(json_dic, separators=(",", ":")))
with open(".status", "w") as status_fh:
status_fh.write("pass") |
def get_mutator(self, obj, obj_type):
"""
Get a random mutator for the given type
"""
if obj_type == unicode:
obj_type = str
obj = str(obj)
return self._get_random(obj_type)(obj) | Get a random mutator for the given type | Below is the the instruction that describes the task:
### Input:
Get a random mutator for the given type
### Response:
def get_mutator(self, obj, obj_type):
"""
Get a random mutator for the given type
"""
if obj_type == unicode:
obj_type = str
obj = str(obj)
return self._get_random(obj_type)(obj) |
def find_lb_by_name(self, name):
"""
Look up a LBaaS instance by name (rather than id)
:attr string name: The LBaaS name assigned at creation time
:rtype :class:`dict`
"""
log.debug("Finding load balancers matching name '%s'" % name)
matching = filter(lambda l: l['name'] == name, self.list_lbs())
if len(matching) > 1:
raise ValueError("Ambiguous; more than one load balancer matched '%s'" % name)
if matching:
log.info("Found existing load balancer, %s" % matching[0]['id'])
return matching[0]
return None | Look up a LBaaS instance by name (rather than id)
:attr string name: The LBaaS name assigned at creation time
:rtype :class:`dict` | Below is the the instruction that describes the task:
### Input:
Look up a LBaaS instance by name (rather than id)
:attr string name: The LBaaS name assigned at creation time
:rtype :class:`dict`
### Response:
def find_lb_by_name(self, name):
"""
Look up a LBaaS instance by name (rather than id)
:attr string name: The LBaaS name assigned at creation time
:rtype :class:`dict`
"""
log.debug("Finding load balancers matching name '%s'" % name)
matching = filter(lambda l: l['name'] == name, self.list_lbs())
if len(matching) > 1:
raise ValueError("Ambiguous; more than one load balancer matched '%s'" % name)
if matching:
log.info("Found existing load balancer, %s" % matching[0]['id'])
return matching[0]
return None |
def dnld_annotation(assc_file, prt=sys.stdout):
"""Download gaf, gpad, or gpi from http://current.geneontology.org/annotations/"""
if not os.path.isfile(assc_file):
# assc_http = "http://geneontology.org/gene-associations/"
assc_http = "http://current.geneontology.org/annotations/"
_, assc_base = os.path.split(assc_file)
src = os.path.join(assc_http, "{ASSC}.gz".format(ASSC=assc_base))
dnld_file(src, assc_file, prt, loading_bar=None) | Download gaf, gpad, or gpi from http://current.geneontology.org/annotations/ | Below is the the instruction that describes the task:
### Input:
Download gaf, gpad, or gpi from http://current.geneontology.org/annotations/
### Response:
def dnld_annotation(assc_file, prt=sys.stdout):
"""Download gaf, gpad, or gpi from http://current.geneontology.org/annotations/"""
if not os.path.isfile(assc_file):
# assc_http = "http://geneontology.org/gene-associations/"
assc_http = "http://current.geneontology.org/annotations/"
_, assc_base = os.path.split(assc_file)
src = os.path.join(assc_http, "{ASSC}.gz".format(ASSC=assc_base))
dnld_file(src, assc_file, prt, loading_bar=None) |
def delete(self,pool_or_cursor):
".. warning:: pgmock doesn't support delete yet, so this isn't tested"
vals=self.pkey_vals()
whereclause=' and '.join('%s=%%s'%k for k in self.PKEY.split(','))
q='delete from %s where %s'%(self.TABLE,whereclause)
commit_or_execute(pool_or_cursor,q,vals) | .. warning:: pgmock doesn't support delete yet, so this isn't tested | Below is the the instruction that describes the task:
### Input:
.. warning:: pgmock doesn't support delete yet, so this isn't tested
### Response:
def delete(self,pool_or_cursor):
".. warning:: pgmock doesn't support delete yet, so this isn't tested"
vals=self.pkey_vals()
whereclause=' and '.join('%s=%%s'%k for k in self.PKEY.split(','))
q='delete from %s where %s'%(self.TABLE,whereclause)
commit_or_execute(pool_or_cursor,q,vals) |
def model(self, inputs, mode='train'):
"""Build a simple convnet (BN before ReLU).
Args:
inputs: a tensor of size [batch_size, height, width, channels]
mode: string in ['train', 'test']
Returns:
the last op containing the predictions
Note:
Best score
Step: 7015 - Epoch: 18/20 - best batch acc: 0.8984 - loss: 1.5656
Worst score
Step: 7523 - Epoch: 20/20 - best batch acc: 0.7734 - loss: 1.6874
"""
# Extract features
training = (mode == 'train')
with tf.variable_scope('conv1') as scope:
conv = tf.layers.conv2d(inputs=inputs, filters=16, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
conv = tf.layers.conv2d(inputs=bn, filters=16, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)
with tf.variable_scope('conv2') as scope:
conv = tf.layers.conv2d(inputs=pool, filters=32, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
conv = tf.layers.conv2d(inputs=bn, filters=32, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)
with tf.variable_scope('conv3') as scope:
conv = tf.layers.conv2d(inputs=pool, filters=32, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
conv = tf.layers.conv2d(inputs=bn, filters=32, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)
# Classify
with tf.variable_scope('fc') as scope:
flat = tf.layers.flatten(pool)
fc = tf.layers.dense(inputs=flat, units=32, activation=tf.nn.relu)
softmax = tf.layers.dense(inputs=fc, units=self.num_classes, activation=tf.nn.softmax)
return softmax | Build a simple convnet (BN before ReLU).
Args:
inputs: a tensor of size [batch_size, height, width, channels]
mode: string in ['train', 'test']
Returns:
the last op containing the predictions
Note:
Best score
Step: 7015 - Epoch: 18/20 - best batch acc: 0.8984 - loss: 1.5656
Worst score
Step: 7523 - Epoch: 20/20 - best batch acc: 0.7734 - loss: 1.6874 | Below is the the instruction that describes the task:
### Input:
Build a simple convnet (BN before ReLU).
Args:
inputs: a tensor of size [batch_size, height, width, channels]
mode: string in ['train', 'test']
Returns:
the last op containing the predictions
Note:
Best score
Step: 7015 - Epoch: 18/20 - best batch acc: 0.8984 - loss: 1.5656
Worst score
Step: 7523 - Epoch: 20/20 - best batch acc: 0.7734 - loss: 1.6874
### Response:
def model(self, inputs, mode='train'):
"""Build a simple convnet (BN before ReLU).
Args:
inputs: a tensor of size [batch_size, height, width, channels]
mode: string in ['train', 'test']
Returns:
the last op containing the predictions
Note:
Best score
Step: 7015 - Epoch: 18/20 - best batch acc: 0.8984 - loss: 1.5656
Worst score
Step: 7523 - Epoch: 20/20 - best batch acc: 0.7734 - loss: 1.6874
"""
# Extract features
training = (mode == 'train')
with tf.variable_scope('conv1') as scope:
conv = tf.layers.conv2d(inputs=inputs, filters=16, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
conv = tf.layers.conv2d(inputs=bn, filters=16, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)
with tf.variable_scope('conv2') as scope:
conv = tf.layers.conv2d(inputs=pool, filters=32, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
conv = tf.layers.conv2d(inputs=bn, filters=32, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)
with tf.variable_scope('conv3') as scope:
conv = tf.layers.conv2d(inputs=pool, filters=32, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
conv = tf.layers.conv2d(inputs=bn, filters=32, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)
# Classify
with tf.variable_scope('fc') as scope:
flat = tf.layers.flatten(pool)
fc = tf.layers.dense(inputs=flat, units=32, activation=tf.nn.relu)
softmax = tf.layers.dense(inputs=fc, units=self.num_classes, activation=tf.nn.softmax)
return softmax |
def check_new_version_available(this_version):
"""
Checks if a newer version of Zappa is available.
    Returns True if updateable, else False.
"""
import requests
pypi_url = 'https://pypi.python.org/pypi/Zappa/json'
resp = requests.get(pypi_url, timeout=1.5)
top_version = resp.json()['info']['version']
return this_version != top_version | Checks if a newer version of Zappa is available.
Returns True if updateable, else False. | Below is the the instruction that describes the task:
### Input:
Checks if a newer version of Zappa is available.
Returns True if updateable, else False.
### Response:
def check_new_version_available(this_version):
"""
Checks if a newer version of Zappa is available.
    Returns True if updateable, else False.
"""
import requests
pypi_url = 'https://pypi.python.org/pypi/Zappa/json'
resp = requests.get(pypi_url, timeout=1.5)
top_version = resp.json()['info']['version']
return this_version != top_version |
def tomindec(origin):
"""
Convert [+/-]DDD.DDDDD to a tuple (degrees, minutes)
"""
origin = float(origin)
degrees = int(origin)
minutes = (origin % 1) * 60
return degrees, minutes | Convert [+/-]DDD.DDDDD to a tuple (degrees, minutes) | Below is the the instruction that describes the task:
### Input:
Convert [+/-]DDD.DDDDD to a tuple (degrees, minutes)
### Response:
def tomindec(origin):
"""
Convert [+/-]DDD.DDDDD to a tuple (degrees, minutes)
"""
origin = float(origin)
degrees = int(origin)
minutes = (origin % 1) * 60
return degrees, minutes |
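A quick example with positive coordinates; worth noting that for negative inputs int() truncates toward zero while % returns a non-negative remainder, so the two components may not recombine cleanly:
print(tomindec('12.5'))              # (12, 30.0)
degrees, minutes = tomindec(48.8566)
print(degrees, round(minutes, 3))    # 48 51.396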
def get_active_entry(user, select_for_update=False):
"""Returns the user's currently-active entry, or None."""
entries = apps.get_model('entries', 'Entry').no_join
if select_for_update:
entries = entries.select_for_update()
entries = entries.filter(user=user, end_time__isnull=True)
if not entries.exists():
return None
if entries.count() > 1:
raise ActiveEntryError('Only one active entry is allowed.')
return entries[0] | Returns the user's currently-active entry, or None. | Below is the the instruction that describes the task:
### Input:
Returns the user's currently-active entry, or None.
### Response:
def get_active_entry(user, select_for_update=False):
"""Returns the user's currently-active entry, or None."""
entries = apps.get_model('entries', 'Entry').no_join
if select_for_update:
entries = entries.select_for_update()
entries = entries.filter(user=user, end_time__isnull=True)
if not entries.exists():
return None
if entries.count() > 1:
raise ActiveEntryError('Only one active entry is allowed.')
return entries[0] |
def get_str(self, key, default=None):
"""
Args:
key (str | unicode | None): Key to lookup
default (str | unicode | None): Default to use if key is not configured
Returns:
(str | None): Value of key, if defined
"""
if key:
for provider in self.providers:
value = provider.get_str(key)
if value is not None:
return value
return default | Args:
key (str | unicode | None): Key to lookup
default (str | unicode | None): Default to use if key is not configured
Returns:
(str | None): Value of key, if defined | Below is the the instruction that describes the task:
### Input:
Args:
key (str | unicode | None): Key to lookup
default (str | unicode | None): Default to use if key is not configured
Returns:
(str | None): Value of key, if defined
### Response:
def get_str(self, key, default=None):
"""
Args:
key (str | unicode | None): Key to lookup
default (str | unicode | None): Default to use if key is not configured
Returns:
(str | None): Value of key, if defined
"""
if key:
for provider in self.providers:
value = provider.get_str(key)
if value is not None:
return value
return default |
def pmt_angles(self):
"""A list of PMT directions sorted by PMT channel, on DU-1, floor-1"""
if self._pmt_angles == []:
mask = (self.pmts.du == 1) & (self.pmts.floor == 1)
self._pmt_angles = self.pmts.dir[mask]
return self._pmt_angles | A list of PMT directions sorted by PMT channel, on DU-1, floor-1 | Below is the the instruction that describes the task:
### Input:
A list of PMT directions sorted by PMT channel, on DU-1, floor-1
### Response:
def pmt_angles(self):
"""A list of PMT directions sorted by PMT channel, on DU-1, floor-1"""
if self._pmt_angles == []:
mask = (self.pmts.du == 1) & (self.pmts.floor == 1)
self._pmt_angles = self.pmts.dir[mask]
return self._pmt_angles |
def get_error(self, block=False, timeout=None):
"""Removes and returns an error from self._errors
Args:
block(bool): if True block until a RTMMessage is available,
else it will return None when self._inbox is empty
timeout(int): it blocks at most timeout seconds
Returns:
error if inbox is not empty, else None
"""
try:
error = self._errors.get(block=block, timeout=timeout)
return error
except Exception:
return None | Removes and returns an error from self._errors
Args:
block(bool): if True block until a RTMMessage is available,
else it will return None when self._inbox is empty
timeout(int): it blocks at most timeout seconds
Returns:
error if inbox is not empty, else None | Below is the the instruction that describes the task:
### Input:
Removes and returns an error from self._errors
Args:
block(bool): if True block until a RTMMessage is available,
else it will return None when self._inbox is empty
timeout(int): it blocks at most timeout seconds
Returns:
error if inbox is not empty, else None
### Response:
def get_error(self, block=False, timeout=None):
"""Removes and returns an error from self._errors
Args:
block(bool): if True block until a RTMMessage is available,
else it will return None when self._inbox is empty
timeout(int): it blocks at most timeout seconds
Returns:
error if inbox is not empty, else None
"""
try:
error = self._errors.get(block=block, timeout=timeout)
return error
except Exception:
return None |
def is_published(self):
"""Check fields 980 and 773 to see if the record has already been published.
        :return: True if published, else False
"""
field980 = record_get_field_instances(self.record, '980')
field773 = record_get_field_instances(self.record, '773')
for f980 in field980:
if 'a' in field_get_subfields(f980):
for f773 in field773:
if 'p' in field_get_subfields(f773):
return True
return False | Check fields 980 and 773 to see if the record has already been published.
:return: True if published, else False | Below is the the instruction that describes the task:
### Input:
Check fields 980 and 773 to see if the record has already been published.
:return: True if published, else False
### Response:
def is_published(self):
"""Check fields 980 and 773 to see if the record has already been published.
        :return: True if published, else False
"""
field980 = record_get_field_instances(self.record, '980')
field773 = record_get_field_instances(self.record, '773')
for f980 in field980:
if 'a' in field_get_subfields(f980):
for f773 in field773:
if 'p' in field_get_subfields(f773):
return True
return False |
def detect_number_of_cores():
"""
Detects the number of cores on a system. Cribbed from pp.
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
return int(subprocess.check_output(["sysctl", "-n", "hw.ncpu"]))
# Windows:
try:
ncpus = int(os.environ.get("NUMBER_OF_PROCESSORS", ""))
if ncpus > 0:
return ncpus
except ValueError:
pass
return 1 | Detects the number of cores on a system. Cribbed from pp. | Below is the the instruction that describes the task:
### Input:
Detects the number of cores on a system. Cribbed from pp.
### Response:
def detect_number_of_cores():
"""
Detects the number of cores on a system. Cribbed from pp.
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
return int(subprocess.check_output(["sysctl", "-n", "hw.ncpu"]))
# Windows:
try:
ncpus = int(os.environ.get("NUMBER_OF_PROCESSORS", ""))
if ncpus > 0:
return ncpus
except ValueError:
pass
return 1 |
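One way this is typically consumed, sizing a worker pool (illustrative):
from multiprocessing import Pool

n_workers = detect_number_of_cores()
with Pool(processes=n_workers) as pool:
    print(pool.map(abs, [-1, -2, -3]))   # [1, 2, 3]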
def gen_primes():
""" Generate an infinite sequence of prime numbers.
"""
D = {}
q = 2
while True:
if q not in D:
yield q
D[q * q] = [q]
else:
for p in D[q]:
D.setdefault(p + q, []).append(p)
del D[q]
q += 1 | Generate an infinite sequence of prime numbers. | Below is the the instruction that describes the task:
### Input:
Generate an infinite sequence of prime numbers.
### Response:
def gen_primes():
""" Generate an infinite sequence of prime numbers.
"""
D = {}
q = 2
while True:
if q not in D:
yield q
D[q * q] = [q]
else:
for p in D[q]:
D.setdefault(p + q, []).append(p)
del D[q]
q += 1 |
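Because the generator is infinite, callers usually slice it; for example:
from itertools import islice

print(list(islice(gen_primes(), 8)))   # [2, 3, 5, 7, 11, 13, 17, 19]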
def p_labelled_statement(self, p):
"""labelled_statement : identifier COLON statement"""
p[0] = ast.Label(identifier=p[1], statement=p[3]) | labelled_statement : identifier COLON statement | Below is the the instruction that describes the task:
### Input:
labelled_statement : identifier COLON statement
### Response:
def p_labelled_statement(self, p):
"""labelled_statement : identifier COLON statement"""
p[0] = ast.Label(identifier=p[1], statement=p[3]) |
def padding(s, bs=AES.block_size):
"""Fills a bytes-like object with arbitrary symbols to make its length divisible by `bs`.
"""
s = to_bytes(s)
if len(s) % bs == 0:
res = s + b''.join(map(to_bytes, [random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(bs - 1)])) + to_bytes(chr(96 - bs))
elif len(s) % bs > 0 and len(s) > bs:
res = s + b''.join(map(to_bytes, [random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(bs - len(s) % bs - 1)])) + to_bytes(chr(96 + len(s) % bs - bs))
else:
res = s + b''.join(map(to_bytes, [random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(bs - len(s) - 1)])) + to_bytes(chr(96 + len(s) - bs))
return res | Fills a bytes-like object with arbitrary symbols to make its length divisible by `bs`. | Below is the the instruction that describes the task:
### Input:
Fills a bytes-like object with arbitrary symbols to make its length divisible by `bs`.
### Response:
def padding(s, bs=AES.block_size):
"""Fills a bytes-like object with arbitrary symbols to make its length divisible by `bs`.
"""
s = to_bytes(s)
if len(s) % bs == 0:
res = s + b''.join(map(to_bytes, [random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(bs - 1)])) + to_bytes(chr(96 - bs))
elif len(s) % bs > 0 and len(s) > bs:
res = s + b''.join(map(to_bytes, [random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(bs - len(s) % bs - 1)])) + to_bytes(chr(96 + len(s) % bs - bs))
else:
res = s + b''.join(map(to_bytes, [random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(bs - len(s) - 1)])) + to_bytes(chr(96 + len(s) - bs))
return res |
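A small check that the padded length comes out as a multiple of the block size (bs=16 passed explicitly so the snippet does not depend on PyCrypto's AES.block_size):
padded = padding(b"hello", bs=16)
print(len(padded), len(padded) % 16)   # 16 0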
def describe_parameter_group(name, Filters=None, MaxRecords=None, Marker=None,
region=None, key=None, keyid=None, profile=None):
'''
Returns a list of `DBParameterGroup` descriptions.
    CLI example to describe a parameter group::
salt myminion boto_rds.describe_parameter_group parametergroupname\
region=us-east-1
'''
res = __salt__['boto_rds.parameter_group_exists'](name, tags=None,
region=region, key=key,
keyid=keyid,
profile=profile)
if not res.get('exists'):
return {'exists': bool(res)}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return {'results': bool(conn)}
kwargs = {}
for key in ('Marker', 'Filters'):
if locals()[key] is not None:
kwargs[key] = str(locals()[key]) # future lint: disable=blacklisted-function
if locals()['MaxRecords'] is not None:
kwargs['MaxRecords'] = int(locals()['MaxRecords'])
info = conn.describe_db_parameter_groups(DBParameterGroupName=name,
**kwargs)
if not info:
return {'results': bool(info), 'message':
'Failed to get RDS description for group {0}.'.format(name)}
return {'results': bool(info), 'message':
                'Got RDS description for group {0}.'.format(name)}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} | Returns a list of `DBParameterGroup` descriptions.
CLI example to describe a parameter group::
salt myminion boto_rds.describe_parameter_group parametergroupname\
region=us-east-1 | Below is the the instruction that describes the task:
### Input:
Returns a list of `DBParameterGroup` descriptions.
CLI example to describe a parameter group::
salt myminion boto_rds.describe_parameter_group parametergroupname\
region=us-east-1
### Response:
def describe_parameter_group(name, Filters=None, MaxRecords=None, Marker=None,
region=None, key=None, keyid=None, profile=None):
'''
Returns a list of `DBParameterGroup` descriptions.
    CLI example to describe a parameter group::
salt myminion boto_rds.describe_parameter_group parametergroupname\
region=us-east-1
'''
res = __salt__['boto_rds.parameter_group_exists'](name, tags=None,
region=region, key=key,
keyid=keyid,
profile=profile)
if not res.get('exists'):
return {'exists': bool(res)}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return {'results': bool(conn)}
kwargs = {}
for key in ('Marker', 'Filters'):
if locals()[key] is not None:
kwargs[key] = str(locals()[key]) # future lint: disable=blacklisted-function
if locals()['MaxRecords'] is not None:
kwargs['MaxRecords'] = int(locals()['MaxRecords'])
info = conn.describe_db_parameter_groups(DBParameterGroupName=name,
**kwargs)
if not info:
return {'results': bool(info), 'message':
'Failed to get RDS description for group {0}.'.format(name)}
return {'results': bool(info), 'message':
                'Got RDS description for group {0}.'.format(name)}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} |
def get_upload_path(self, filename):
''' Override this in proxy subclass to customize upload path.
Default upload path is
:file:`/media/images/<user.id>/<filename>.<ext>`
or :file:`/media/images/common/<filename>.<ext>` if user is not set.
``<filename>`` is returned by
:meth:`~generic_images.models.AbstractAttachedImage.get_file_name`
        method. By default it is the probable id of the new image (it is
predicted as it is unknown at this stage).
'''
user_folder = str(self.user.pk) if self.user else 'common'
root, ext = os.path.splitext(filename)
return os.path.join('media', 'images', user_folder,
self.get_file_name(filename) + ext) | Override this in proxy subclass to customize upload path.
Default upload path is
:file:`/media/images/<user.id>/<filename>.<ext>`
or :file:`/media/images/common/<filename>.<ext>` if user is not set.
``<filename>`` is returned by
:meth:`~generic_images.models.AbstractAttachedImage.get_file_name`
method. By default it is the probable id of the new image (it is
predicted as it is unknown at this stage). | Below is the the instruction that describes the task:
### Input:
Override this in proxy subclass to customize upload path.
Default upload path is
:file:`/media/images/<user.id>/<filename>.<ext>`
or :file:`/media/images/common/<filename>.<ext>` if user is not set.
``<filename>`` is returned by
:meth:`~generic_images.models.AbstractAttachedImage.get_file_name`
method. By default it is the probable id of the new image (it is
predicted as it is unknown at this stage).
### Response:
def get_upload_path(self, filename):
''' Override this in proxy subclass to customize upload path.
Default upload path is
:file:`/media/images/<user.id>/<filename>.<ext>`
or :file:`/media/images/common/<filename>.<ext>` if user is not set.
``<filename>`` is returned by
:meth:`~generic_images.models.AbstractAttachedImage.get_file_name`
        method. By default it is the probable id of the new image (it is
predicted as it is unknown at this stage).
'''
user_folder = str(self.user.pk) if self.user else 'common'
root, ext = os.path.splitext(filename)
return os.path.join('media', 'images', user_folder,
self.get_file_name(filename) + ext) |
def _check_link_completion(self, link, fail_pending=False, fail_running=False):
"""Internal function to check the completion of all the dispatched jobs
Returns
-------
status_vect : `JobStatusVector`
            Vector that summarizes the number of jobs in various states.
"""
status_vect = JobStatusVector()
for job_key, job_details in link.jobs.items():
# if job_details.status == JobStatus.failed:
# failed = True
# continue
# elif job_details.status == JobStatus.done:
# continue
if job_key.find(JobDetails.topkey) >= 0:
continue
job_details.status = self._interface.check_job(job_details)
if job_details.status == JobStatus.pending:
if fail_pending:
job_details.status = JobStatus.failed
elif job_details.status == JobStatus.running:
if fail_running:
job_details.status = JobStatus.failed
status_vect[job_details.status] += 1
link.jobs[job_key] = job_details
link._set_status_self(job_details.jobkey, job_details.status)
return status_vect | Internal function to check the completion of all the dispatched jobs
Returns
-------
status_vect : `JobStatusVector`
    Vector that summarizes the number of jobs in various states. | Below is the the instruction that describes the task:
### Input:
Internal function to check the completion of all the dispatched jobs
Returns
-------
status_vect : `JobStatusVector`
    Vector that summarizes the number of jobs in various states.
### Response:
def _check_link_completion(self, link, fail_pending=False, fail_running=False):
"""Internal function to check the completion of all the dispatched jobs
Returns
-------
status_vect : `JobStatusVector`
            Vector that summarizes the number of jobs in various states.
"""
status_vect = JobStatusVector()
for job_key, job_details in link.jobs.items():
# if job_details.status == JobStatus.failed:
# failed = True
# continue
# elif job_details.status == JobStatus.done:
# continue
if job_key.find(JobDetails.topkey) >= 0:
continue
job_details.status = self._interface.check_job(job_details)
if job_details.status == JobStatus.pending:
if fail_pending:
job_details.status = JobStatus.failed
elif job_details.status == JobStatus.running:
if fail_running:
job_details.status = JobStatus.failed
status_vect[job_details.status] += 1
link.jobs[job_key] = job_details
link._set_status_self(job_details.jobkey, job_details.status)
return status_vect |
def normalize_file(file, separators=None):
"""
Normalizes the file path to use the POSIX path separator (i.e., ``'/'``).
*file* (:class:`str`) is the file path.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
This does not need to include the POSIX path separator (``'/'``), but
including it will not affect the results. Default is :data:`None` for
:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty
container (e.g., an empty tuple ``()``).
Returns the normalized file path (:class:`str`).
"""
# Normalize path separators.
if separators is None:
separators = NORMALIZE_PATH_SEPS
norm_file = file
for sep in separators:
norm_file = norm_file.replace(sep, posixpath.sep)
# Remove current directory prefix.
if norm_file.startswith('./'):
norm_file = norm_file[2:]
return norm_file | Normalizes the file path to use the POSIX path separator (i.e., ``'/'``).
*file* (:class:`str`) is the file path.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
This does not need to include the POSIX path separator (``'/'``), but
including it will not affect the results. Default is :data:`None` for
:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty
container (e.g., an empty tuple ``()``).
Returns the normalized file path (:class:`str`). | Below is the the instruction that describes the task:
### Input:
Normalizes the file path to use the POSIX path separator (i.e., ``'/'``).
*file* (:class:`str`) is the file path.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
This does not need to include the POSIX path separator (``'/'``), but
including it will not affect the results. Default is :data:`None` for
:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty
container (e.g., an empty tuple ``()``).
Returns the normalized file path (:class:`str`).
### Response:
def normalize_file(file, separators=None):
"""
Normalizes the file path to use the POSIX path separator (i.e., ``'/'``).
*file* (:class:`str`) is the file path.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
This does not need to include the POSIX path separator (``'/'``), but
including it will not affect the results. Default is :data:`None` for
:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty
container (e.g., an empty tuple ``()``).
Returns the normalized file path (:class:`str`).
"""
# Normalize path separators.
if separators is None:
separators = NORMALIZE_PATH_SEPS
norm_file = file
for sep in separators:
norm_file = norm_file.replace(sep, posixpath.sep)
# Remove current directory prefix.
if norm_file.startswith('./'):
norm_file = norm_file[2:]
return norm_file |
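Example with an explicit separator set (avoiding any reliance on the module-level NORMALIZE_PATH_SEPS constant):
print(normalize_file('.\\docs\\readme.md', separators=('\\',)))   # docs/readme.md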
def set_weights(self, weights_values: dict, ignore_missing=False):
"""
Sets the weights values of the network.
:param weights_values: dictionary with weights for each layer
"""
network_name = self.__class__.__name__.lower()
with tf.variable_scope(network_name):
for layer_name in weights_values:
with tf.variable_scope(layer_name, reuse=True):
for param_name, data in weights_values[layer_name].items():
try:
var = tf.get_variable(param_name)
self._session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise | Sets the weights values of the network.
:param weights_values: dictionary with weights for each layer | Below is the the instruction that describes the task:
### Input:
Sets the weights values of the network.
:param weights_values: dictionary with weights for each layer
### Response:
def set_weights(self, weights_values: dict, ignore_missing=False):
"""
Sets the weights values of the network.
:param weights_values: dictionary with weights for each layer
"""
network_name = self.__class__.__name__.lower()
with tf.variable_scope(network_name):
for layer_name in weights_values:
with tf.variable_scope(layer_name, reuse=True):
for param_name, data in weights_values[layer_name].items():
try:
var = tf.get_variable(param_name)
self._session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise |
def get_table_acl(self, table_name, timeout=None):
'''
Returns details about any stored access policies specified on the
table that may be used with Shared Access Signatures.
:param str table_name:
The name of an existing table.
:param int timeout:
The server timeout, expressed in seconds.
:return: A dictionary of access policies associated with the table.
:rtype: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
'''
_validate_not_none('table_name', table_name)
request = HTTPRequest()
request.method = 'GET'
request.host_locations = self._get_host_locations(secondary=True)
request.path = '/' + _to_str(table_name)
request.query = {
'comp': 'acl',
'timeout': _int_to_str(timeout),
}
return self._perform_request(request, _convert_xml_to_signed_identifiers) | Returns details about any stored access policies specified on the
table that may be used with Shared Access Signatures.
:param str table_name:
The name of an existing table.
:param int timeout:
The server timeout, expressed in seconds.
:return: A dictionary of access policies associated with the table.
:rtype: dict(str, :class:`~azure.storage.common.models.AccessPolicy`) | Below is the the instruction that describes the task:
### Input:
Returns details about any stored access policies specified on the
table that may be used with Shared Access Signatures.
:param str table_name:
The name of an existing table.
:param int timeout:
The server timeout, expressed in seconds.
:return: A dictionary of access policies associated with the table.
:rtype: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
### Response:
def get_table_acl(self, table_name, timeout=None):
'''
Returns details about any stored access policies specified on the
table that may be used with Shared Access Signatures.
:param str table_name:
The name of an existing table.
:param int timeout:
The server timeout, expressed in seconds.
:return: A dictionary of access policies associated with the table.
:rtype: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
'''
_validate_not_none('table_name', table_name)
request = HTTPRequest()
request.method = 'GET'
request.host_locations = self._get_host_locations(secondary=True)
request.path = '/' + _to_str(table_name)
request.query = {
'comp': 'acl',
'timeout': _int_to_str(timeout),
}
return self._perform_request(request, _convert_xml_to_signed_identifiers) |
def begin(self):
"""Called once before using the session to check global step."""
self._global_step_tensor = tf.train.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
'Global step should be created to use StepCounterHook.') | Called once before using the session to check global step. | Below is the the instruction that describes the task:
### Input:
Called once before using the session to check global step.
### Response:
def begin(self):
"""Called once before using the session to check global step."""
self._global_step_tensor = tf.train.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
'Global step should be created to use StepCounterHook.') |
def _create_put_request(self, resource, billomat_id, command=None, send_data=None):
"""
        Creates a put request and returns the response data
"""
assert (isinstance(resource, str))
if isinstance(billomat_id, int):
billomat_id = str(billomat_id)
if not command:
command = ''
else:
command = '/' + command
response = self.session.put(
url=self.api_url + resource + '/' + billomat_id + command,
data=json.dumps(send_data),
)
        return self._handle_response(response) | Creates a put request and returns the response data | Below is the the instruction that describes the task:
### Input:
Creates a put request and returns the response data
### Response:
def _create_put_request(self, resource, billomat_id, command=None, send_data=None):
"""
        Creates a put request and returns the response data
"""
assert (isinstance(resource, str))
if isinstance(billomat_id, int):
billomat_id = str(billomat_id)
if not command:
command = ''
else:
command = '/' + command
response = self.session.put(
url=self.api_url + resource + '/' + billomat_id + command,
data=json.dumps(send_data),
)
return self._handle_response(response) |
def filter_jobs(sacct_jobs, failed=True):
"""Filter jobs that have a FAILED etc. status."""
categories = FAILED_CATEGORIES if failed else NORMAL_CATEGORIES
filtered_jobs = [job for job in sacct_jobs if job['state'] in categories]
return filtered_jobs | Filter jobs that have a FAILED etc. status. | Below is the the instruction that describes the task:
### Input:
Filter jobs that have a FAILED etc. status.
### Response:
def filter_jobs(sacct_jobs, failed=True):
"""Filter jobs that have a FAILED etc. status."""
categories = FAILED_CATEGORIES if failed else NORMAL_CATEGORIES
filtered_jobs = [job for job in sacct_jobs if job['state'] in categories]
return filtered_jobs |
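A hedged usage sketch with hypothetical sacct rows; it assumes the module-level FAILED_CATEGORIES constant contains 'FAILED':
jobs = [{'id': 1, 'state': 'FAILED'}, {'id': 2, 'state': 'COMPLETED'}]
print(filter_jobs(jobs, failed=True))   # [{'id': 1, 'state': 'FAILED'}]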
def get_config_map(self, name):
"""
Get a ConfigMap object from the server
Raises exception on error
:param name: str, name of configMap to get from the server
:returns: ConfigMapResponse containing the ConfigMap with the requested name
"""
response = self.os.get_config_map(name)
config_map_response = ConfigMapResponse(response.json())
return config_map_response | Get a ConfigMap object from the server
Raises exception on error
:param name: str, name of configMap to get from the server
:returns: ConfigMapResponse containing the ConfigMap with the requested name | Below is the the instruction that describes the task:
### Input:
Get a ConfigMap object from the server
Raises exception on error
:param name: str, name of configMap to get from the server
:returns: ConfigMapResponse containing the ConfigMap with the requested name
### Response:
def get_config_map(self, name):
"""
Get a ConfigMap object from the server
Raises exception on error
:param name: str, name of configMap to get from the server
:returns: ConfigMapResponse containing the ConfigMap with the requested name
"""
response = self.os.get_config_map(name)
config_map_response = ConfigMapResponse(response.json())
return config_map_response |
def timezone_at(self, *, lng, lat):
"""
this function looks up in which polygons the point could be included in
to speed things up there are shortcuts being used (stored in a binary file)
especially for large polygons it is expensive to check if a point is really included,
so certain simplifications are made and even when you get a hit the point might actually
not be inside the polygon (for example when there is only one timezone nearby)
if you want to make sure a point is really inside a timezone use 'certain_timezone_at'
:param lng: longitude of the point in degree (-180 to 180)
:param lat: latitude in degree (90 to -90)
:return: the timezone name of a matching polygon or None
"""
lng, lat = rectify_coordinates(lng, lat)
# x = longitude y = latitude both converted to 8byte int
x = coord2int(lng)
y = coord2int(lat)
shortcut_id_x, shortcut_id_y = coord2shortcut(lng, lat)
self.shortcuts_unique_id.seek(
(180 * NR_SHORTCUTS_PER_LAT * NR_BYTES_H * shortcut_id_x + NR_BYTES_H * shortcut_id_y))
try:
# if there is just one possible zone in this shortcut instantly return its name
return timezone_names[unpack(DTYPE_FORMAT_H, self.shortcuts_unique_id.read(NR_BYTES_H))[0]]
except IndexError:
possible_polygons = self.polygon_ids_of_shortcut(shortcut_id_x, shortcut_id_y)
nr_possible_polygons = len(possible_polygons)
if nr_possible_polygons == 0:
return None
if nr_possible_polygons == 1:
# there is only one polygon in that area. return its timezone name without further checks
return timezone_names[self.id_of(possible_polygons[0])]
# create a list of all the timezone ids of all possible polygons
ids = self.id_list(possible_polygons, nr_possible_polygons)
# check until the point is included in one of the possible polygons
for i in range(nr_possible_polygons):
# when including the current polygon only polygons from the same zone remain,
same_element = all_the_same(pointer=i, length=nr_possible_polygons, id_list=ids)
if same_element != -1:
# return the name of that zone
return timezone_names[same_element]
polygon_nr = possible_polygons[i]
# get the boundaries of the polygon = (lng_max, lng_min, lat_max, lat_min)
self.poly_max_values.seek(4 * NR_BYTES_I * polygon_nr)
boundaries = self.fromfile(self.poly_max_values, dtype=DTYPE_FORMAT_SIGNED_I_NUMPY, count=4)
# only run the expensive algorithm if the point is within the boundaries
if not (x > boundaries[0] or x < boundaries[1] or y > boundaries[2] or y < boundaries[3]):
outside_all_holes = True
# when the point is within a hole of the polygon, this timezone must not be returned
for hole_coordinates in self._holes_of_line(polygon_nr):
if inside_polygon(x, y, hole_coordinates):
outside_all_holes = False
break
if outside_all_holes:
if inside_polygon(x, y, self.coords_of(line=polygon_nr)):
# the point is included in this polygon. return its timezone name without further checks
return timezone_names[ids[i]]
# the timezone name of the last polygon should always be returned
# if no other polygon has been matched beforehand.
raise ValueError('BUG: this statement should never be reached. Please open up an issue on Github!') | this function looks up in which polygons the point could be included in
to speed things up there are shortcuts being used (stored in a binary file)
especially for large polygons it is expensive to check if a point is really included,
so certain simplifications are made and even when you get a hit the point might actually
not be inside the polygon (for example when there is only one timezone nearby)
if you want to make sure a point is really inside a timezone use 'certain_timezone_at'
:param lng: longitude of the point in degree (-180 to 180)
:param lat: latitude in degree (90 to -90)
:return: the timezone name of a matching polygon or None | Below is the the instruction that describes the task:
### Input:
this function looks up in which polygons the point could be included in
to speed things up there are shortcuts being used (stored in a binary file)
especially for large polygons it is expensive to check if a point is really included,
so certain simplifications are made and even when you get a hit the point might actually
not be inside the polygon (for example when there is only one timezone nearby)
if you want to make sure a point is really inside a timezone use 'certain_timezone_at'
:param lng: longitude of the point in degree (-180 to 180)
:param lat: latitude in degree (90 to -90)
:return: the timezone name of a matching polygon or None
### Response:
def timezone_at(self, *, lng, lat):
"""
this function looks up in which polygons the point could be included in
to speed things up there are shortcuts being used (stored in a binary file)
especially for large polygons it is expensive to check if a point is really included,
so certain simplifications are made and even when you get a hit the point might actually
not be inside the polygon (for example when there is only one timezone nearby)
if you want to make sure a point is really inside a timezone use 'certain_timezone_at'
:param lng: longitude of the point in degree (-180 to 180)
:param lat: latitude in degree (90 to -90)
:return: the timezone name of a matching polygon or None
"""
lng, lat = rectify_coordinates(lng, lat)
# x = longitude y = latitude both converted to 8byte int
x = coord2int(lng)
y = coord2int(lat)
shortcut_id_x, shortcut_id_y = coord2shortcut(lng, lat)
self.shortcuts_unique_id.seek(
(180 * NR_SHORTCUTS_PER_LAT * NR_BYTES_H * shortcut_id_x + NR_BYTES_H * shortcut_id_y))
try:
# if there is just one possible zone in this shortcut instantly return its name
return timezone_names[unpack(DTYPE_FORMAT_H, self.shortcuts_unique_id.read(NR_BYTES_H))[0]]
except IndexError:
possible_polygons = self.polygon_ids_of_shortcut(shortcut_id_x, shortcut_id_y)
nr_possible_polygons = len(possible_polygons)
if nr_possible_polygons == 0:
return None
if nr_possible_polygons == 1:
# there is only one polygon in that area. return its timezone name without further checks
return timezone_names[self.id_of(possible_polygons[0])]
# create a list of all the timezone ids of all possible polygons
ids = self.id_list(possible_polygons, nr_possible_polygons)
# check until the point is included in one of the possible polygons
for i in range(nr_possible_polygons):
# when including the current polygon only polygons from the same zone remain,
same_element = all_the_same(pointer=i, length=nr_possible_polygons, id_list=ids)
if same_element != -1:
# return the name of that zone
return timezone_names[same_element]
polygon_nr = possible_polygons[i]
# get the boundaries of the polygon = (lng_max, lng_min, lat_max, lat_min)
self.poly_max_values.seek(4 * NR_BYTES_I * polygon_nr)
boundaries = self.fromfile(self.poly_max_values, dtype=DTYPE_FORMAT_SIGNED_I_NUMPY, count=4)
# only run the expensive algorithm if the point is within the boundaries
if not (x > boundaries[0] or x < boundaries[1] or y > boundaries[2] or y < boundaries[3]):
outside_all_holes = True
# when the point is within a hole of the polygon, this timezone must not be returned
for hole_coordinates in self._holes_of_line(polygon_nr):
if inside_polygon(x, y, hole_coordinates):
outside_all_holes = False
break
if outside_all_holes:
if inside_polygon(x, y, self.coords_of(line=polygon_nr)):
# the point is included in this polygon. return its timezone name without further checks
return timezone_names[ids[i]]
# the timezone name of the last polygon should always be returned
# if no other polygon has been matched beforehand.
raise ValueError('BUG: this statement should never be reached. Please open up an issue on Github!') |
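A minimal usage sketch for the lookup above, assuming the method belongs to a TimezoneFinder-style class whose constructor opens the binary shortcut and polygon files:
tf = TimezoneFinder()                             # assumed class name; not shown in the snippet above
print(tf.timezone_at(lng=13.358, lat=52.5061))    # e.g. 'Europe/Berlin'
print(tf.timezone_at(lng=-87.65, lat=41.85))      # e.g. 'America/Chicago'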
def set_filters(self, filters):
"""
set and validate filters dict
"""
if not isinstance(filters, dict):
raise Exception("filters must be a dict")
self.filters = {}
for key in filters.keys():
value = filters[key]
self.add_filter(key,value) | set and validate filters dict | Below is the the instruction that describes the task:
### Input:
set and validate filters dict
### Response:
def set_filters(self, filters):
"""
set and validate filters dict
"""
if not isinstance(filters, dict):
raise Exception("filters must be a dict")
self.filters = {}
for key in filters.keys():
value = filters[key]
self.add_filter(key,value) |
def set_value(self, value):
"""Set the value associated with the keyword"""
if not isinstance(value, str):
raise TypeError("A value must be a string, got %s." % value)
self.__value = value | Set the value associated with the keyword | Below is the the instruction that describes the task:
### Input:
Set the value associated with the keyword
### Response:
def set_value(self, value):
"""Set the value associated with the keyword"""
if not isinstance(value, str):
raise TypeError("A value must be a string, got %s." % value)
self.__value = value |
def get_stats(self, stat_name):
"""
:param stat_name: requested statistics name.
:returns: all values of the requested statistic for all objects.
"""
return [self.get_stat(r, stat_name) for r in self.statistics.keys()] | :param stat_name: requested statistics name.
:returns: all values of the requested statistic for all objects. | Below is the the instruction that describes the task:
### Input:
:param stat_name: requested statistics name.
:returns: all values of the requested statistic for all objects.
### Response:
def get_stats(self, stat_name):
"""
:param stat_name: requested statistics name.
:returns: all values of the requested statistic for all objects.
"""
return [self.get_stat(r, stat_name) for r in self.statistics.keys()] |
def view(self):
"""View slpkg config file
"""
print("") # new line at start
conf_args = [
"RELEASE",
"SLACKWARE_VERSION",
"COMP_ARCH",
"BUILD_PATH",
"PACKAGES",
"PATCHES",
"CHECKMD5",
"DEL_ALL",
"DEL_BUILD",
"SBO_BUILD_LOG",
"MAKEFLAGS",
"DEFAULT_ANSWER",
"REMOVE_DEPS_ANSWER",
"SKIP_UNST",
"RSL_DEPS",
"DEL_DEPS",
"USE_COLORS",
"DOWNDER",
"DOWNDER_OPTIONS",
"SLACKPKG_LOG",
"ONLY_INSTALLED",
"PRG_BAR",
"EDITOR",
"NOT_DOWNGRADE"
]
read_conf = Utils().read_file(self.config_file)
for line in read_conf.splitlines():
if not line.startswith("#") and line.split("=")[0] in conf_args:
print("{0}".format(line))
else:
print("{0}{1}{2}".format(self.meta.color["CYAN"], line,
self.meta.color["ENDC"]))
print("") | View slpkg config file | Below is the the instruction that describes the task:
### Input:
View slpkg config file
### Response:
def view(self):
"""View slpkg config file
"""
print("") # new line at start
conf_args = [
"RELEASE",
"SLACKWARE_VERSION",
"COMP_ARCH",
"BUILD_PATH",
"PACKAGES",
"PATCHES",
"CHECKMD5",
"DEL_ALL",
"DEL_BUILD",
"SBO_BUILD_LOG",
"MAKEFLAGS",
"DEFAULT_ANSWER",
"REMOVE_DEPS_ANSWER",
"SKIP_UNST",
"RSL_DEPS",
"DEL_DEPS",
"USE_COLORS",
"DOWNDER",
"DOWNDER_OPTIONS",
"SLACKPKG_LOG",
"ONLY_INSTALLED",
"PRG_BAR",
"EDITOR",
"NOT_DOWNGRADE"
]
read_conf = Utils().read_file(self.config_file)
for line in read_conf.splitlines():
if not line.startswith("#") and line.split("=")[0] in conf_args:
print("{0}".format(line))
else:
print("{0}{1}{2}".format(self.meta.color["CYAN"], line,
self.meta.color["ENDC"]))
print("") |
def clean_promoted_guids(raw_promoted_guids):
""" Verify that the promoted GUIDs are formatted correctly,
otherwise strip it down into an empty list.
"""
valid = True
for row in raw_promoted_guids:
if len(row) != 2:
valid = False
break
if not (
(isinstance(row[0], str) or isinstance(row[0], unicode))
and (isinstance(row[1], int) or isinstance(row[1], float)) # noqa
):
valid = False
break
if valid:
return raw_promoted_guids
return [] | Verify that the promoted GUIDs are formatted correctly,
otherwise strip it down into an empty list. | Below is the the instruction that describes the task:
### Input:
Verify that the promoted GUIDs are formatted correctly,
otherwise strip it down into an empty list.
### Response:
def clean_promoted_guids(raw_promoted_guids):
""" Verify that the promoted GUIDs are formatted correctly,
otherwise strip it down into an empty list.
"""
valid = True
for row in raw_promoted_guids:
if len(row) != 2:
valid = False
break
if not (
(isinstance(row[0], str) or isinstance(row[0], unicode))
and (isinstance(row[1], int) or isinstance(row[1], float)) # noqa
):
valid = False
break
if valid:
return raw_promoted_guids
return [] |
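A quick sketch of both outcomes of the validator above (the GUID strings are made up; the unicode check implies Python 2):
clean_promoted_guids([('guid-a@example', 1.0), ('guid-b@example', 2)])
# -> returned unchanged: every row is a (string, number) pair
clean_promoted_guids([('guid-a@example', 1.0), ('guid-b@example',)])
# -> [] : one malformed row invalidates the whole list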
def delete_activity(self, id_num):
"""Delete an activity (run).
:param id_num: The activity ID to delete
"""
url = self._build_url('my', 'activities', id_num)
r = self.session.delete(url)
r.raise_for_status()
return r | Delete an activity (run).
:param id_num: The activity ID to delete | Below is the the instruction that describes the task:
### Input:
Delete an activity (run).
:param id_num: The activity ID to delete
### Response:
def delete_activity(self, id_num):
"""Delete an activity (run).
:param id_num: The activity ID to delete
"""
url = self._build_url('my', 'activities', id_num)
r = self.session.delete(url)
r.raise_for_status()
return r |
def _fill_queue(self, loglstar):
"""Sequentially add new live point proposals to the queue."""
# Add/zip arguments to submit to the queue.
point_queue = []
axes_queue = []
while self.nqueue < self.queue_size:
if self._beyond_unit_bound(loglstar):
# Propose points using the provided sampling/bounding options.
point, axes = self.propose_point()
evolve_point = self.evolve_point
else:
# Propose/evaluate points directly from the unit cube.
point = self.rstate.rand(self.npdim)
axes = np.identity(self.npdim)
evolve_point = sample_unif
point_queue.append(point)
axes_queue.append(axes)
self.nqueue += 1
loglstars = [loglstar for i in range(self.queue_size)]
scales = [self.scale for i in range(self.queue_size)]
ptforms = [self.prior_transform for i in range(self.queue_size)]
logls = [self.loglikelihood for i in range(self.queue_size)]
kwargs = [self.kwargs for i in range(self.queue_size)]
args = zip(point_queue, loglstars, axes_queue,
scales, ptforms, logls, kwargs)
if self.use_pool_evolve:
# Use the pool to propose ("evolve") a new live point.
self.queue = list(self.M(evolve_point, args))
else:
# Propose ("evolve") a new live point using the default `map`
# function.
self.queue = list(map(evolve_point, args)) | Sequentially add new live point proposals to the queue. | Below is the the instruction that describes the task:
### Input:
Sequentially add new live point proposals to the queue.
### Response:
def _fill_queue(self, loglstar):
"""Sequentially add new live point proposals to the queue."""
# Add/zip arguments to submit to the queue.
point_queue = []
axes_queue = []
while self.nqueue < self.queue_size:
if self._beyond_unit_bound(loglstar):
# Propose points using the provided sampling/bounding options.
point, axes = self.propose_point()
evolve_point = self.evolve_point
else:
# Propose/evaluate points directly from the unit cube.
point = self.rstate.rand(self.npdim)
axes = np.identity(self.npdim)
evolve_point = sample_unif
point_queue.append(point)
axes_queue.append(axes)
self.nqueue += 1
loglstars = [loglstar for i in range(self.queue_size)]
scales = [self.scale for i in range(self.queue_size)]
ptforms = [self.prior_transform for i in range(self.queue_size)]
logls = [self.loglikelihood for i in range(self.queue_size)]
kwargs = [self.kwargs for i in range(self.queue_size)]
args = zip(point_queue, loglstars, axes_queue,
scales, ptforms, logls, kwargs)
if self.use_pool_evolve:
# Use the pool to propose ("evolve") a new live point.
self.queue = list(self.M(evolve_point, args))
else:
# Propose ("evolve") a new live point using the default `map`
# function.
self.queue = list(map(evolve_point, args)) |
def new(self, array):
"""
Convert an array of compatible length into a DictArray:
>>> d = DictArray({'PGA': [0.01, 0.02, 0.04], 'PGV': [0.1, 0.2]})
>>> d.new(numpy.arange(0, 5, 1)) # array of length 5 = 3 + 2
<DictArray
PGA: [0 1 2]
PGV: [3 4]>
"""
assert len(self.array) == len(array)
arr = object.__new__(self.__class__)
arr.dt = self.dt
arr.slicedic = self.slicedic
arr.array = array
return arr | Convert an array of compatible length into a DictArray:
>>> d = DictArray({'PGA': [0.01, 0.02, 0.04], 'PGV': [0.1, 0.2]})
>>> d.new(numpy.arange(0, 5, 1)) # array of length 5 = 3 + 2
<DictArray
PGA: [0 1 2]
PGV: [3 4]> | Below is the the instruction that describes the task:
### Input:
Convert an array of compatible length into a DictArray:
>>> d = DictArray({'PGA': [0.01, 0.02, 0.04], 'PGV': [0.1, 0.2]})
>>> d.new(numpy.arange(0, 5, 1)) # array of length 5 = 3 + 2
<DictArray
PGA: [0 1 2]
PGV: [3 4]>
### Response:
def new(self, array):
"""
Convert an array of compatible length into a DictArray:
>>> d = DictArray({'PGA': [0.01, 0.02, 0.04], 'PGV': [0.1, 0.2]})
>>> d.new(numpy.arange(0, 5, 1)) # array of length 5 = 3 + 2
<DictArray
PGA: [0 1 2]
PGV: [3 4]>
"""
assert len(self.array) == len(array)
arr = object.__new__(self.__class__)
arr.dt = self.dt
arr.slicedic = self.slicedic
arr.array = array
return arr |
def to_csv(self, filename, stimuli=None, inhibitors=None, prepend=""):
"""
Writes the list of clampings to a CSV file
Parameters
----------
filename : str
Absolute path where to write the CSV file
stimuli : Optional[list[str]]
List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}.
inhibitors : Optional[list[str]]
List of inhibitors names. If given, inhibitors are renamed and converted to {0,1} instead of {-1,1}.
prepend : str
Columns are renamed using the given string at the beginning
"""
self.to_dataframe(stimuli, inhibitors, prepend).to_csv(filename, index=False) | Writes the list of clampings to a CSV file
Parameters
----------
filename : str
Absolute path where to write the CSV file
stimuli : Optional[list[str]]
List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}.
inhibitors : Optional[list[str]]
List of inhibitors names. If given, inhibitors are renamed and converted to {0,1} instead of {-1,1}.
prepend : str
Columns are renamed using the given string at the beginning | Below is the the instruction that describes the task:
### Input:
Writes the list of clampings to a CSV file
Parameters
----------
filename : str
Absolute path where to write the CSV file
stimuli : Optional[list[str]]
List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}.
inhibitors : Optional[list[str]]
List of inhibitors names. If given, inhibitors are renamed and converted to {0,1} instead of {-1,1}.
prepend : str
Columns are renamed using the given string at the beginning
### Response:
def to_csv(self, filename, stimuli=None, inhibitors=None, prepend=""):
"""
Writes the list of clampings to a CSV file
Parameters
----------
filename : str
Absolute path where to write the CSV file
stimuli : Optional[list[str]]
List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}.
inhibitors : Optional[list[str]]
List of inhibitors names. If given, inhibitors are renamed and converted to {0,1} instead of {-1,1}.
prepend : str
Columns are renamed using the given string at the beginning
"""
self.to_dataframe(stimuli, inhibitors, prepend).to_csv(filename, index=False) |
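A hedged call sketch for the exporter above; the clampings object is assumed to be an instance of the class that defines to_csv, and the species names are placeholders:
clampings.to_csv('clampings.csv',
                 stimuli=['egf', 'tnfa'],   # hypothetical stimulus names -> converted to {0,1}
                 inhibitors=['raf'],        # hypothetical inhibitor names -> renamed and converted to {0,1}
                 prepend='TR:')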
def extract_full(rec, sites, flank, fw):
"""
Full extraction of seq flanking the sites.
"""
for s in sites:
newid = "{0}:{1}".format(rec.name, s)
left = max(s - flank, 0)
right = min(s + flank, len(rec))
frag = rec.seq[left:right].strip("Nn")
newrec = SeqRecord(frag, id=newid, description="")
SeqIO.write([newrec], fw, "fasta") | Full extraction of seq flanking the sites. | Below is the the instruction that describes the task:
### Input:
Full extraction of seq flanking the sites.
### Response:
def extract_full(rec, sites, flank, fw):
"""
Full extraction of seq flanking the sites.
"""
for s in sites:
newid = "{0}:{1}".format(rec.name, s)
left = max(s - flank, 0)
right = min(s + flank, len(rec))
frag = rec.seq[left:right].strip("Nn")
newrec = SeqRecord(frag, id=newid, description="")
SeqIO.write([newrec], fw, "fasta") |
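A hedged driver sketch for the helper above. The file paths, site coordinates, and flank width are invented; the Biopython calls are standard:
from Bio import SeqIO

sites_by_seq = {'chr1': [120, 480]}                    # made-up site positions per sequence
with open('flanks.fasta', 'w') as fw:                  # hypothetical output path
    for rec in SeqIO.parse('genome.fasta', 'fasta'):   # hypothetical input path
        extract_full(rec, sites_by_seq.get(rec.name, []), flank=50, fw=fw)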
def has_perm(self, service, perm_name, obj, call_name):
"""
Raise PermissionDenied if user has no permission in object
"""
user = service.user
if not (perm_name is False):
if not user.has_perm(perm_name, obj=obj):
LOG_PERM.warn(
u'User %s has no permission %s. Access to %s with obj=%s',
user, perm_name, call_name, obj)
raise PermissionDenied(u'User %s has no permission %s for object %s' % (service.user, perm_name, obj))
LOG_PERM.debug(
u'User %s was authorized to access %s with permission %s with obj=%s',
user, call_name, perm_name, obj) | Raise PermissionDenied if user has no permission in object | Below is the the instruction that describes the task:
### Input:
Raise PermissionDenied if user has no permission in object
### Response:
def has_perm(self, service, perm_name, obj, call_name):
"""
Raise PermissionDenied if user has no permission in object
"""
user = service.user
if not (perm_name is False):
if not user.has_perm(perm_name, obj=obj):
LOG_PERM.warn(
u'User %s has no permission %s. Access to %s with obj=%s',
user, perm_name, call_name, obj)
raise PermissionDenied(u'User %s has no permission %s for object %s' % (service.user, perm_name, obj))
LOG_PERM.debug(
u'User %s was authorized to access %s with permission %s with obj=%s',
user, call_name, perm_name, obj) |
def mask(self):
'''
The array of indices to be masked. This is the union of the sets of
outliers, bad (flagged) cadences, transit cadences, and :py:obj:`NaN`
cadences.
'''
return np.array(list(set(np.concatenate([self.outmask, self.badmask,
self.transitmask, self.nanmask]))), dtype=int) | The array of indices to be masked. This is the union of the sets of
outliers, bad (flagged) cadences, transit cadences, and :py:obj:`NaN`
cadences. | Below is the the instruction that describes the task:
### Input:
The array of indices to be masked. This is the union of the sets of
outliers, bad (flagged) cadences, transit cadences, and :py:obj:`NaN`
cadences.
### Response:
def mask(self):
'''
The array of indices to be masked. This is the union of the sets of
outliers, bad (flagged) cadences, transit cadences, and :py:obj:`NaN`
cadences.
'''
return np.array(list(set(np.concatenate([self.outmask, self.badmask,
self.transitmask, self.nanmask]))), dtype=int) |
def irregular_sampling(T, N, rseed=None):
"""
Generates an irregularly sampled time vector by perturbing a
linearly spaced vector and later deleting a certain number of
points
Parameters
----------
T: float
Time span of the vector, i.e. how long it is in time
N: positive integer
Number of samples of the resulting time vector
rseed:
Random seed to feed the random number generator
Returns
-------
t_irr: ndarray
An irregularly sampled time vector
"""
sampling_period = (T/float(N))
N = int(N)
np.random.seed(rseed)
t = np.linspace(0, T, num=5*N)
# First we add jitter
t[1:-1] += sampling_period*0.5*np.random.randn(5*N-2)
# Then we do a random permutation and keep only N points
P = np.random.permutation(5*N)
t_irr = np.sort(t[P[:N]])
return t_irr | Generates an irregularly sampled time vector by perturbing a
linearly spaced vector and later deleting a certain number of
points
Parameters
----------
T: float
Time span of the vector, i.e. how long it is in time
N: positive integer
Number of samples of the resulting time vector
rseed:
Random seed to feed the random number generator
Returns
-------
t_irr: ndarray
An irregularly sampled time vector | Below is the the instruction that describes the task:
### Input:
Generates an irregularly sampled time vector by perturbing a
linearly spaced vector and later deleting a certain number of
points
Parameters
----------
T: float
Time span of the vector, i.e. how long it is in time
N: positive integer
Number of samples of the resulting time vector
rseed:
Random seed to feed the random number generator
Returns
-------
t_irr: ndarray
An irregularly sampled time vector
### Response:
def irregular_sampling(T, N, rseed=None):
"""
Generates an irregularly sampled time vector by perturbing a
linearly spaced vector and later deleting a certain number of
points
Parameters
----------
T: float
Time span of the vector, i.e. how long it is in time
N: positive integer
Number of samples of the resulting time vector
rseed:
Random seed to feed the random number generator
Returns
-------
t_irr: ndarray
An irregularly sampled time vector
"""
sampling_period = (T/float(N))
N = int(N)
np.random.seed(rseed)
t = np.linspace(0, T, num=5*N)
# First we add jitter
t[1:-1] += sampling_period*0.5*np.random.randn(5*N-2)
# Then we do a random permutation and keep only N points
P = np.random.permutation(5*N)
t_irr = np.sort(t[P[:N]])
return t_irr |
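A small self-contained example of the generator above; the span and sample count are arbitrary:
import numpy as np

t_irr = irregular_sampling(T=100.0, N=64, rseed=0)
print(t_irr.shape)                    # (64,)
print(np.all(np.diff(t_irr) >= 0))    # True: the vector is returned sorted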
def step1_get_authorize_url(self, redirect_uri=None, state=None):
"""Returns a URI to redirect to the provider.
Args:
redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob'
for a non-web-based application, or a URI that
handles the callback from the authorization server.
This parameter is deprecated, please move to passing
the redirect_uri in via the constructor.
state: string, Opaque state string which is passed through the
OAuth2 flow and returned to the client as a query parameter
in the callback.
Returns:
A URI as a string to redirect the user to begin the authorization
flow.
"""
if redirect_uri is not None:
logger.warning((
'The redirect_uri parameter for '
'OAuth2WebServerFlow.step1_get_authorize_url is deprecated. '
'Please move to passing the redirect_uri in via the '
'constructor.'))
self.redirect_uri = redirect_uri
if self.redirect_uri is None:
raise ValueError('The value of redirect_uri must not be None.')
query_params = {
'client_id': self.client_id,
'redirect_uri': self.redirect_uri,
'scope': self.scope,
}
if state is not None:
query_params['state'] = state
if self.login_hint is not None:
query_params['login_hint'] = self.login_hint
if self._pkce:
if not self.code_verifier:
self.code_verifier = _pkce.code_verifier()
challenge = _pkce.code_challenge(self.code_verifier)
query_params['code_challenge'] = challenge
query_params['code_challenge_method'] = 'S256'
query_params.update(self.params)
return _helpers.update_query_params(self.auth_uri, query_params) | Returns a URI to redirect to the provider.
Args:
redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob'
for a non-web-based application, or a URI that
handles the callback from the authorization server.
This parameter is deprecated, please move to passing
the redirect_uri in via the constructor.
state: string, Opaque state string which is passed through the
OAuth2 flow and returned to the client as a query parameter
in the callback.
Returns:
A URI as a string to redirect the user to begin the authorization
flow. | Below is the the instruction that describes the task:
### Input:
Returns a URI to redirect to the provider.
Args:
redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob'
for a non-web-based application, or a URI that
handles the callback from the authorization server.
This parameter is deprecated, please move to passing
the redirect_uri in via the constructor.
state: string, Opaque state string which is passed through the
OAuth2 flow and returned to the client as a query parameter
in the callback.
Returns:
A URI as a string to redirect the user to begin the authorization
flow.
### Response:
def step1_get_authorize_url(self, redirect_uri=None, state=None):
"""Returns a URI to redirect to the provider.
Args:
redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob'
for a non-web-based application, or a URI that
handles the callback from the authorization server.
This parameter is deprecated, please move to passing
the redirect_uri in via the constructor.
state: string, Opaque state string which is passed through the
OAuth2 flow and returned to the client as a query parameter
in the callback.
Returns:
A URI as a string to redirect the user to begin the authorization
flow.
"""
if redirect_uri is not None:
logger.warning((
'The redirect_uri parameter for '
'OAuth2WebServerFlow.step1_get_authorize_url is deprecated. '
'Please move to passing the redirect_uri in via the '
'constructor.'))
self.redirect_uri = redirect_uri
if self.redirect_uri is None:
raise ValueError('The value of redirect_uri must not be None.')
query_params = {
'client_id': self.client_id,
'redirect_uri': self.redirect_uri,
'scope': self.scope,
}
if state is not None:
query_params['state'] = state
if self.login_hint is not None:
query_params['login_hint'] = self.login_hint
if self._pkce:
if not self.code_verifier:
self.code_verifier = _pkce.code_verifier()
challenge = _pkce.code_challenge(self.code_verifier)
query_params['code_challenge'] = challenge
query_params['code_challenge_method'] = 'S256'
query_params.update(self.params)
return _helpers.update_query_params(self.auth_uri, query_params) |
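A usage sketch for a typical web-server flow. The class name follows the oauth2client convention this method appears to come from, and the credentials, scope, and redirect URI are placeholders:
flow = OAuth2WebServerFlow(
    client_id='your-client-id',
    client_secret='your-client-secret',
    scope='https://www.googleapis.com/auth/drive.readonly',
    redirect_uri='https://example.com/oauth2callback')
auth_uri = flow.step1_get_authorize_url(state='opaque-state-token')
# Redirect the user's browser to auth_uri; the provider calls back at redirect_uri.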
def check_bam(bam, samtype="bam"):
"""Check if bam file is valid.
Bam file should:
- exists
- has an index (create if necessary)
- is sorted by coordinate
- has at least one mapped read
"""
ut.check_existance(bam)
samfile = pysam.AlignmentFile(bam, "rb")
if not samfile.has_index():
pysam.index(bam)
samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index
logging.info("Nanoget: No index for bam file could be found, created index.")
if not samfile.header['HD']['SO'] == 'coordinate':
logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
sys.exit("Please use a bam file sorted by coordinate.")
if samtype == "bam":
logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
bam, samfile.mapped, samfile.unmapped))
if samfile.mapped == 0:
logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
return samfile | Check if bam file is valid.
Bam file should:
- exists
- has an index (create if necessary)
- is sorted by coordinate
- has at least one mapped read | Below is the the instruction that describes the task:
### Input:
Check if bam file is valid.
Bam file should:
- exists
- has an index (create if necessary)
- is sorted by coordinate
- has at least one mapped read
### Response:
def check_bam(bam, samtype="bam"):
"""Check if bam file is valid.
Bam file should:
- exists
- has an index (create if necessary)
- is sorted by coordinate
- has at least one mapped read
"""
ut.check_existance(bam)
samfile = pysam.AlignmentFile(bam, "rb")
if not samfile.has_index():
pysam.index(bam)
samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index
logging.info("Nanoget: No index for bam file could be found, created index.")
if not samfile.header['HD']['SO'] == 'coordinate':
logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
sys.exit("Please use a bam file sorted by coordinate.")
if samtype == "bam":
logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
bam, samfile.mapped, samfile.unmapped))
if samfile.mapped == 0:
logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
return samfile |
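A minimal usage sketch; the path is a placeholder and logging is assumed to be configured by the calling module:
samfile = check_bam('alignments.bam')     # hypothetical, coordinate-sorted BAM
print(samfile.mapped, 'mapped reads')
samfile.close()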
def get_logins(self, user_id, start_date=None):
"""Gets the login history for a user, default start_date is 30 days ago
:param int id: User id to get
:param string start_date: "%m/%d/%Y %H:%M:%s" formatted string.
:returns: list https://softlayer.github.io/reference/datatypes/SoftLayer_User_Customer_Access_Authentication/
Example::
get_logins(123, '04/08/2018 0:0:0')
"""
if start_date is None:
date_object = datetime.datetime.today() - datetime.timedelta(days=30)
start_date = date_object.strftime("%m/%d/%Y 0:0:0")
date_filter = {
'loginAttempts': {
'createDate': {
'operation': 'greaterThanDate',
'options': [{'name': 'date', 'value': [start_date]}]
}
}
}
login_log = self.user_service.getLoginAttempts(id=user_id, filter=date_filter)
return login_log | Gets the login history for a user, default start_date is 30 days ago
:param int id: User id to get
:param string start_date: "%m/%d/%Y %H:%M:%s" formatted string.
:returns: list https://softlayer.github.io/reference/datatypes/SoftLayer_User_Customer_Access_Authentication/
Example::
get_logins(123, '04/08/2018 0:0:0') | Below is the the instruction that describes the task:
### Input:
Gets the login history for a user, default start_date is 30 days ago
:param int id: User id to get
:param string start_date: "%m/%d/%Y %H:%M:%s" formatted string.
:returns: list https://softlayer.github.io/reference/datatypes/SoftLayer_User_Customer_Access_Authentication/
Example::
get_logins(123, '04/08/2018 0:0:0')
### Response:
def get_logins(self, user_id, start_date=None):
"""Gets the login history for a user, default start_date is 30 days ago
:param int id: User id to get
:param string start_date: "%m/%d/%Y %H:%M:%s" formatted string.
:returns: list https://softlayer.github.io/reference/datatypes/SoftLayer_User_Customer_Access_Authentication/
Example::
get_logins(123, '04/08/2018 0:0:0')
"""
if start_date is None:
date_object = datetime.datetime.today() - datetime.timedelta(days=30)
start_date = date_object.strftime("%m/%d/%Y 0:0:0")
date_filter = {
'loginAttempts': {
'createDate': {
'operation': 'greaterThanDate',
'options': [{'name': 'date', 'value': [start_date]}]
}
}
}
login_log = self.user_service.getLoginAttempts(id=user_id, filter=date_filter)
return login_log |
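Assumed SoftLayer-style wiring for the call above; the user id and date mirror the docstring example, and the client/manager names are assumptions:
import SoftLayer

client = SoftLayer.create_client_from_env()   # credentials taken from the environment
user_mgr = SoftLayer.UserManager(client)      # assumed manager class exposing get_logins
logins = user_mgr.get_logins(123, '04/08/2018 0:0:0')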
def animate(self,*args,**kwargs): #pragma: no cover
"""
NAME:
animate
PURPOSE:
animate an Orbit
INPUT:
d1= first dimension to plot ('x', 'y', 'R', 'vR', 'vT', 'z', 'vz', ...); can be list with up to three entries for three subplots
d2= second dimension to plot; can be list with up to three entries for three subplots
width= (600) width of output div in px
height= (400) height of output div in px
json_filename= (None) if set, save the data necessary for the figure in this filename (e.g., json_filename= 'orbit_data/orbit.json'); this path is also used in the output HTML, so needs to be accessible
load_jslibs= (True) if True, load the require and jQuery Javascript libraries (necessary in Jupyterlab, not necessary but harmless in notebooks; if embedding on a webpage one typically wants to load these libraries in the header)
ro= (Object-wide default) physical scale for distances to use to convert
vo= (Object-wide default) physical scale for velocities to use to convert
use_physical= use to override Object-wide default for using a physical scale for output
+kwargs for ra,dec,ll,bb, etc. functions
OUTPUT:
IPython.display.HTML object with code to animate the orbit; can be directly shown in jupyter notebook or embedded in HTML pages; get a text version of the HTML using the _repr_html_() function
HISTORY:
2017-09-17-24 - Written - Bovy (UofT)
2017-11-28 - Allow arbitrary functions of time to be plotted - Bovy (UofT)
"""
try:
from IPython.display import HTML
except ImportError:
raise ImportError("Orbit.animate requires ipython/jupyter to be installed")
if (kwargs.get('use_physical',False) \
and kwargs.get('ro',self._roSet)) or \
(not 'use_physical' in kwargs \
and kwargs.get('ro',self._roSet)):
labeldict= {'t':'t (Gyr)',
'R':'R (kpc)',
'vR':'v_R (km/s)',
'vT':'v_T (km/s)',
'z':'z (kpc)',
'vz':'v_z (km/s)',
'phi':'azimuthal angle',
'r':'r (kpc)',
'x':'x (kpc)',
'y':'y (kpc)',
'vx':'v_x (km/s)',
'vy':'v_y (km/s)',
'E':'E (km^2/s^2)',
'Ez':'E_z (km^2/s^2)',
'ER':'E_R (km^2/s^2)',
'Enorm':'E(t)/E(0.)',
'Eznorm':'E_z(t)/E_z(0.)',
'ERnorm':'E_R(t)/E_R(0.)',
'Jacobi':'E-Omega_p L (km^2/s^2)',
'Jacobinorm':'(E-Omega_p L)(t)/(E-Omega_p L)(0)'}
else:
labeldict= {'t':'t','R':'R','vR':'v_R','vT':'v_T',
'z':'z','vz':'v_z','phi':r'azimuthal angle',
'r':'r',
'x':'x','y':'y','vx':'v_x','vy':'v_y',
'E':'E','Enorm':'E(t)/E(0.)',
'Ez':'E_z','Eznorm':'E_z(t)/E_z(0.)',
'ER':r'E_R','ERnorm':r'E_R(t)/E_R(0.)',
'Jacobi':r'E-Omega_p L',
'Jacobinorm':r'(E-Omega_p L)(t)/(E-Omega_p L)(0)'}
labeldict.update({'ra':'RA (deg)',
'dec':'Dec (deg)',
'll':'Galactic lon (deg)',
'bb':'Galactic lat (deg)',
'dist':'distance (kpc)',
'pmra':'pmRA (mas/yr)',
'pmdec':'pmDec (mas/yr)',
'pmll':'pmGlon (mas/yr)',
'pmbb':'pmGlat (mas/yr)',
'vlos':'line-of-sight vel (km/s)',
'helioX':'X (kpc)',
'helioY':'Y (kpc)',
'helioZ':'Z (kpc)',
'U':'U (km/s)',
'V':'V (km/s)',
'W':'W (km/s)'})
# Cannot be using Quantity output
kwargs['quantity']= False
#Defaults
if not 'd1' in kwargs and not 'd2' in kwargs:
if len(self.vxvv) == 3:
d1= 'R'
d2= 'vR'
elif len(self.vxvv) == 4:
d1= 'x'
d2= 'y'
elif len(self.vxvv) == 2:
d1= 'x'
d2= 'vx'
elif len(self.vxvv) == 5 or len(self.vxvv) == 6:
d1= 'R'
d2= 'z'
elif not 'd1' in kwargs:
d2= kwargs.pop('d2')
d1= 't'
elif not 'd2' in kwargs:
d1= kwargs.pop('d1')
d2= 't'
else:
d1= kwargs.pop('d1')
d2= kwargs.pop('d2')
xs= []
ys= []
xlabels= []
ylabels= []
if isinstance(d1,str) or callable(d1):
d1s= [d1]
d2s= [d2]
else:
d1s= d1
d2s= d2
if len(d1s) > 3:
raise ValueError('Orbit.animate only works for up to three subplots')
all_xlabel= kwargs.get('xlabel',[None for d in d1])
all_ylabel= kwargs.get('ylabel',[None for d in d2])
for d1,d2, xlabel, ylabel in zip(d1s,d2s,all_xlabel,all_ylabel):
#Get x and y for each subplot
x= self._parse_plot_quantity(d1,**kwargs)
y= self._parse_plot_quantity(d2,**kwargs)
xs.append(x)
ys.append(y)
if xlabel is None:
xlabels.append(labeldict.get(d1,'\mathrm{No\ xlabel\ specified}'))
else:
xlabels.append(xlabel)
if ylabel is None:
ylabels.append(labeldict.get(d2,'\mathrm{No\ ylabel\ specified}'))
else:
ylabels.append(ylabel)
kwargs.pop('ro',None)
kwargs.pop('vo',None)
kwargs.pop('obs',None)
kwargs.pop('use_physical',None)
kwargs.pop('pot',None)
kwargs.pop('OmegaP',None)
kwargs.pop('quantity',None)
width= kwargs.pop('width',600)
height= kwargs.pop('height',400)
load_jslibs= kwargs.pop('load_jslibs',True)
if load_jslibs:
load_jslibs_code= """</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.5/require.min.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<script>
"""
else:
load_jslibs_code= ""
# Dump data to HTML
nplots= len(xs)
jsonDict= {}
jsonDict['x']= xs[0].tolist()
jsonDict['y']= ys[0].tolist()
for ii in range(1,nplots):
jsonDict['x%i' % (ii+1)]= xs[ii].tolist()
jsonDict['y%i' % (ii+1)]= ys[ii].tolist()
json_filename= kwargs.pop('json_filename',None)
if json_filename is None:
jd= json.dumps(jsonDict)
json_code= """ let data= JSON.parse('{jd}');""".format(jd=jd)
close_json_code= ""
else:
with open(json_filename,'w') as jfile:
json.dump(jsonDict,jfile)
json_code= """Plotly.d3.json('{jfilename}',function(data){{""".format(jfilename=json_filename)
close_json_code= "});"
self.divid= 'galpy-'\
+''.join(choice(ascii_lowercase) for i in range(24))
button_width= 419.51+4.*10.
button_margin_left= int(nu.round((width-button_width)/2.))
if button_margin_left < 0: button_margin_left= 0
# Layout for multiple plots
if len(d1s) == 1:
xmin= [0,0,0]
xmax= [1,1,1]
elif len(d1s) == 2:
xmin= [0,0.55,0]
xmax= [0.45,1,1]
elif len(d1s) == 3:
xmin= [0,0.365,0.73]
xmax= [0.27,0.635,1]
layout= """{{
xaxis: {{
title: '{xlabel}',
domain: [{xmin},{xmax}],
}},
yaxis: {{title: '{ylabel}'}},
margin: {{t: 20}},
hovermode: 'closest',
showlegend: false,
""".format(xlabel=xlabels[0],ylabel=ylabels[0],xmin=xmin[0],xmax=xmax[0])
for ii in range(1,nplots):
layout+= """ xaxis{idx}: {{
title: '{xlabel}',
anchor: 'y{idx}',
domain: [{xmin},{xmax}],
}},
yaxis{idx}: {{
title: '{ylabel}',
anchor: 'x{idx}',
}},
""".format(idx=ii+1,xlabel=xlabels[ii],ylabel=ylabels[ii],
xmin=xmin[ii],xmax=xmax[ii])
layout+="""}"""
# Additional traces for additional plots
if len(d1s) > 1:
setup_trace2= """
let trace3= {{
x: data.x2.slice(0,numPerFrame),
y: data.y2.slice(0,numPerFrame),
xaxis: 'x2',
yaxis: 'y2',
mode: 'lines',
line: {{
shape: 'spline',
width: 0.8,
color: '#1f77b4',
}},
}};
let trace4= {{
x: data.x2.slice(0,numPerFrame),
y: data.y2.slice(0,numPerFrame),
xaxis: 'x2',
yaxis: 'y2',
mode: 'lines',
line: {{
shape: 'spline',
width: 3.,
color: '#d62728',
}},
}};
""".format(divid=self.divid) # not used!
delete_trace4= """Plotly.deleteTraces('{divid}',3);""".format(divid=self.divid)
delete_trace3= """Plotly.deleteTraces('{divid}',0);""".format(divid=self.divid)
update_trace34= """
trace_slice_begin+= trace_slice_len;
Plotly.extendTraces('{divid}', {{
x: [data.x2.slice(trace_slice_begin,trace_slice_end)],
y: [data.y2.slice(trace_slice_begin,trace_slice_end)],
}}, [2]);
trace_slice_begin-= trace_slice_len;
trace4= {{
x: [data.x2.slice(trace_slice_begin,trace_slice_end)],
y: [data.y2.slice(trace_slice_begin,trace_slice_end)],
}},
Plotly.restyle('{divid}',trace4,[3]);
""".format(divid=self.divid)
else:
setup_trace2= """
let traces= [trace1,trace2];
"""
delete_trace4= ""
delete_trace3= ""
update_trace34= ""
if len(d1s) > 2:
setup_trace3= """
let trace5= {{
x: data.x3.slice(0,numPerFrame),
y: data.y3.slice(0,numPerFrame),
xaxis: 'x3',
yaxis: 'y3',
mode: 'lines',
line: {{
shape: 'spline',
width: 0.8,
color: '#1f77b4',
}},
}};
let trace6= {{
x: data.x3.slice(0,numPerFrame),
y: data.y3.slice(0,numPerFrame),
xaxis: 'x3',
yaxis: 'y3',
mode: 'lines',
line: {{
shape: 'spline',
width: 3.,
color: '#d62728',
}},
}};
let traces= [trace1,trace2,trace3,trace4,trace5,trace6];
""".format(divid=self.divid)
delete_trace6= """Plotly.deleteTraces('{divid}',5);""".format(divid=self.divid)
delete_trace5= """Plotly.deleteTraces('{divid}',0);""".format(divid=self.divid)
update_trace56= """
trace_slice_begin+= trace_slice_len;
Plotly.extendTraces('{divid}', {{
x: [data.x3.slice(trace_slice_begin,trace_slice_end)],
y: [data.y3.slice(trace_slice_begin,trace_slice_end)],
}}, [4]);
trace_slice_begin-= trace_slice_len;
trace6= {{
x: [data.x3.slice(trace_slice_begin,trace_slice_end)],
y: [data.y3.slice(trace_slice_begin,trace_slice_end)],
}},
Plotly.restyle('{divid}',trace6,[5]);
""".format(divid=self.divid)
elif len(d1s) > 1:
setup_trace3= """
let traces= [trace1,trace2,trace3,trace4];
"""
delete_trace5= ""
delete_trace6= ""
update_trace56= ""
else:
setup_trace3= ""
delete_trace5= ""
delete_trace6= ""
update_trace56= ""
return HTML("""
<style>
.galpybutton {{
background-color:#ffffff;
-moz-border-radius:16px;
-webkit-border-radius:16px;
border-radius:16px;
border:1px solid #1f77b4;
display:inline-block;
cursor:pointer;
color:#1f77b4;
font-family:Courier;
font-size:17px;
padding:8px 10px;
text-decoration:none;
text-shadow:0px 1px 0px #2f6627;
}}
.galpybutton:hover {{
background-color:#ffffff;
}}
.galpybutton:active {{
position:relative;
top:1px;
}}
.galpybutton:focus{{
outline:0;
}}
</style>
<div id='{divid}' style='width:{width}px;height:{height}px;'></div>
<div class="controlbutton" id="{divid}-play" style="margin-left:{button_margin_left}px;display: inline-block;">
<button class="galpybutton">Play</button></div>
<div class="controlbutton" id="{divid}-pause" style="margin-left:10px;display: inline-block;">
<button class="galpybutton">Pause</button></div>
<div class="controlbutton" id="{divid}-timestwo" style="margin-left:10px;display: inline-block;">
<button class="galpybutton">Speed<font face="Arial"> </font>x<font face="Arial"> </font>2</button></div>
<div class="controlbutton" id="{divid}-timeshalf" style="margin-left:10px;display: inline-block;">
<button class="galpybutton">Speed<font face="Arial"> </font>/<font face="Arial"> </font>2</button></div>
<div class="controlbutton" id="{divid}-replay" style="margin-left:10px;display: inline-block;">
<button class="galpybutton">Replay</button></div>
<script>
require.config({{
paths: {{
Plotly: 'https://cdn.plot.ly/plotly-latest.min',
}}
}});
{load_jslibs_code}
require(['Plotly'], function (Plotly) {{
{json_code}
let layout = {layout};
let numPerFrame= 5;
let cnt= 1;
let interval;
let trace_slice_len;
let trace_slice_begin;
let trace_slice_end;
setup_trace();
$('.controlbutton button').click(function() {{
let button_type= this.parentNode.id;
if ( button_type === '{divid}-play' ) {{
clearInterval(interval);
interval= animate_trace();
}}
else if ( button_type === '{divid}-pause' )
clearInterval(interval);
else if ( button_type === '{divid}-timestwo' ) {{
cnt/= 2;
numPerFrame*= 2;
}}
else if ( button_type === '{divid}-timeshalf' ) {{
cnt*= 2;
numPerFrame/= 2;
}}
else if ( button_type === '{divid}-replay' ) {{
cnt= 1;
try {{ // doesn't exist if animation has already ended
{delete_trace6}
{delete_trace4}
Plotly.deleteTraces('{divid}',1);
}}
catch (err) {{
}}
Plotly.deleteTraces('{divid}',0);
{delete_trace3}
{delete_trace5}
clearInterval(interval);
setup_trace();
interval= animate_trace();
}}
}});
function setup_trace() {{
let trace1= {{
x: data.x.slice(0,numPerFrame),
y: data.y.slice(0,numPerFrame),
mode: 'lines',
line: {{
shape: 'spline',
width: 0.8,
color: '#1f77b4',
}},
}};
let trace2= {{
x: data.x.slice(0,numPerFrame),
y: data.y.slice(0,numPerFrame),
mode: 'lines',
line: {{
shape: 'spline',
width: 3.,
color: '#d62728',
}},
}};
{setup_trace2}
{setup_trace3}
Plotly.plot('{divid}',traces,layout);
}}
function animate_trace() {{
return setInterval(function() {{
// Make sure narrow and thick trace end in the same
// and the highlighted length has constant length
trace_slice_len= Math.floor(numPerFrame);
if ( trace_slice_len < 1) trace_slice_len= 1;
trace_slice_begin= Math.floor(cnt*numPerFrame);
trace_slice_end= Math.floor(Math.min(cnt*numPerFrame+trace_slice_len,data.x.length-1));
Plotly.extendTraces('{divid}', {{
x: [data.x.slice(trace_slice_begin,trace_slice_end)],
y: [data.y.slice(trace_slice_begin,trace_slice_end)],
}}, [0]);
trace_slice_begin-= trace_slice_len;
trace2= {{
x: [data.x.slice(trace_slice_begin,trace_slice_end)],
y: [data.y.slice(trace_slice_begin,trace_slice_end)],
}};
Plotly.restyle('{divid}',trace2,[1]);
{update_trace34}
{update_trace56}
cnt+= 1;
if(cnt*numPerFrame+trace_slice_len > data.x.length/1) {{
clearInterval(interval);
{delete_trace6}
{delete_trace4}
Plotly.deleteTraces('{divid}',1);
}}
}}, 30);
}}
{close_json_code}}});
</script>""".format(json_code=json_code,close_json_code=close_json_code,
divid=self.divid,width=width,height=height,
button_margin_left=button_margin_left,
layout=layout,load_jslibs_code=load_jslibs_code,
setup_trace2=setup_trace2,setup_trace3=setup_trace3,
delete_trace4=delete_trace4,delete_trace6=delete_trace6,
delete_trace3=delete_trace3,delete_trace5=delete_trace5,
update_trace34=update_trace34,
update_trace56=update_trace56)) | NAME:
animate
PURPOSE:
animate an Orbit
INPUT:
d1= first dimension to plot ('x', 'y', 'R', 'vR', 'vT', 'z', 'vz', ...); can be list with up to three entries for three subplots
d2= second dimension to plot; can be list with up to three entries for three subplots
width= (600) width of output div in px
height= (400) height of output div in px
json_filename= (None) if set, save the data necessary for the figure in this filename (e.g., json_filename= 'orbit_data/orbit.json'); this path is also used in the output HTML, so needs to be accessible
load_jslibs= (True) if True, load the require and jQuery Javascript libraries (necessary in Jupyterlab, not necessary but harmless in notebooks; if embedding on a webpage one typically wants to load these libraries in the header)
ro= (Object-wide default) physical scale for distances to use to convert
vo= (Object-wide default) physical scale for velocities to use to convert
use_physical= use to override Object-wide default for using a physical scale for output
+kwargs for ra,dec,ll,bb, etc. functions
OUTPUT:
IPython.display.HTML object with code to animate the orbit; can be directly shown in jupyter notebook or embedded in HTML pages; get a text version of the HTML using the _repr_html_() function
HISTORY:
2017-09-17-24 - Written - Bovy (UofT)
2017-11-28 - Allow arbitrary functions of time to be plotted - Bovy (UofT) | Below is the the instruction that describes the task:
### Input:
NAME:
animate
PURPOSE:
animate an Orbit
INPUT:
d1= first dimension to plot ('x', 'y', 'R', 'vR', 'vT', 'z', 'vz', ...); can be list with up to three entries for three subplots
d2= second dimension to plot; can be list with up to three entries for three subplots
width= (600) width of output div in px
height= (400) height of output div in px
json_filename= (None) if set, save the data necessary for the figure in this filename (e.g., json_filename= 'orbit_data/orbit.json'); this path is also used in the output HTML, so needs to be accessible
load_jslibs= (True) if True, load the require and jQuery Javascript libraries (necessary in Jupyterlab, not necessary but harmless in notebooks; if embedding on a webpage one typically wants to load these libraries in the header)
ro= (Object-wide default) physical scale for distances to use to convert
vo= (Object-wide default) physical scale for velocities to use to convert
use_physical= use to override Object-wide default for using a physical scale for output
+kwargs for ra,dec,ll,bb, etc. functions
OUTPUT:
IPython.display.HTML object with code to animate the orbit; can be directly shown in jupyter notebook or embedded in HTML pages; get a text version of the HTML using the _repr_html_() function
HISTORY:
2017-09-17-24 - Written - Bovy (UofT)
2017-11-28 - Allow arbitrary functions of time to be plotted - Bovy (UofT)
### Response:
def animate(self,*args,**kwargs): #pragma: no cover
"""
NAME:
animate
PURPOSE:
animate an Orbit
INPUT:
d1= first dimension to plot ('x', 'y', 'R', 'vR', 'vT', 'z', 'vz', ...); can be list with up to three entries for three subplots
d2= second dimension to plot; can be list with up to three entries for three subplots
width= (600) width of output div in px
height= (400) height of output div in px
json_filename= (None) if set, save the data necessary for the figure in this filename (e.g., json_filename= 'orbit_data/orbit.json'); this path is also used in the output HTML, so needs to be accessible
load_jslibs= (True) if True, load the require and jQuery Javascript libraries (necessary in Jupyterlab, not necessary but harmless in notebooks; if embedding on a webpage one typically wants to load these libraries in the header)
ro= (Object-wide default) physical scale for distances to use to convert
vo= (Object-wide default) physical scale for velocities to use to convert
use_physical= use to override Object-wide default for using a physical scale for output
+kwargs for ra,dec,ll,bb, etc. functions
OUTPUT:
IPython.display.HTML object with code to animate the orbit; can be directly shown in jupyter notebook or embedded in HTML pages; get a text version of the HTML using the _repr_html_() function
HISTORY:
2017-09-17-24 - Written - Bovy (UofT)
2017-11-28 - Allow arbitrary functions of time to be plotted - Bovy (UofT)
"""
try:
from IPython.display import HTML
except ImportError:
raise ImportError("Orbit.animate requires ipython/jupyter to be installed")
if (kwargs.get('use_physical',False) \
and kwargs.get('ro',self._roSet)) or \
(not 'use_physical' in kwargs \
and kwargs.get('ro',self._roSet)):
labeldict= {'t':'t (Gyr)',
'R':'R (kpc)',
'vR':'v_R (km/s)',
'vT':'v_T (km/s)',
'z':'z (kpc)',
'vz':'v_z (km/s)',
'phi':'azimuthal angle',
'r':'r (kpc)',
'x':'x (kpc)',
'y':'y (kpc)',
'vx':'v_x (km/s)',
'vy':'v_y (km/s)',
'E':'E (km^2/s^2)',
'Ez':'E_z (km^2/s^2)',
'ER':'E_R (km^2/s^2)',
'Enorm':'E(t)/E(0.)',
'Eznorm':'E_z(t)/E_z(0.)',
'ERnorm':'E_R(t)/E_R(0.)',
'Jacobi':'E-Omega_p L (km^2/s^2)',
'Jacobinorm':'(E-Omega_p L)(t)/(E-Omega_p L)(0)'}
else:
labeldict= {'t':'t','R':'R','vR':'v_R','vT':'v_T',
'z':'z','vz':'v_z','phi':r'azimuthal angle',
'r':'r',
'x':'x','y':'y','vx':'v_x','vy':'v_y',
'E':'E','Enorm':'E(t)/E(0.)',
'Ez':'E_z','Eznorm':'E_z(t)/E_z(0.)',
'ER':r'E_R','ERnorm':r'E_R(t)/E_R(0.)',
'Jacobi':r'E-Omega_p L',
'Jacobinorm':r'(E-Omega_p L)(t)/(E-Omega_p L)(0)'}
labeldict.update({'ra':'RA (deg)',
'dec':'Dec (deg)',
'll':'Galactic lon (deg)',
'bb':'Galactic lat (deg)',
'dist':'distance (kpc)',
'pmra':'pmRA (mas/yr)',
'pmdec':'pmDec (mas/yr)',
'pmll':'pmGlon (mas/yr)',
'pmbb':'pmGlat (mas/yr)',
'vlos':'line-of-sight vel (km/s)',
'helioX':'X (kpc)',
'helioY':'Y (kpc)',
'helioZ':'Z (kpc)',
'U':'U (km/s)',
'V':'V (km/s)',
'W':'W (km/s)'})
# Cannot be using Quantity output
kwargs['quantity']= False
#Defaults
if not 'd1' in kwargs and not 'd2' in kwargs:
if len(self.vxvv) == 3:
d1= 'R'
d2= 'vR'
elif len(self.vxvv) == 4:
d1= 'x'
d2= 'y'
elif len(self.vxvv) == 2:
d1= 'x'
d2= 'vx'
elif len(self.vxvv) == 5 or len(self.vxvv) == 6:
d1= 'R'
d2= 'z'
elif not 'd1' in kwargs:
d2= kwargs.pop('d2')
d1= 't'
elif not 'd2' in kwargs:
d1= kwargs.pop('d1')
d2= 't'
else:
d1= kwargs.pop('d1')
d2= kwargs.pop('d2')
xs= []
ys= []
xlabels= []
ylabels= []
if isinstance(d1,str) or callable(d1):
d1s= [d1]
d2s= [d2]
else:
d1s= d1
d2s= d2
if len(d1s) > 3:
raise ValueError('Orbit.animate only works for up to three subplots')
all_xlabel= kwargs.get('xlabel',[None for d in d1])
all_ylabel= kwargs.get('ylabel',[None for d in d2])
for d1,d2, xlabel, ylabel in zip(d1s,d2s,all_xlabel,all_ylabel):
#Get x and y for each subplot
x= self._parse_plot_quantity(d1,**kwargs)
y= self._parse_plot_quantity(d2,**kwargs)
xs.append(x)
ys.append(y)
if xlabel is None:
xlabels.append(labeldict.get(d1,'\mathrm{No\ xlabel\ specified}'))
else:
xlabels.append(xlabel)
if ylabel is None:
ylabels.append(labeldict.get(d2,'\mathrm{No\ ylabel\ specified}'))
else:
ylabels.append(ylabel)
kwargs.pop('ro',None)
kwargs.pop('vo',None)
kwargs.pop('obs',None)
kwargs.pop('use_physical',None)
kwargs.pop('pot',None)
kwargs.pop('OmegaP',None)
kwargs.pop('quantity',None)
width= kwargs.pop('width',600)
height= kwargs.pop('height',400)
load_jslibs= kwargs.pop('load_jslibs',True)
if load_jslibs:
load_jslibs_code= """</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.5/require.min.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<script>
"""
else:
load_jslibs_code= ""
# Dump data to HTML
nplots= len(xs)
jsonDict= {}
jsonDict['x']= xs[0].tolist()
jsonDict['y']= ys[0].tolist()
for ii in range(1,nplots):
jsonDict['x%i' % (ii+1)]= xs[ii].tolist()
jsonDict['y%i' % (ii+1)]= ys[ii].tolist()
json_filename= kwargs.pop('json_filename',None)
if json_filename is None:
jd= json.dumps(jsonDict)
json_code= """ let data= JSON.parse('{jd}');""".format(jd=jd)
close_json_code= ""
else:
with open(json_filename,'w') as jfile:
json.dump(jsonDict,jfile)
json_code= """Plotly.d3.json('{jfilename}',function(data){{""".format(jfilename=json_filename)
close_json_code= "});"
self.divid= 'galpy-'\
+''.join(choice(ascii_lowercase) for i in range(24))
button_width= 419.51+4.*10.
button_margin_left= int(nu.round((width-button_width)/2.))
if button_margin_left < 0: button_margin_left= 0
# Layout for multiple plots
if len(d1s) == 1:
xmin= [0,0,0]
xmax= [1,1,1]
elif len(d1s) == 2:
xmin= [0,0.55,0]
xmax= [0.45,1,1]
elif len(d1s) == 3:
xmin= [0,0.365,0.73]
xmax= [0.27,0.635,1]
layout= """{{
xaxis: {{
title: '{xlabel}',
domain: [{xmin},{xmax}],
}},
yaxis: {{title: '{ylabel}'}},
margin: {{t: 20}},
hovermode: 'closest',
showlegend: false,
""".format(xlabel=xlabels[0],ylabel=ylabels[0],xmin=xmin[0],xmax=xmax[0])
for ii in range(1,nplots):
layout+= """ xaxis{idx}: {{
title: '{xlabel}',
anchor: 'y{idx}',
domain: [{xmin},{xmax}],
}},
yaxis{idx}: {{
title: '{ylabel}',
anchor: 'x{idx}',
}},
""".format(idx=ii+1,xlabel=xlabels[ii],ylabel=ylabels[ii],
xmin=xmin[ii],xmax=xmax[ii])
layout+="""}"""
# Additional traces for additional plots
if len(d1s) > 1:
setup_trace2= """
let trace3= {{
x: data.x2.slice(0,numPerFrame),
y: data.y2.slice(0,numPerFrame),
xaxis: 'x2',
yaxis: 'y2',
mode: 'lines',
line: {{
shape: 'spline',
width: 0.8,
color: '#1f77b4',
}},
}};
let trace4= {{
x: data.x2.slice(0,numPerFrame),
y: data.y2.slice(0,numPerFrame),
xaxis: 'x2',
yaxis: 'y2',
mode: 'lines',
line: {{
shape: 'spline',
width: 3.,
color: '#d62728',
}},
}};
""".format(divid=self.divid) # not used!
delete_trace4= """Plotly.deleteTraces('{divid}',3);""".format(divid=self.divid)
delete_trace3= """Plotly.deleteTraces('{divid}',0);""".format(divid=self.divid)
update_trace34= """
trace_slice_begin+= trace_slice_len;
Plotly.extendTraces('{divid}', {{
x: [data.x2.slice(trace_slice_begin,trace_slice_end)],
y: [data.y2.slice(trace_slice_begin,trace_slice_end)],
}}, [2]);
trace_slice_begin-= trace_slice_len;
trace4= {{
x: [data.x2.slice(trace_slice_begin,trace_slice_end)],
y: [data.y2.slice(trace_slice_begin,trace_slice_end)],
}},
Plotly.restyle('{divid}',trace4,[3]);
""".format(divid=self.divid)
else:
setup_trace2= """
let traces= [trace1,trace2];
"""
delete_trace4= ""
delete_trace3= ""
update_trace34= ""
if len(d1s) > 2:
setup_trace3= """
let trace5= {{
x: data.x3.slice(0,numPerFrame),
y: data.y3.slice(0,numPerFrame),
xaxis: 'x3',
yaxis: 'y3',
mode: 'lines',
line: {{
shape: 'spline',
width: 0.8,
color: '#1f77b4',
}},
}};
let trace6= {{
x: data.x3.slice(0,numPerFrame),
y: data.y3.slice(0,numPerFrame),
xaxis: 'x3',
yaxis: 'y3',
mode: 'lines',
line: {{
shape: 'spline',
width: 3.,
color: '#d62728',
}},
}};
let traces= [trace1,trace2,trace3,trace4,trace5,trace6];
""".format(divid=self.divid)
delete_trace6= """Plotly.deleteTraces('{divid}',5);""".format(divid=self.divid)
delete_trace5= """Plotly.deleteTraces('{divid}',0);""".format(divid=self.divid)
update_trace56= """
trace_slice_begin+= trace_slice_len;
Plotly.extendTraces('{divid}', {{
x: [data.x3.slice(trace_slice_begin,trace_slice_end)],
y: [data.y3.slice(trace_slice_begin,trace_slice_end)],
}}, [4]);
trace_slice_begin-= trace_slice_len;
trace6= {{
x: [data.x3.slice(trace_slice_begin,trace_slice_end)],
y: [data.y3.slice(trace_slice_begin,trace_slice_end)],
}},
Plotly.restyle('{divid}',trace6,[5]);
""".format(divid=self.divid)
elif len(d1s) > 1:
setup_trace3= """
let traces= [trace1,trace2,trace3,trace4];
"""
delete_trace5= ""
delete_trace6= ""
update_trace56= ""
else:
setup_trace3= ""
delete_trace5= ""
delete_trace6= ""
update_trace56= ""
return HTML("""
<style>
.galpybutton {{
background-color:#ffffff;
-moz-border-radius:16px;
-webkit-border-radius:16px;
border-radius:16px;
border:1px solid #1f77b4;
display:inline-block;
cursor:pointer;
color:#1f77b4;
font-family:Courier;
font-size:17px;
padding:8px 10px;
text-decoration:none;
text-shadow:0px 1px 0px #2f6627;
}}
.galpybutton:hover {{
background-color:#ffffff;
}}
.galpybutton:active {{
position:relative;
top:1px;
}}
.galpybutton:focus{{
outline:0;
}}
</style>
<div id='{divid}' style='width:{width}px;height:{height}px;'></div>
<div class="controlbutton" id="{divid}-play" style="margin-left:{button_margin_left}px;display: inline-block;">
<button class="galpybutton">Play</button></div>
<div class="controlbutton" id="{divid}-pause" style="margin-left:10px;display: inline-block;">
<button class="galpybutton">Pause</button></div>
<div class="controlbutton" id="{divid}-timestwo" style="margin-left:10px;display: inline-block;">
<button class="galpybutton">Speed<font face="Arial"> </font>x<font face="Arial"> </font>2</button></div>
<div class="controlbutton" id="{divid}-timeshalf" style="margin-left:10px;display: inline-block;">
<button class="galpybutton">Speed<font face="Arial"> </font>/<font face="Arial"> </font>2</button></div>
<div class="controlbutton" id="{divid}-replay" style="margin-left:10px;display: inline-block;">
<button class="galpybutton">Replay</button></div>
<script>
require.config({{
paths: {{
Plotly: 'https://cdn.plot.ly/plotly-latest.min',
}}
}});
{load_jslibs_code}
require(['Plotly'], function (Plotly) {{
{json_code}
let layout = {layout};
let numPerFrame= 5;
let cnt= 1;
let interval;
let trace_slice_len;
let trace_slice_begin;
let trace_slice_end;
setup_trace();
$('.controlbutton button').click(function() {{
let button_type= this.parentNode.id;
if ( button_type === '{divid}-play' ) {{
clearInterval(interval);
interval= animate_trace();
}}
else if ( button_type === '{divid}-pause' )
clearInterval(interval);
else if ( button_type === '{divid}-timestwo' ) {{
cnt/= 2;
numPerFrame*= 2;
}}
else if ( button_type === '{divid}-timeshalf' ) {{
cnt*= 2;
numPerFrame/= 2;
}}
else if ( button_type === '{divid}-replay' ) {{
cnt= 1;
try {{ // doesn't exist if animation has already ended
{delete_trace6}
{delete_trace4}
Plotly.deleteTraces('{divid}',1);
}}
catch (err) {{
}}
Plotly.deleteTraces('{divid}',0);
{delete_trace3}
{delete_trace5}
clearInterval(interval);
setup_trace();
interval= animate_trace();
}}
}});
function setup_trace() {{
let trace1= {{
x: data.x.slice(0,numPerFrame),
y: data.y.slice(0,numPerFrame),
mode: 'lines',
line: {{
shape: 'spline',
width: 0.8,
color: '#1f77b4',
}},
}};
let trace2= {{
x: data.x.slice(0,numPerFrame),
y: data.y.slice(0,numPerFrame),
mode: 'lines',
line: {{
shape: 'spline',
width: 3.,
color: '#d62728',
}},
}};
{setup_trace2}
{setup_trace3}
Plotly.plot('{divid}',traces,layout);
}}
function animate_trace() {{
return setInterval(function() {{
                // Make sure the narrow and thick traces end at the same point
                // and the highlighted segment keeps a constant length
trace_slice_len= Math.floor(numPerFrame);
if ( trace_slice_len < 1) trace_slice_len= 1;
trace_slice_begin= Math.floor(cnt*numPerFrame);
trace_slice_end= Math.floor(Math.min(cnt*numPerFrame+trace_slice_len,data.x.length-1));
Plotly.extendTraces('{divid}', {{
x: [data.x.slice(trace_slice_begin,trace_slice_end)],
y: [data.y.slice(trace_slice_begin,trace_slice_end)],
}}, [0]);
trace_slice_begin-= trace_slice_len;
trace2= {{
x: [data.x.slice(trace_slice_begin,trace_slice_end)],
y: [data.y.slice(trace_slice_begin,trace_slice_end)],
}};
Plotly.restyle('{divid}',trace2,[1]);
{update_trace34}
{update_trace56}
cnt+= 1;
if(cnt*numPerFrame+trace_slice_len > data.x.length/1) {{
clearInterval(interval);
{delete_trace6}
{delete_trace4}
Plotly.deleteTraces('{divid}',1);
}}
}}, 30);
}}
{close_json_code}}});
</script>""".format(json_code=json_code,close_json_code=close_json_code,
divid=self.divid,width=width,height=height,
button_margin_left=button_margin_left,
layout=layout,load_jslibs_code=load_jslibs_code,
setup_trace2=setup_trace2,setup_trace3=setup_trace3,
delete_trace4=delete_trace4,delete_trace6=delete_trace6,
delete_trace3=delete_trace3,delete_trace5=delete_trace5,
update_trace34=update_trace34,
update_trace56=update_trace56)) |
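The JavaScript assembled above reads everything from the JSON dictionary built at the top of this block: data.x/data.y drive the first panel and data.x2/data.y2 (and so on) drive any additional panels. A minimal sketch of that payload, with made-up samples standing in for the integrated orbit arrays (the first-panel keys 'x'/'y' are filled in earlier in the method, outside this excerpt):

import json
import numpy as np

# Hypothetical two-panel payload; the real xs/ys arrays come from the orbit integration.
t = np.linspace(0., 10., 101)
jsonDict = {'x': np.cos(t).tolist(), 'y': np.sin(t).tolist(),   # panel 1
            'x2': t.tolist(), 'y2': np.cos(t).tolist()}         # panel 2
jd = json.dumps(jsonDict)
# The generated page then runs: let data = JSON.parse('...');
# setup_trace()/animate_trace() slice data.x, data.y, data.x2, data.y2 per frame.
print(len(json.loads(jd)['x2']))  # 101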
def read_object(ctx, pin, object_id):
"""
Read arbitrary PIV object.
Read PIV object by providing the object id.
\b
OBJECT-ID Id of PIV object in HEX.
"""
controller = ctx.obj['controller']
def do_read_object(retry=True):
try:
click.echo(controller.get_data(object_id))
except APDUError as e:
if e.sw == SW.NOT_FOUND:
ctx.fail('No data found.')
elif e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED:
_verify_pin(ctx, controller, pin)
do_read_object(retry=False)
else:
raise
do_read_object() | Read arbitrary PIV object.
Read PIV object by providing the object id.
\b
    OBJECT-ID Id of PIV object in HEX. | Below is the instruction that describes the task:
### Input:
Read arbitrary PIV object.
Read PIV object by providing the object id.
\b
OBJECT-ID Id of PIV object in HEX.
### Response:
def read_object(ctx, pin, object_id):
"""
Read arbitrary PIV object.
Read PIV object by providing the object id.
\b
OBJECT-ID Id of PIV object in HEX.
"""
controller = ctx.obj['controller']
def do_read_object(retry=True):
try:
click.echo(controller.get_data(object_id))
except APDUError as e:
if e.sw == SW.NOT_FOUND:
ctx.fail('No data found.')
elif e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED:
_verify_pin(ctx, controller, pin)
do_read_object(retry=False)
else:
raise
do_read_object() |
def sub_dfs_by_size(df, size):
"""Get a generator yielding consecutive sub-dataframes of the given size.
Arguments
---------
df : pandas.DataFrame
The dataframe for which to get sub-dataframes.
size : int
The size of each sub-dataframe.
Returns
-------
generator
A generator yielding consecutive sub-dataframe of the given size.
Example
-------
>>> import pandas as pd; import pdutil;
>>> data = [[23, "Jen"], [42, "Ray"], [15, "Fin"]]
>>> df = pd.DataFrame(data, columns=['age', 'name'])
>>> for subdf in pdutil.iter.sub_dfs_by_size(df, 2): print(subdf)
age name
0 23 Jen
1 42 Ray
age name
2 15 Fin
"""
for i in range(0, len(df), size):
yield (df.iloc[i:i + size]) | Get a generator yielding consecutive sub-dataframes of the given size.
Arguments
---------
df : pandas.DataFrame
The dataframe for which to get sub-dataframes.
size : int
The size of each sub-dataframe.
Returns
-------
generator
A generator yielding consecutive sub-dataframe of the given size.
Example
-------
>>> import pandas as pd; import pdutil;
>>> data = [[23, "Jen"], [42, "Ray"], [15, "Fin"]]
>>> df = pd.DataFrame(data, columns=['age', 'name'])
>>> for subdf in pdutil.iter.sub_dfs_by_size(df, 2): print(subdf)
age name
0 23 Jen
1 42 Ray
age name
    2 15 Fin | Below is the instruction that describes the task:
### Input:
Get a generator yielding consecutive sub-dataframes of the given size.
Arguments
---------
df : pandas.DataFrame
The dataframe for which to get sub-dataframes.
size : int
The size of each sub-dataframe.
Returns
-------
generator
A generator yielding consecutive sub-dataframe of the given size.
Example
-------
>>> import pandas as pd; import pdutil;
>>> data = [[23, "Jen"], [42, "Ray"], [15, "Fin"]]
>>> df = pd.DataFrame(data, columns=['age', 'name'])
>>> for subdf in pdutil.iter.sub_dfs_by_size(df, 2): print(subdf)
age name
0 23 Jen
1 42 Ray
age name
2 15 Fin
### Response:
def sub_dfs_by_size(df, size):
"""Get a generator yielding consecutive sub-dataframes of the given size.
Arguments
---------
df : pandas.DataFrame
The dataframe for which to get sub-dataframes.
size : int
The size of each sub-dataframe.
Returns
-------
generator
A generator yielding consecutive sub-dataframe of the given size.
Example
-------
>>> import pandas as pd; import pdutil;
>>> data = [[23, "Jen"], [42, "Ray"], [15, "Fin"]]
>>> df = pd.DataFrame(data, columns=['age', 'name'])
>>> for subdf in pdutil.iter.sub_dfs_by_size(df, 2): print(subdf)
age name
0 23 Jen
1 42 Ray
age name
2 15 Fin
"""
for i in range(0, len(df), size):
yield (df.iloc[i:i + size]) |
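A small usage sketch beyond the doctest, assuming the generator above is in scope: the final sub-dataframe simply carries whatever rows remain, so per-chunk work needs no special-casing of the tail.

import pandas as pd

df = pd.DataFrame({'age': [23, 42, 15, 8], 'name': ['Jen', 'Ray', 'Fin', 'Sam']})
# Mean age per block of two rows, then chunk sizes for a non-divisible split.
print([sub['age'].mean() for sub in sub_dfs_by_size(df, 2)])  # [32.5, 11.5]
print([len(sub) for sub in sub_dfs_by_size(df, 3)])           # [3, 1]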
def get_hangul_syllable_type_property(value, is_bytes=False):
"""Get `HANGUL SYLLABLE TYPE` property."""
obj = unidata.ascii_hangul_syllable_type if is_bytes else unidata.unicode_hangul_syllable_type
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['hangulsyllabletype'].get(negated, negated)
else:
value = unidata.unicode_alias['hangulsyllabletype'].get(value, value)
    return obj[value] | Get `HANGUL SYLLABLE TYPE` property. | Below is the instruction that describes the task:
### Input:
Get `HANGUL SYLLABLE TYPE` property.
### Response:
def get_hangul_syllable_type_property(value, is_bytes=False):
"""Get `HANGUL SYLLABLE TYPE` property."""
obj = unidata.ascii_hangul_syllable_type if is_bytes else unidata.unicode_hangul_syllable_type
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['hangulsyllabletype'].get(negated, negated)
else:
value = unidata.unicode_alias['hangulsyllabletype'].get(value, value)
return obj[value] |
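A standalone sketch of the alias-plus-negation lookup performed above; the two dictionaries are made-up stand-ins for the unidata alias map and property table, so only the '^' handling and alias resolution carry over.

# Made-up stand-ins for unidata.unicode_alias['hangulsyllabletype'] and the property table.
alias = {'l': 'leadingjamo', 'lv': 'lvsyllable'}
table = {'leadingjamo': 'L-range', '^leadingjamo': 'not-L-range', 'lvsyllable': 'LV-range'}

def resolve(value):
    # Same branching as above: a leading '^' selects the precomputed negated entry.
    if value.startswith('^'):
        negated = value[1:]
        return table['^' + alias.get(negated, negated)]
    return table[alias.get(value, value)]

print(resolve('l'), resolve('leadingjamo'), resolve('^l'))  # L-range L-range not-L-range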
def post_request(profile, resource, payload):
"""Do a POST request to Github's API.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
resource
The part of a Github API URL that comes after ``.../:repo/git``.
For instance, for ``.../:repo/git/commits``, it's ``/commits``.
payload
A dict of values to send as the payload of the POST request.
The data will be JSON-encoded.
Returns:
The body of the response, converted from JSON into a Python dict.
"""
url = get_url(profile, resource)
headers = get_headers(profile)
response = requests.post(url, json=payload, headers=headers)
return response.json() | Do a POST request to Github's API.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
resource
The part of a Github API URL that comes after ``.../:repo/git``.
For instance, for ``.../:repo/git/commits``, it's ``/commits``.
payload
A dict of values to send as the payload of the POST request.
The data will be JSON-encoded.
Returns:
        The body of the response, converted from JSON into a Python dict. | Below is the instruction that describes the task:
### Input:
Do a POST request to Github's API.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
resource
The part of a Github API URL that comes after ``.../:repo/git``.
For instance, for ``.../:repo/git/commits``, it's ``/commits``.
payload
A dict of values to send as the payload of the POST request.
The data will be JSON-encoded.
Returns:
The body of the response, converted from JSON into a Python dict.
### Response:
def post_request(profile, resource, payload):
"""Do a POST request to Github's API.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
resource
The part of a Github API URL that comes after ``.../:repo/git``.
For instance, for ``.../:repo/git/commits``, it's ``/commits``.
payload
A dict of values to send as the payload of the POST request.
The data will be JSON-encoded.
Returns:
The body of the response, converted from JSON into a Python dict.
"""
url = get_url(profile, resource)
headers = get_headers(profile)
response = requests.post(url, json=payload, headers=headers)
return response.json() |
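A hypothetical call to the helper above, following its docstring: the profile (normally produced by simplygithub.authentication.profile) is shown here as a plain dict, and the payload fields are illustrative rather than a verified GitHub schema.

profile = {"repo": "someuser/somerepo", "token": "<personal-access-token>"}  # illustrative
payload = {"message": "add readme", "tree": "<tree-sha>", "parents": ["<parent-sha>"]}
result = post_request(profile, "/commits", payload)
print(result.get("sha"))  # sha of the created commit object, if the request succeeded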
def _on_items_changed(self, change):
""" Observe container events on the items list and update the
adapter appropriately.
"""
if change['type'] != 'container':
return
op = change['operation']
if op == 'append':
i = len(change['value'])-1
self.adapter.notifyItemInserted(i)
elif op == 'insert':
self.adapter.notifyItemInserted(change['index'])
elif op in ('pop', '__delitem__'):
self.adapter.notifyItemRemoved(change['index'])
elif op == '__setitem__':
self.adapter.notifyItemChanged(change['index'])
elif op == 'extend':
n = len(change['items'])
i = len(change['value'])-n
self.adapter.notifyItemRangeInserted(i, n)
elif op in ('remove', 'reverse', 'sort'):
# Reset everything for these
self.adapter.notifyDataSetChanged() | Observe container events on the items list and update the
        adapter appropriately. | Below is the instruction that describes the task:
### Input:
Observe container events on the items list and update the
adapter appropriately.
### Response:
def _on_items_changed(self, change):
""" Observe container events on the items list and update the
adapter appropriately.
"""
if change['type'] != 'container':
return
op = change['operation']
if op == 'append':
i = len(change['value'])-1
self.adapter.notifyItemInserted(i)
elif op == 'insert':
self.adapter.notifyItemInserted(change['index'])
elif op in ('pop', '__delitem__'):
self.adapter.notifyItemRemoved(change['index'])
elif op == '__setitem__':
self.adapter.notifyItemChanged(change['index'])
elif op == 'extend':
n = len(change['items'])
i = len(change['value'])-n
self.adapter.notifyItemRangeInserted(i, n)
elif op in ('remove', 'reverse', 'sort'):
# Reset everything for these
self.adapter.notifyDataSetChanged() |
def update_multi_precision(self, index, weight, grad, state):
"""Updates the given parameter using the corresponding gradient and state.
Mixed precision version.
Parameters
----------
index : int
The unique index of the parameter into the individual learning
rates and weight decays. Learning rates and weight decay
may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.
weight : NDArray
The parameter to be updated.
grad : NDArray
The gradient of the objective with respect to this parameter.
state : any obj
The state returned by `create_state()`.
"""
if self.multi_precision and weight.dtype == numpy.float16:
# Wrapper for mixed precision
weight_master_copy = state[0]
original_state = state[1]
grad32 = grad.astype(numpy.float32)
self.update(index, weight_master_copy, grad32, original_state)
cast(weight_master_copy, dtype=weight.dtype, out=weight)
else:
self.update(index, weight, grad, state) | Updates the given parameter using the corresponding gradient and state.
Mixed precision version.
Parameters
----------
index : int
The unique index of the parameter into the individual learning
rates and weight decays. Learning rates and weight decay
may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.
weight : NDArray
The parameter to be updated.
grad : NDArray
The gradient of the objective with respect to this parameter.
state : any obj
            The state returned by `create_state()`. | Below is the instruction that describes the task:
### Input:
Updates the given parameter using the corresponding gradient and state.
Mixed precision version.
Parameters
----------
index : int
The unique index of the parameter into the individual learning
rates and weight decays. Learning rates and weight decay
may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.
weight : NDArray
The parameter to be updated.
grad : NDArray
The gradient of the objective with respect to this parameter.
state : any obj
The state returned by `create_state()`.
### Response:
def update_multi_precision(self, index, weight, grad, state):
"""Updates the given parameter using the corresponding gradient and state.
Mixed precision version.
Parameters
----------
index : int
The unique index of the parameter into the individual learning
rates and weight decays. Learning rates and weight decay
may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.
weight : NDArray
The parameter to be updated.
grad : NDArray
The gradient of the objective with respect to this parameter.
state : any obj
The state returned by `create_state()`.
"""
if self.multi_precision and weight.dtype == numpy.float16:
# Wrapper for mixed precision
weight_master_copy = state[0]
original_state = state[1]
grad32 = grad.astype(numpy.float32)
self.update(index, weight_master_copy, grad32, original_state)
cast(weight_master_copy, dtype=weight.dtype, out=weight)
else:
self.update(index, weight, grad, state) |
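A NumPy-only sketch of the bookkeeping above, since the idea is independent of MXNet: the update is applied to a float32 master copy held in the state, and the result is cast back into the float16 weight. Plain SGD stands in for whatever self.update actually does.

import numpy as np

weight = np.array([0.1, 0.2, 0.3], dtype=np.float16)   # fp16 parameter
master = weight.astype(np.float32)                      # state[0]: fp32 master copy
grad = np.array([0.5, 0.5, 0.5], dtype=np.float16)
lr = 0.01
master -= lr * grad.astype(np.float32)                  # "self.update" on the fp32 copy
weight[:] = master.astype(np.float16)                   # cast(..., out=weight)
print(weight)                                           # fp16 values updated via fp32 math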
def reset_new_request(self):
"""Remove the non-sense args from the self.ignore, return self.new_request"""
raw_url = self.new_request['url']
parsed_url = urlparse(raw_url)
qsl = parse_qsl(parsed_url.query)
new_url = self._join_url(
parsed_url, [i for i in qsl if i not in self.ignore['qsl']])
self.new_request['url'] = new_url
self.logger_function('ignore: %s' % self.ignore)
for key in self.ignore['headers']:
self.new_request['headers'].pop(key)
if not self.new_request.get('headers'):
self.new_request.pop('headers', None)
if self.ignore['Cookie'] and 'Cookie' not in self.ignore['headers']:
headers = self.new_request['headers']
headers = {key.title(): headers[key] for key in headers}
if 'Cookie' in headers:
cookies = SimpleCookie(headers['Cookie'])
new_cookie = '; '.join([
i[1].OutputString()
for i in cookies.items()
if i[0] not in self.ignore['Cookie']
])
self.new_request['headers']['Cookie'] = new_cookie
if self.new_request['method'] == 'post':
data = self.new_request.get('data')
if data:
if isinstance(data, dict):
for key in self.ignore['form_data']:
data.pop(key)
if (not data) or self.ignore['total_data']:
# not need data any more
self.new_request.pop('data', None)
if self.has_json_data and 'data' in self.new_request:
json_data = json.loads(data.decode(self.encoding))
for key in self.ignore['json_data']:
json_data.pop(key)
self.new_request['data'] = json.dumps(json_data).encode(
self.encoding)
        return self.new_request | Remove the non-sense args from the self.ignore, return self.new_request | Below is the instruction that describes the task:
### Input:
Remove the non-sense args from the self.ignore, return self.new_request
### Response:
def reset_new_request(self):
"""Remove the non-sense args from the self.ignore, return self.new_request"""
raw_url = self.new_request['url']
parsed_url = urlparse(raw_url)
qsl = parse_qsl(parsed_url.query)
new_url = self._join_url(
parsed_url, [i for i in qsl if i not in self.ignore['qsl']])
self.new_request['url'] = new_url
self.logger_function('ignore: %s' % self.ignore)
for key in self.ignore['headers']:
self.new_request['headers'].pop(key)
if not self.new_request.get('headers'):
self.new_request.pop('headers', None)
if self.ignore['Cookie'] and 'Cookie' not in self.ignore['headers']:
headers = self.new_request['headers']
headers = {key.title(): headers[key] for key in headers}
if 'Cookie' in headers:
cookies = SimpleCookie(headers['Cookie'])
new_cookie = '; '.join([
i[1].OutputString()
for i in cookies.items()
if i[0] not in self.ignore['Cookie']
])
self.new_request['headers']['Cookie'] = new_cookie
if self.new_request['method'] == 'post':
data = self.new_request.get('data')
if data:
if isinstance(data, dict):
for key in self.ignore['form_data']:
data.pop(key)
if (not data) or self.ignore['total_data']:
# not need data any more
self.new_request.pop('data', None)
if self.has_json_data and 'data' in self.new_request:
json_data = json.loads(data.decode(self.encoding))
for key in self.ignore['json_data']:
json_data.pop(key)
self.new_request['data'] = json.dumps(json_data).encode(
self.encoding)
return self.new_request |
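The URL rebuilding above goes through a _join_url helper that is not shown; a standalone sketch of the same query-string filtering using only the standard library, with urlunparse standing in for _join_url:

from urllib.parse import urlparse, parse_qsl, urlencode, urlunparse

ignored_qsl = {('utm_source', 'share'), ('t', '123')}   # pairs found to be ignorable
parsed = urlparse('https://example.com/view?id=7&utm_source=share&t=123')
kept = [kv for kv in parse_qsl(parsed.query) if kv not in ignored_qsl]
new_url = urlunparse(parsed._replace(query=urlencode(kept)))
print(new_url)   # https://example.com/view?id=7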
def _parse_args():
"""
Parse the command line for options
"""
parser = optparse.OptionParser()
parser.add_option(
'--user', dest='user_install', action='store_true', default=False,
help='install in user site package (requires Python 2.6 or later)')
parser.add_option(
'--download-base', dest='download_base', metavar="URL",
default=DEFAULT_URL,
help='alternative URL from where to download the setuptools package')
parser.add_option(
'--insecure', dest='downloader_factory', action='store_const',
const=lambda: download_file_insecure, default=get_best_downloader,
help='Use internal, non-validating downloader'
)
options, args = parser.parse_args()
# positional arguments are ignored
    return options | Parse the command line for options | Below is the instruction that describes the task:
### Input:
Parse the command line for options
### Response:
def _parse_args():
"""
Parse the command line for options
"""
parser = optparse.OptionParser()
parser.add_option(
'--user', dest='user_install', action='store_true', default=False,
help='install in user site package (requires Python 2.6 or later)')
parser.add_option(
'--download-base', dest='download_base', metavar="URL",
default=DEFAULT_URL,
help='alternative URL from where to download the setuptools package')
parser.add_option(
'--insecure', dest='downloader_factory', action='store_const',
const=lambda: download_file_insecure, default=get_best_downloader,
help='Use internal, non-validating downloader'
)
options, args = parser.parse_args()
# positional arguments are ignored
return options |
def actions(acts, done):
'''
Prepare actions pipeline.
:param tuple acts: called functions
:param function done: get result from actions
    :returns function: function that starts execution
'''
def _intermediate(acc, action):
result = action(acc['state'])
values = concatv(acc['values'], [result['answer']])
return {'values': values, 'state': result['state']}
def _actions(seed):
init = {'values': [], 'state': seed}
result = reduce(_intermediate, acts, init)
keep = remove(lambda x: x is None, result['values'])
return done(keep, result['state'])
return _actions | Prepare actions pipeline.
:param tuple acts: called functions
:param function done: get result from actions
    :returns function: function that starts execution | Below is the instruction that describes the task:
### Input:
Prepare actions pipeline.
:param tuple acts: called functions
:param function done: get result from actions
    :returns function: function that starts execution
### Response:
def actions(acts, done):
'''
Prepare actions pipeline.
:param tuple acts: called functions
:param function done: get result from actions
    :returns function: function that starts execution
'''
def _intermediate(acc, action):
result = action(acc['state'])
values = concatv(acc['values'], [result['answer']])
return {'values': values, 'state': result['state']}
def _actions(seed):
init = {'values': [], 'state': seed}
result = reduce(_intermediate, acts, init)
keep = remove(lambda x: x is None, result['values'])
return done(keep, result['state'])
return _actions |
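A usage sketch for the pipeline builder above, assuming `actions` and its dependencies (functools.reduce, toolz.concatv, toolz.remove) are importable: each action maps a state to a dict holding an answer and the next state, and None answers are filtered out before `done` receives the results.

def add(n):
    # Hypothetical action: returns the {'answer', 'state'} shape expected above.
    return lambda state: {'answer': 'added %d' % n, 'state': state + n}

def silently_double(state):
    return {'answer': None, 'state': state * 2}   # None answers are dropped

pipeline = actions((add(1), silently_double, add(10)),
                   lambda answers, state: (list(answers), state))
print(pipeline(5))   # (['added 1', 'added 10'], 22)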
def hashkey(*args, **kwargs):
"""Return a cache key for the specified hashable arguments."""
if kwargs:
return _HashedTuple(args + sum(sorted(kwargs.items()), _kwmark))
else:
        return _HashedTuple(args) | Return a cache key for the specified hashable arguments. | Below is the instruction that describes the task:
### Input:
Return a cache key for the specified hashable arguments.
### Response:
def hashkey(*args, **kwargs):
"""Return a cache key for the specified hashable arguments."""
if kwargs:
return _HashedTuple(args + sum(sorted(kwargs.items()), _kwmark))
else:
return _HashedTuple(args) |
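Usage sketch, assuming the function above (and its _HashedTuple/_kwmark helpers, as in cachetools.keys) is importable: keyword arguments are sorted before hashing, so equal call signatures yield equal, reusable keys regardless of keyword order.

k1 = hashkey(1, 2, a=3, b=4)
k2 = hashkey(1, 2, b=4, a=3)
print(k1 == k2, hash(k1) == hash(k2))   # True True
cache = {k1: 'cached value'}
print(cache[k2])                        # 'cached value'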
def get_taints(arg, taint=None):
"""
Helper to list an object taints.
:param arg: a value or Expression
:param taint: a regular expression matching a taint value (eg. 'IMPORTANT.*'). If None, this function checks for any taint value.
"""
if not issymbolic(arg):
return
for arg_taint in arg.taint:
if taint is not None:
m = re.match(taint, arg_taint, re.DOTALL | re.IGNORECASE)
if m:
yield arg_taint
else:
yield arg_taint
return | Helper to list an object taints.
:param arg: a value or Expression
    :param taint: a regular expression matching a taint value (eg. 'IMPORTANT.*'). If None, this function checks for any taint value. | Below is the instruction that describes the task:
### Input:
Helper to list an object taints.
:param arg: a value or Expression
:param taint: a regular expression matching a taint value (eg. 'IMPORTANT.*'). If None, this function checks for any taint value.
### Response:
def get_taints(arg, taint=None):
"""
Helper to list an object taints.
:param arg: a value or Expression
:param taint: a regular expression matching a taint value (eg. 'IMPORTANT.*'). If None, this function checks for any taint value.
"""
if not issymbolic(arg):
return
for arg_taint in arg.taint:
if taint is not None:
m = re.match(taint, arg_taint, re.DOTALL | re.IGNORECASE)
if m:
yield arg_taint
else:
yield arg_taint
return |
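A standalone sketch of the filtering above, with a tiny stand-in for a symbolic expression carrying a taint set (real ones come from manticore); the point is the case-insensitive regular-expression match against each taint label.

import re

class FakeSym:
    taint = {'IMPORTANT.api_key', 'SOURCE.stdin'}   # stand-in for Expression.taint

def taints_matching(arg, taint=None):
    for arg_taint in getattr(arg, 'taint', ()):
        if taint is None or re.match(taint, arg_taint, re.DOTALL | re.IGNORECASE):
            yield arg_taint

print(sorted(taints_matching(FakeSym(), 'important.*')))   # ['IMPORTANT.api_key']
print(sorted(taints_matching(FakeSym())))                   # both labels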
def _readlines(fname, fpointer1=open, fpointer2=open): # pragma: no cover
"""Read all lines from file."""
# fpointer1, fpointer2 arguments to ease testing
try:
with fpointer1(fname, "r") as fobj:
return fobj.readlines()
except UnicodeDecodeError: # pragma: no cover
with fpointer2(fname, "r", encoding="utf-8") as fobj:
            return fobj.readlines() | Read all lines from file. | Below is the instruction that describes the task:
### Input:
Read all lines from file.
### Response:
def _readlines(fname, fpointer1=open, fpointer2=open): # pragma: no cover
"""Read all lines from file."""
# fpointer1, fpointer2 arguments to ease testing
try:
with fpointer1(fname, "r") as fobj:
return fobj.readlines()
except UnicodeDecodeError: # pragma: no cover
with fpointer2(fname, "r", encoding="utf-8") as fobj:
return fobj.readlines() |
def triple_reference_of(label: ShExJ.tripleExprLabel, cntxt: Context) -> Optional[ShExJ.tripleExpr]:
""" Search for the label in a Schema """
te: Optional[ShExJ.tripleExpr] = None
if cntxt.schema.start is not None:
te = triple_in_shape(cntxt.schema.start, label, cntxt)
if te is None:
for shapeExpr in cntxt.schema.shapes:
te = triple_in_shape(shapeExpr, label, cntxt)
if te:
break
    return te | Search for the label in a Schema | Below is the instruction that describes the task:
### Input:
Search for the label in a Schema
### Response:
def triple_reference_of(label: ShExJ.tripleExprLabel, cntxt: Context) -> Optional[ShExJ.tripleExpr]:
""" Search for the label in a Schema """
te: Optional[ShExJ.tripleExpr] = None
if cntxt.schema.start is not None:
te = triple_in_shape(cntxt.schema.start, label, cntxt)
if te is None:
for shapeExpr in cntxt.schema.shapes:
te = triple_in_shape(shapeExpr, label, cntxt)
if te:
break
return te |
def _virt_call(domain, function, section, comment,
connection=None, username=None, password=None, **kwargs):
'''
Helper to call the virt functions. Wildcards supported.
:param domain:
:param function:
:param section:
:param comment:
:return:
'''
ret = {'name': domain, 'changes': {}, 'result': True, 'comment': ''}
targeted_domains = fnmatch.filter(__salt__['virt.list_domains'](), domain)
changed_domains = list()
ignored_domains = list()
for targeted_domain in targeted_domains:
try:
response = __salt__['virt.{0}'.format(function)](targeted_domain,
connection=connection,
username=username,
password=password,
**kwargs)
if isinstance(response, dict):
response = response['name']
changed_domains.append({'domain': targeted_domain, function: response})
except libvirt.libvirtError as err:
ignored_domains.append({'domain': targeted_domain, 'issue': six.text_type(err)})
if not changed_domains:
ret['result'] = False
ret['comment'] = 'No changes had happened'
if ignored_domains:
ret['changes'] = {'ignored': ignored_domains}
else:
ret['changes'] = {section: changed_domains}
ret['comment'] = comment
return ret | Helper to call the virt functions. Wildcards supported.
:param domain:
:param function:
:param section:
:param comment:
    :return: | Below is the instruction that describes the task:
### Input:
Helper to call the virt functions. Wildcards supported.
:param domain:
:param function:
:param section:
:param comment:
:return:
### Response:
def _virt_call(domain, function, section, comment,
connection=None, username=None, password=None, **kwargs):
'''
Helper to call the virt functions. Wildcards supported.
:param domain:
:param function:
:param section:
:param comment:
:return:
'''
ret = {'name': domain, 'changes': {}, 'result': True, 'comment': ''}
targeted_domains = fnmatch.filter(__salt__['virt.list_domains'](), domain)
changed_domains = list()
ignored_domains = list()
for targeted_domain in targeted_domains:
try:
response = __salt__['virt.{0}'.format(function)](targeted_domain,
connection=connection,
username=username,
password=password,
**kwargs)
if isinstance(response, dict):
response = response['name']
changed_domains.append({'domain': targeted_domain, function: response})
except libvirt.libvirtError as err:
ignored_domains.append({'domain': targeted_domain, 'issue': six.text_type(err)})
if not changed_domains:
ret['result'] = False
ret['comment'] = 'No changes had happened'
if ignored_domains:
ret['changes'] = {'ignored': ignored_domains}
else:
ret['changes'] = {section: changed_domains}
ret['comment'] = comment
return ret |
def create_rrset(self, zone_name, rtype, owner_name, ttl, rdata):
"""Creates a new RRSet in the specified zone.
Arguments:
zone_name -- The zone that will contain the new RRSet. The trailing dot is optional.
rtype -- The type of the RRSet. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
owner_name -- The owner name for the RRSet.
If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
ttl -- The TTL value for the RRSet.
rdata -- The BIND data for the RRSet as a string.
If there is a single resource record in the RRSet, you can pass in the single string.
If there are multiple resource records in this RRSet, pass in a list of strings.
"""
if type(rdata) is not list:
rdata = [rdata]
rrset = {"ttl": ttl, "rdata": rdata}
return self.rest_api_connection.post("/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name, json.dumps(rrset)) | Creates a new RRSet in the specified zone.
Arguments:
zone_name -- The zone that will contain the new RRSet. The trailing dot is optional.
rtype -- The type of the RRSet. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
owner_name -- The owner name for the RRSet.
If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
ttl -- The TTL value for the RRSet.
rdata -- The BIND data for the RRSet as a string.
If there is a single resource record in the RRSet, you can pass in the single string.
        If there are multiple resource records in this RRSet, pass in a list of strings. | Below is the instruction that describes the task:
### Input:
Creates a new RRSet in the specified zone.
Arguments:
zone_name -- The zone that will contain the new RRSet. The trailing dot is optional.
rtype -- The type of the RRSet. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
owner_name -- The owner name for the RRSet.
If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
ttl -- The TTL value for the RRSet.
rdata -- The BIND data for the RRSet as a string.
If there is a single resource record in the RRSet, you can pass in the single string.
If there are multiple resource records in this RRSet, pass in a list of strings.
### Response:
def create_rrset(self, zone_name, rtype, owner_name, ttl, rdata):
"""Creates a new RRSet in the specified zone.
Arguments:
zone_name -- The zone that will contain the new RRSet. The trailing dot is optional.
rtype -- The type of the RRSet. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
owner_name -- The owner name for the RRSet.
If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
ttl -- The TTL value for the RRSet.
rdata -- The BIND data for the RRSet as a string.
If there is a single resource record in the RRSet, you can pass in the single string.
If there are multiple resource records in this RRSet, pass in a list of strings.
"""
if type(rdata) is not list:
rdata = [rdata]
rrset = {"ttl": ttl, "rdata": rdata}
return self.rest_api_connection.post("/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name, json.dumps(rrset)) |
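The only normalisation the method performs is wrapping a single rdata string in a list; a self-contained sketch of the request body it posts for one record versus several (values illustrative):

import json

for rdata in ('192.0.2.10', ['192.0.2.10', '192.0.2.11']):
    if type(rdata) is not list:
        rdata = [rdata]
    print(json.dumps({"ttl": 300, "rdata": rdata}))
# {"ttl": 300, "rdata": ["192.0.2.10"]}
# {"ttl": 300, "rdata": ["192.0.2.10", "192.0.2.11"]}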
def submit_populator_batch(self, column_name, batch):
"""Submit a populator batch
Submit a populator batch as a series of HTTP requests in small chunks,
returning the batch GUID, or raising exception on error."""
if not set(column_name).issubset(_allowedCustomDimensionChars):
raise ValueError('Invalid custom dimension name "%s": must only contain letters, digits, and underscores' % column_name)
if len(column_name) < 3 or len(column_name) > 20:
raise ValueError('Invalid value "%s": must be between 3-20 characters' % column_name)
url = '%s/api/v5/batch/customdimensions/%s/populators' % (self.base_url, column_name)
resp_json_dict = self._submit_batch(url, batch)
if resp_json_dict.get('error') is not None:
raise RuntimeError('Error received from server: %s' % resp_json_dict['error'])
return resp_json_dict['guid'] | Submit a populator batch
Submit a populator batch as a series of HTTP requests in small chunks,
        returning the batch GUID, or raising exception on error. | Below is the instruction that describes the task:
### Input:
Submit a populator batch
Submit a populator batch as a series of HTTP requests in small chunks,
returning the batch GUID, or raising exception on error.
### Response:
def submit_populator_batch(self, column_name, batch):
"""Submit a populator batch
Submit a populator batch as a series of HTTP requests in small chunks,
returning the batch GUID, or raising exception on error."""
if not set(column_name).issubset(_allowedCustomDimensionChars):
raise ValueError('Invalid custom dimension name "%s": must only contain letters, digits, and underscores' % column_name)
if len(column_name) < 3 or len(column_name) > 20:
raise ValueError('Invalid value "%s": must be between 3-20 characters' % column_name)
url = '%s/api/v5/batch/customdimensions/%s/populators' % (self.base_url, column_name)
resp_json_dict = self._submit_batch(url, batch)
if resp_json_dict.get('error') is not None:
raise RuntimeError('Error received from server: %s' % resp_json_dict['error'])
return resp_json_dict['guid'] |
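A sketch of the column-name validation above; _allowedCustomDimensionChars is not shown in this excerpt, so letters, digits and underscore are assumed.

import string

allowed = set(string.ascii_letters + string.digits + '_')   # assumed charset
for name in ('src_region', 'bad-name', 'ab'):
    ok = set(name).issubset(allowed) and 3 <= len(name) <= 20
    print(name, ok)
# src_region True
# bad-name False
# ab False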
def _get_texture(arr, default, n_items, from_bounds):
"""Prepare data to be uploaded as a texture.
The from_bounds must be specified.
"""
if not hasattr(default, '__len__'): # pragma: no cover
default = [default]
n_cols = len(default)
if arr is None: # pragma: no cover
arr = np.tile(default, (n_items, 1))
assert arr.shape == (n_items, n_cols)
# Convert to 3D texture.
arr = arr[np.newaxis, ...].astype(np.float64)
assert arr.shape == (1, n_items, n_cols)
# NOTE: we need to cast the texture to [0., 1.] (float texture).
# This is easy as soon as we assume that the signal bounds are in
# [-1, 1].
assert len(from_bounds) == 2
m, M = map(float, from_bounds)
assert np.all(arr >= m)
assert np.all(arr <= M)
arr = (arr - m) / (M - m)
assert np.all(arr >= 0)
assert np.all(arr <= 1.)
return arr | Prepare data to be uploaded as a texture.
    The from_bounds must be specified. | Below is the instruction that describes the task:
### Input:
Prepare data to be uploaded as a texture.
The from_bounds must be specified.
### Response:
def _get_texture(arr, default, n_items, from_bounds):
"""Prepare data to be uploaded as a texture.
The from_bounds must be specified.
"""
if not hasattr(default, '__len__'): # pragma: no cover
default = [default]
n_cols = len(default)
if arr is None: # pragma: no cover
arr = np.tile(default, (n_items, 1))
assert arr.shape == (n_items, n_cols)
# Convert to 3D texture.
arr = arr[np.newaxis, ...].astype(np.float64)
assert arr.shape == (1, n_items, n_cols)
# NOTE: we need to cast the texture to [0., 1.] (float texture).
# This is easy as soon as we assume that the signal bounds are in
# [-1, 1].
assert len(from_bounds) == 2
m, M = map(float, from_bounds)
assert np.all(arr >= m)
assert np.all(arr <= M)
arr = (arr - m) / (M - m)
assert np.all(arr >= 0)
assert np.all(arr <= 1.)
return arr |
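A worked numeric example of the packing and rescaling above: three 2-column items with bounds (-1, 1) become a (1, 3, 2) float array mapped onto [0, 1].

import numpy as np

arr = np.array([[-1., 0.], [0., 0.5], [1., 1.]])   # n_items=3, n_cols=2
arr = arr[np.newaxis, ...].astype(np.float64)
m, M = -1., 1.                                     # from_bounds
arr = (arr - m) / (M - m)
print(arr.shape)   # (1, 3, 2)
print(arr[0])      # [[0.   0.5 ] [0.5  0.75] [1.   1.  ]]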
def _ordered_node_addrs(self, function_address):
"""
For a given function, return all nodes in an optimal traversal order. If the function does not exist, return an
empty list.
:param int function_address: Address of the function.
        :return: An ordered list of the nodes.
:rtype: list
"""
try:
function = self.kb.functions[function_address]
except KeyError:
# the function does not exist
return [ ]
if function_address not in self._function_node_addrs:
sorted_nodes = CFGUtils.quasi_topological_sort_nodes(function.graph)
self._function_node_addrs[function_address] = [ n.addr for n in sorted_nodes ]
return self._function_node_addrs[function_address] | For a given function, return all nodes in an optimal traversal order. If the function does not exist, return an
empty list.
:param int function_address: Address of the function.
        :return: An ordered list of the nodes.
        :rtype: list | Below is the instruction that describes the task:
### Input:
For a given function, return all nodes in an optimal traversal order. If the function does not exist, return an
empty list.
:param int function_address: Address of the function.
        :return: An ordered list of the nodes.
:rtype: list
### Response:
def _ordered_node_addrs(self, function_address):
"""
For a given function, return all nodes in an optimal traversal order. If the function does not exist, return an
empty list.
:param int function_address: Address of the function.
        :return: An ordered list of the nodes.
:rtype: list
"""
try:
function = self.kb.functions[function_address]
except KeyError:
# the function does not exist
return [ ]
if function_address not in self._function_node_addrs:
sorted_nodes = CFGUtils.quasi_topological_sort_nodes(function.graph)
self._function_node_addrs[function_address] = [ n.addr for n in sorted_nodes ]
return self._function_node_addrs[function_address] |