| code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k) |
---|---|---|
def file_sizes(self):
"""Returns total filesize (in MB)"""
size = sum(map(os.path.getsize, self.file_list))
return size / 1024 / 1024 | Returns total filesize (in MB) | Below is the instruction that describes the task:
### Input:
Returns total filesize (in MB)
### Response:
def file_sizes(self):
"""Returns total filesize (in MB)"""
size = sum(map(os.path.getsize, self.file_list))
return size / 1024 / 1024 |
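The method above assumes a self.file_list attribute on its class; a minimal standalone sketch of the same byte-to-megabyte calculation (the sample path passed at the end is only illustrative) is:

import os

def total_size_mb(file_list):
    """Sum file sizes in bytes and convert to megabytes."""
    size = sum(map(os.path.getsize, file_list))
    return size / 1024 / 1024

# Illustrative call; pass any list of existing file paths.
print(total_size_mb([__file__]))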
def _traverse(self, n=1):
"""Traverse state history. Used by `back` and `forward` methods.
:param int n: Cursor increment. Positive values move forward in the
browser history; negative values move backward.
"""
if not self.history:
raise exceptions.RoboError('Not tracking history')
cursor = self._cursor + n
if cursor >= len(self._states) or cursor < 0:
raise exceptions.RoboError('Index out of range')
self._cursor = cursor | Traverse state history. Used by `back` and `forward` methods.
:param int n: Cursor increment. Positive values move forward in the
browser history; negative values move backward. | Below is the instruction that describes the task:
### Input:
Traverse state history. Used by `back` and `forward` methods.
:param int n: Cursor increment. Positive values move forward in the
browser history; negative values move backward.
### Response:
def _traverse(self, n=1):
"""Traverse state history. Used by `back` and `forward` methods.
:param int n: Cursor increment. Positive values move forward in the
browser history; negative values move backward.
"""
if not self.history:
raise exceptions.RoboError('Not tracking history')
cursor = self._cursor + n
if cursor >= len(self._states) or cursor < 0:
raise exceptions.RoboError('Index out of range')
self._cursor = cursor |
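_traverse above only moves a cursor over previously saved states; a self-contained sketch of that back/forward cursor arithmetic (HistorySketch and its state strings are invented for illustration, not part of the original library) is:

class HistorySketch:
    def __init__(self, states):
        self._states = list(states)
        self._cursor = len(self._states) - 1

    def _traverse(self, n=1):
        cursor = self._cursor + n
        if cursor >= len(self._states) or cursor < 0:
            raise IndexError('Index out of range')
        self._cursor = cursor

    def back(self):
        self._traverse(-1)

    def forward(self):
        self._traverse(1)

h = HistorySketch(['page1', 'page2', 'page3'])
h.back()      # cursor now points at 'page2'
h.forward()   # cursor back at 'page3'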
def index_split(index, chunks):
"""Function to split pandas.Index and pandas.MultiIndex objects.
Split :class:`pandas.Index` and :class:`pandas.MultiIndex` objects
into chunks. This function is based on :func:`numpy.array_split`.
Parameters
----------
index : pandas.Index, pandas.MultiIndex
A pandas.Index or pandas.MultiIndex to split into chunks.
chunks : int
The number of parts to split the index into.
Returns
-------
list
A list with chunked pandas.Index or pandas.MultiIndex objects.
"""
Ntotal = index.shape[0]
Nsections = int(chunks)
if Nsections <= 0:
raise ValueError('number sections must be larger than 0.')
Neach_section, extras = divmod(Ntotal, Nsections)
section_sizes = ([0] + extras * [Neach_section + 1] +
(Nsections - extras) * [Neach_section])
div_points = numpy.array(section_sizes).cumsum()
sub_ind = []
for i in range(Nsections):
st = div_points[i]
end = div_points[i + 1]
sub_ind.append(index[st:end])
return sub_ind | Function to split pandas.Index and pandas.MultiIndex objects.
Split :class:`pandas.Index` and :class:`pandas.MultiIndex` objects
into chunks. This function is based on :func:`numpy.array_split`.
Parameters
----------
index : pandas.Index, pandas.MultiIndex
A pandas.Index or pandas.MultiIndex to split into chunks.
chunks : int
The number of parts to split the index into.
Returns
-------
list
A list with chunked pandas.Index or pandas.MultiIndex objects. | Below is the instruction that describes the task:
### Input:
Function to split pandas.Index and pandas.MultiIndex objects.
Split :class:`pandas.Index` and :class:`pandas.MultiIndex` objects
into chunks. This function is based on :func:`numpy.array_split`.
Parameters
----------
index : pandas.Index, pandas.MultiIndex
A pandas.Index or pandas.MultiIndex to split into chunks.
chunks : int
The number of parts to split the index into.
Returns
-------
list
A list with chunked pandas.Index or pandas.MultiIndex objects.
### Response:
def index_split(index, chunks):
"""Function to split pandas.Index and pandas.MultiIndex objects.
Split :class:`pandas.Index` and :class:`pandas.MultiIndex` objects
into chunks. This function is based on :func:`numpy.array_split`.
Parameters
----------
index : pandas.Index, pandas.MultiIndex
A pandas.Index or pandas.MultiIndex to split into chunks.
chunks : int
The number of parts to split the index into.
Returns
-------
list
A list with chunked pandas.Index or pandas.MultiIndex objects.
"""
Ntotal = index.shape[0]
Nsections = int(chunks)
if Nsections <= 0:
raise ValueError('number sections must be larger than 0.')
Neach_section, extras = divmod(Ntotal, Nsections)
section_sizes = ([0] + extras * [Neach_section + 1] +
(Nsections - extras) * [Neach_section])
div_points = numpy.array(section_sizes).cumsum()
sub_ind = []
for i in range(Nsections):
st = div_points[i]
end = div_points[i + 1]
sub_ind.append(index[st:end])
return sub_ind |
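A quick check of index_split above, assuming pandas and numpy are installed: 10 elements split into 3 chunks gives sizes 4, 3 and 3, matching numpy.array_split's rule of handing the remainder to the first chunks.

import numpy
import pandas as pd

idx = pd.Index(range(10))
chunks = index_split(idx, 3)
print([len(c) for c in chunks])                                   # [4, 3, 3]
print([len(a) for a in numpy.array_split(numpy.arange(10), 3)])   # [4, 3, 3]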
def upload(cls):
"""Uploads all the local views from :attr:`VIEW_PATHS` directory
to CouchBase server
This method **over-writes** all the server-side views with the same
named ones coming from :attr:`VIEW_PATHS` folder.
"""
cls._check_folder()
os.chdir(cls.VIEWS_PATH)
buckets = dict()
# iterate local folders
for bucket_name in os.listdir(cls.VIEWS_PATH):
if not os.path.isdir(bucket_name):
continue
# get bucket object
if bucket_name not in buckets:
try:
bucket = Connection.bucket(bucket_name)
except BucketNotFoundError as why:
print("[WARNING] %s" % str(why))
continue
else:
buckets[bucket_name] = bucket
else:
bucket = buckets[bucket_name]
# go through design docs
for ddoc_name in os.listdir(bucket_name):
views_path = '%s/%s/views' % (bucket_name, ddoc_name)
spatial_path = '%s/%s/spatial' % (bucket_name, ddoc_name)
if not (os.path.exists(views_path) and os.path.isdir(views_path)) and \
not (os.path.exists(spatial_path) and os.path.isdir(spatial_path)):
continue
# initialize design doc
new_ddoc = {
'views': {},
'spatial': {},
}
# map and reduces
if os.path.exists(views_path) and os.path.isdir(views_path):
for filename in os.listdir(views_path):
if not os.path.isfile('%s/%s' % (views_path, filename)) or \
not filename.endswith(('.map.js', '.reduce.js')):
continue
view_name, view_type, js = filename.rsplit('.', 2)
if view_name not in new_ddoc['views']:
new_ddoc['views'][view_name] = {}
with open('%s/%s' % (views_path, filename), 'r') as f:
new_ddoc['views'][view_name][view_type] = f.read()
# spatial views
if os.path.exists(spatial_path) and os.path.isdir(spatial_path):
for filename in os.listdir(spatial_path):
if not os.path.isfile('%s/%s' % (spatial_path, filename)) or \
not filename.endswith('.spatial.js'):
continue
view_name = filename.rsplit('.', 2)[0]
with open('%s/%s' % (spatial_path, filename), 'r') as f:
new_ddoc['spatial'][view_name] = f.read()
bucket['_design/%s' % ddoc_name] = new_ddoc
print('Uploaded design document: %s' % ddoc_name)
pass | Uploads all the local views from :attr:`VIEW_PATHS` directory
to CouchBase server
This method **over-writes** all the server-side views with the same
named ones coming from :attr:`VIEW_PATHS` folder. | Below is the instruction that describes the task:
### Input:
Uploads all the local views from :attr:`VIEW_PATHS` directory
to CouchBase server
This method **over-writes** all the server-side views with the same
named ones coming from :attr:`VIEW_PATHS` folder.
### Response:
def upload(cls):
"""Uploads all the local views from :attr:`VIEW_PATHS` directory
to CouchBase server
This method **over-writes** all the server-side views with the same
named ones coming from :attr:`VIEW_PATHS` folder.
"""
cls._check_folder()
os.chdir(cls.VIEWS_PATH)
buckets = dict()
# iterate local folders
for bucket_name in os.listdir(cls.VIEWS_PATH):
if not os.path.isdir(bucket_name):
continue
# get bucket object
if bucket_name not in buckets:
try:
bucket = Connection.bucket(bucket_name)
except BucketNotFoundError as why:
print("[WARNING] %s" % str(why))
continue
else:
buckets[bucket_name] = bucket
else:
bucket = buckets[bucket_name]
# go through design docs
for ddoc_name in os.listdir(bucket_name):
views_path = '%s/%s/views' % (bucket_name, ddoc_name)
spatial_path = '%s/%s/spatial' % (bucket_name, ddoc_name)
if not (os.path.exists(views_path) and os.path.isdir(views_path)) and \
not (os.path.exists(spatial_path) and os.path.isdir(spatial_path)):
continue
# initialize design doc
new_ddoc = {
'views': {},
'spatial': {},
}
# map and reduces
if os.path.exists(views_path) and os.path.isdir(views_path):
for filename in os.listdir(views_path):
if not os.path.isfile('%s/%s' % (views_path, filename)) or \
not filename.endswith(('.map.js', '.reduce.js')):
continue
view_name, view_type, js = filename.rsplit('.', 2)
if view_name not in new_ddoc['views']:
new_ddoc['views'][view_name] = {}
with open('%s/%s' % (views_path, filename), 'r') as f:
new_ddoc['views'][view_name][view_type] = f.read()
# spatial views
if os.path.exists(spatial_path) and os.path.isdir(spatial_path):
for filename in os.listdir(spatial_path):
if not os.path.isfile('%s/%s' % (spatial_path, filename)) or \
not filename.endswith('.spatial.js'):
continue
view_name = filename.rsplit('.', 2)[0]
with open('%s/%s' % (spatial_path, filename), 'r') as f:
new_ddoc['spatial'][view_name] = f.read()
bucket['_design/%s' % ddoc_name] = new_ddoc
print('Uploaded design document: %s' % ddoc_name)
pass |
def remaining(self):
"""Determines how many bytes are remaining in the current context."""
if self.depth == 0:
return _STREAM_REMAINING
return self.limit - self.queue.position | Determines how many bytes are remaining in the current context. | Below is the instruction that describes the task:
### Input:
Determines how many bytes are remaining in the current context.
### Response:
def remaining(self):
"""Determines how many bytes are remaining in the current context."""
if self.depth == 0:
return _STREAM_REMAINING
return self.limit - self.queue.position |
def where(self, *args):
''' This method simulates a where condition. Use as follows:
>>> yql.select('mytable').where(['name', '=', 'alain'], ['location', '!=', 'paris'])
'''
if not self._table:
raise errors.NoTableSelectedError('No Table Selected')
clause = []
self._query += ' WHERE '
clause = [ self._clause_formatter(x) for x in args if x ]
self._query += ' AND '.join(clause)
payload = self._payload_builder(self._query)
response = self.execute_query(payload)
return response | This method simulates a where condition. Use as follows:
>>> yql.select('mytable').where(['name', '=', 'alain'], ['location', '!=', 'paris']) | Below is the instruction that describes the task:
### Input:
This method simulates a where condition. Use as follows:
>>> yql.select('mytable').where(['name', '=', 'alain'], ['location', '!=', 'paris'])
### Response:
def where(self, *args):
''' This method simulates a where condition. Use as follows:
>>> yql.select('mytable').where(['name', '=', 'alain'], ['location', '!=', 'paris'])
'''
if not self._table:
raise errors.NoTableSelectedError('No Table Selected')
clause = []
self._query += ' WHERE '
clause = [ self._clause_formatter(x) for x in args if x ]
self._query += ' AND '.join(clause)
payload = self._payload_builder(self._query)
response = self.execute_query(payload)
return response |
def sortby(listoflists,sortcols):
"""
Sorts a list of lists on the column(s) specified in the sequence
sortcols.
Usage: sortby(listoflists,sortcols)
Returns: sorted list, unchanged column ordering
"""
newlist = abut(colex(listoflists,sortcols),listoflists)
newlist.sort()
try:
numcols = len(sortcols)
except TypeError:
numcols = 1
crit = '[' + str(numcols) + ':]'
newlist = colex(newlist,crit)
return newlist | Sorts a list of lists on the column(s) specified in the sequence
sortcols.
Usage: sortby(listoflists,sortcols)
Returns: sorted list, unchanged column ordering | Below is the instruction that describes the task:
### Input:
Sorts a list of lists on the column(s) specified in the sequence
sortcols.
Usage: sortby(listoflists,sortcols)
Returns: sorted list, unchanged column ordering
### Response:
def sortby(listoflists,sortcols):
"""
Sorts a list of lists on the column(s) specified in the sequence
sortcols.
Usage: sortby(listoflists,sortcols)
Returns: sorted list, unchanged column ordering
"""
newlist = abut(colex(listoflists,sortcols),listoflists)
newlist.sort()
try:
numcols = len(sortcols)
except TypeError:
numcols = 1
crit = '[' + str(numcols) + ':]'
newlist = colex(newlist,crit)
return newlist |
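sortby above depends on the abut and colex helpers, which are not shown here. A rough modern equivalent of the common case uses sorted with operator.itemgetter; this is only a sketch under that assumption, and its tie-breaking differs slightly from the original.

from operator import itemgetter

def sortby_sketch(listoflists, sortcols):
    # sortcols may be a single column index or a sequence of indices
    if isinstance(sortcols, int):
        sortcols = (sortcols,)
    return sorted(listoflists, key=itemgetter(*sortcols))

rows = [['b', 2], ['a', 3], ['a', 1]]
print(sortby_sketch(rows, 0))       # [['a', 3], ['a', 1], ['b', 2]]
print(sortby_sketch(rows, (0, 1)))  # [['a', 1], ['a', 3], ['b', 2]]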
def load_pickle(file, encoding=None):
"""Load a pickle file.
Args:
file (str): Path to pickle file
Returns:
object: Loaded object from pickle file
"""
# TODO: test set encoding='latin1' for 2/3 incompatibility
if encoding:
with open(file, 'rb') as f:
return pickle.load(f, encoding=encoding)
with open(file, 'rb') as f:
return pickle.load(f) | Load a pickle file.
Args:
file (str): Path to pickle file
Returns:
object: Loaded object from pickle file | Below is the instruction that describes the task:
### Input:
Load a pickle file.
Args:
file (str): Path to pickle file
Returns:
object: Loaded object from pickle file
### Response:
def load_pickle(file, encoding=None):
"""Load a pickle file.
Args:
file (str): Path to pickle file
Returns:
object: Loaded object from pickle file
"""
# TODO: test set encoding='latin1' for 2/3 incompatibility
if encoding:
with open(file, 'rb') as f:
return pickle.load(f, encoding=encoding)
with open(file, 'rb') as f:
return pickle.load(f) |
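Usage sketch for load_pickle above; the file name data.pkl is only illustrative, and encoding='latin1' is the usual workaround hinted at in the TODO for pickles written by Python 2.

import pickle

with open('data.pkl', 'wb') as f:
    pickle.dump({'answer': 42}, f)

print(load_pickle('data.pkl'))  # {'answer': 42}
# For Python 2 pickles: load_pickle('legacy.pkl', encoding='latin1')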
def _block_from_ip_and_prefix(ip, prefix):
"""Create a tuple of (start, end) dotted-quad addresses from the given
ip address and prefix length.
:param ip: Ip address in block
:type ip: long
:param prefix: Prefix size for block
:type prefix: int
:returns: Tuple of block (start, end)
"""
# keep left most prefix bits of ip
shift = 32 - prefix
block_start = ip >> shift << shift
# expand right most 32 - prefix bits to 1
mask = (1 << shift) - 1
block_end = block_start | mask
return (long2ip(block_start), long2ip(block_end)) | Create a tuple of (start, end) dotted-quad addresses from the given
ip address and prefix length.
:param ip: Ip address in block
:type ip: long
:param prefix: Prefix size for block
:type prefix: int
:returns: Tuple of block (start, end) | Below is the instruction that describes the task:
### Input:
Create a tuple of (start, end) dotted-quad addresses from the given
ip address and prefix length.
:param ip: Ip address in block
:type ip: long
:param prefix: Prefix size for block
:type prefix: int
:returns: Tuple of block (start, end)
### Response:
def _block_from_ip_and_prefix(ip, prefix):
"""Create a tuple of (start, end) dotted-quad addresses from the given
ip address and prefix length.
:param ip: Ip address in block
:type ip: long
:param prefix: Prefix size for block
:type prefix: int
:returns: Tuple of block (start, end)
"""
# keep left most prefix bits of ip
shift = 32 - prefix
block_start = ip >> shift << shift
# expand right most 32 - prefix bits to 1
mask = (1 << shift) - 1
block_end = block_start | mask
return (long2ip(block_start), long2ip(block_end)) |
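Worked example of the bit arithmetic in _block_from_ip_and_prefix; long2ip is not shown above, so a minimal stand-in is defined here purely for illustration.

def long2ip(value):
    return '.'.join(str((value >> shift) & 0xFF) for shift in (24, 16, 8, 0))

ip = (192 << 24) | (168 << 16) | (1 << 8) | 37   # 192.168.1.37 as an integer
prefix = 24
shift = 32 - prefix
block_start = ip >> shift << shift                # zero the host bits
block_end = block_start | ((1 << shift) - 1)      # set the host bits to 1
print(long2ip(block_start), long2ip(block_end))   # 192.168.1.0 192.168.1.255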
def _show_organisation_logo(self):
"""Show the organisation logo in the dock if possible."""
dock_width = float(self.width())
# Don't let the image be more than 100px high
maximum_height = 100.0 # px
pixmap = QPixmap(self.organisation_logo_path)
if pixmap.height() < 1 or pixmap.width() < 1:
return
height_ratio = maximum_height / pixmap.height()
maximum_width = int(pixmap.width() * height_ratio)
# Don't let the image be more than the dock width wide
if maximum_width > dock_width:
width_ratio = dock_width / float(pixmap.width())
maximum_height = int(pixmap.height() * width_ratio)
maximum_width = dock_width
too_high = pixmap.height() > maximum_height
too_wide = pixmap.width() > dock_width
if too_wide or too_high:
pixmap = pixmap.scaled(
maximum_width, maximum_height, Qt.KeepAspectRatio)
self.organisation_logo.setMaximumWidth(maximum_width)
# We have manually scaled using logic above
self.organisation_logo.setScaledContents(False)
self.organisation_logo.setPixmap(pixmap)
self.organisation_logo.show() | Show the organisation logo in the dock if possible. | Below is the instruction that describes the task:
### Input:
Show the organisation logo in the dock if possible.
### Response:
def _show_organisation_logo(self):
"""Show the organisation logo in the dock if possible."""
dock_width = float(self.width())
# Don't let the image be more than 100px high
maximum_height = 100.0 # px
pixmap = QPixmap(self.organisation_logo_path)
if pixmap.height() < 1 or pixmap.width() < 1:
return
height_ratio = maximum_height / pixmap.height()
maximum_width = int(pixmap.width() * height_ratio)
# Don't let the image be more than the dock width wide
if maximum_width > dock_width:
width_ratio = dock_width / float(pixmap.width())
maximum_height = int(pixmap.height() * width_ratio)
maximum_width = dock_width
too_high = pixmap.height() > maximum_height
too_wide = pixmap.width() > dock_width
if too_wide or too_high:
pixmap = pixmap.scaled(
maximum_width, maximum_height, Qt.KeepAspectRatio)
self.organisation_logo.setMaximumWidth(maximum_width)
# We have manually scaled using logic above
self.organisation_logo.setScaledContents(False)
self.organisation_logo.setPixmap(pixmap)
self.organisation_logo.show() |
def _get_odoo_version_info(addons_dir, odoo_version_override=None):
""" Detect Odoo version from an addons directory """
odoo_version_info = None
addons = os.listdir(addons_dir)
for addon in addons:
addon_dir = os.path.join(addons_dir, addon)
if is_installable_addon(addon_dir):
manifest = read_manifest(addon_dir)
_, _, addon_odoo_version_info = _get_version(
addon_dir, manifest, odoo_version_override,
git_post_version=False)
if odoo_version_info is not None and \
odoo_version_info != addon_odoo_version_info:
raise DistutilsSetupError("Not all addons are for the same "
"odoo version in %s (error detected "
"in %s)" % (addons_dir, addon))
odoo_version_info = addon_odoo_version_info
return odoo_version_info | Detect Odoo version from an addons directory | Below is the instruction that describes the task:
### Input:
Detect Odoo version from an addons directory
### Response:
def _get_odoo_version_info(addons_dir, odoo_version_override=None):
""" Detect Odoo version from an addons directory """
odoo_version_info = None
addons = os.listdir(addons_dir)
for addon in addons:
addon_dir = os.path.join(addons_dir, addon)
if is_installable_addon(addon_dir):
manifest = read_manifest(addon_dir)
_, _, addon_odoo_version_info = _get_version(
addon_dir, manifest, odoo_version_override,
git_post_version=False)
if odoo_version_info is not None and \
odoo_version_info != addon_odoo_version_info:
raise DistutilsSetupError("Not all addons are for the same "
"odoo version in %s (error detected "
"in %s)" % (addons_dir, addon))
odoo_version_info = addon_odoo_version_info
return odoo_version_info |
def on_view_not_found(
self, _,
start_response: Callable[[str, List[Tuple[str, str]]], None],
) -> Iterable[bytes]:
""" called when valid view is not found """
start_response(
"405 Method Not Allowed",
[('Content-type', 'text/plain')])
return [b"Method Not Allowed"] | called when valid view is not found | Below is the the instruction that describes the task:
### Input:
called when valid view is not found
### Response:
def on_view_not_found(
self, _,
start_response: Callable[[str, List[Tuple[str, str]]], None],
) -> Iterable[bytes]:
""" called when valid view is not found """
start_response(
"405 Method Not Allowed",
[('Content-type', 'text/plain')])
return [b"Method Not Allowed"] |
def _ScanNode(self, scan_context, scan_node, auto_recurse=True):
"""Scans a node for supported formats.
Args:
scan_context (SourceScannerContext): source scanner context.
scan_node (SourceScanNode): source scan node.
auto_recurse (Optional[bool]): True if the scan should automatically
recurse as far as possible.
Raises:
BackEndError: if the source cannot be scanned.
ValueError: if the scan context or scan node is invalid.
"""
if not scan_context:
raise ValueError('Invalid scan context.')
if not scan_node:
raise ValueError('Invalid scan node.')
scan_path_spec = scan_node.path_spec
system_level_file_entry = None
if scan_node.IsSystemLevel():
system_level_file_entry = resolver.Resolver.OpenFileEntry(
scan_node.path_spec, resolver_context=self._resolver_context)
if system_level_file_entry is None:
raise errors.BackEndError('Unable to open file entry.')
if system_level_file_entry.IsDirectory():
scan_context.SetSourceType(definitions.SOURCE_TYPE_DIRECTORY)
return
source_path_spec = self.ScanForStorageMediaImage(scan_node.path_spec)
if source_path_spec:
scan_node.scanned = True
scan_node = scan_context.AddScanNode(source_path_spec, scan_node)
if system_level_file_entry.IsDevice():
source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE
else:
source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE
scan_context.SetSourceType(source_type)
if not auto_recurse:
return
# In case we did not find a storage media image type we keep looking
# since not all RAW storage media image naming schemas are known and
# its type can only be detected by its content.
source_path_spec = None
while True:
if scan_node.IsFileSystem():
# No need to scan a file systems scan node for volume systems.
break
if scan_node.SupportsEncryption():
self._ScanEncryptedVolumeNode(scan_context, scan_node)
if scan_context.IsLockedScanNode(scan_node.path_spec):
# Scan node is locked, such as an encrypted volume, and we cannot
# scan it for a volume system.
break
source_path_spec = self.ScanForVolumeSystem(scan_node.path_spec)
if not source_path_spec:
# No volume system found continue with a file system scan.
break
if not scan_context.HasScanNode(source_path_spec):
scan_node.scanned = True
scan_node = scan_context.AddScanNode(source_path_spec, scan_node)
if system_level_file_entry and system_level_file_entry.IsDevice():
source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE
else:
source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE
scan_context.SetSourceType(source_type)
if scan_node.IsVolumeSystemRoot():
self._ScanVolumeSystemRootNode(
scan_context, scan_node, auto_recurse=auto_recurse)
# We have already scanned for the file systems.
return
if not auto_recurse and scan_context.updated:
return
# Nothing new found.
if not scan_context.updated:
break
# In case we did not find a volume system type we keep looking
# since we could be dealing with a storage media image that contains
# a single volume.
# No need to scan the root of a volume system for a file system.
if scan_node.IsVolumeSystemRoot():
pass
elif scan_context.IsLockedScanNode(scan_node.path_spec):
# Scan node is locked, such as an encrypted volume, and we cannot
# scan it for a file system.
pass
elif (scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW and
auto_recurse and scan_node.path_spec != scan_path_spec):
# Since scanning for file systems in VSS snapshot volumes can
# be expensive we only do this when explicitly asked for.
pass
elif not scan_node.IsFileSystem():
source_path_spec = self.ScanForFileSystem(scan_node.path_spec)
if not source_path_spec:
# Since RAW storage media image can only be determined by naming schema
# we could have single file that is not a RAW storage media image yet
# matches the naming schema.
if scan_node.path_spec.type_indicator == definitions.TYPE_INDICATOR_RAW:
scan_node = scan_context.RemoveScanNode(scan_node.path_spec)
# Make sure to override the previously assigned source type.
scan_context.source_type = definitions.SOURCE_TYPE_FILE
else:
scan_context.SetSourceType(definitions.SOURCE_TYPE_FILE)
elif not scan_context.HasScanNode(source_path_spec):
scan_node.scanned = True
scan_node = scan_context.AddScanNode(source_path_spec, scan_node)
if system_level_file_entry and system_level_file_entry.IsDevice():
source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE
else:
source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE
scan_context.SetSourceType(source_type)
# If all scans failed mark the scan node as scanned so we do not scan it
# again.
if not scan_node.scanned:
scan_node.scanned = True | Scans a node for supported formats.
Args:
scan_context (SourceScannerContext): source scanner context.
scan_node (SourceScanNode): source scan node.
auto_recurse (Optional[bool]): True if the scan should automatically
recurse as far as possible.
Raises:
BackEndError: if the source cannot be scanned.
ValueError: if the scan context or scan node is invalid. | Below is the instruction that describes the task:
### Input:
Scans a node for supported formats.
Args:
scan_context (SourceScannerContext): source scanner context.
scan_node (SourceScanNode): source scan node.
auto_recurse (Optional[bool]): True if the scan should automatically
recurse as far as possible.
Raises:
BackEndError: if the source cannot be scanned.
ValueError: if the scan context or scan node is invalid.
### Response:
def _ScanNode(self, scan_context, scan_node, auto_recurse=True):
"""Scans a node for supported formats.
Args:
scan_context (SourceScannerContext): source scanner context.
scan_node (SourceScanNode): source scan node.
auto_recurse (Optional[bool]): True if the scan should automatically
recurse as far as possible.
Raises:
BackEndError: if the source cannot be scanned.
ValueError: if the scan context or scan node is invalid.
"""
if not scan_context:
raise ValueError('Invalid scan context.')
if not scan_node:
raise ValueError('Invalid scan node.')
scan_path_spec = scan_node.path_spec
system_level_file_entry = None
if scan_node.IsSystemLevel():
system_level_file_entry = resolver.Resolver.OpenFileEntry(
scan_node.path_spec, resolver_context=self._resolver_context)
if system_level_file_entry is None:
raise errors.BackEndError('Unable to open file entry.')
if system_level_file_entry.IsDirectory():
scan_context.SetSourceType(definitions.SOURCE_TYPE_DIRECTORY)
return
source_path_spec = self.ScanForStorageMediaImage(scan_node.path_spec)
if source_path_spec:
scan_node.scanned = True
scan_node = scan_context.AddScanNode(source_path_spec, scan_node)
if system_level_file_entry.IsDevice():
source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE
else:
source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE
scan_context.SetSourceType(source_type)
if not auto_recurse:
return
# In case we did not find a storage media image type we keep looking
# since not all RAW storage media image naming schemas are known and
# its type can only be detected by its content.
source_path_spec = None
while True:
if scan_node.IsFileSystem():
# No need to scan a file systems scan node for volume systems.
break
if scan_node.SupportsEncryption():
self._ScanEncryptedVolumeNode(scan_context, scan_node)
if scan_context.IsLockedScanNode(scan_node.path_spec):
# Scan node is locked, such as an encrypted volume, and we cannot
# scan it for a volume system.
break
source_path_spec = self.ScanForVolumeSystem(scan_node.path_spec)
if not source_path_spec:
# No volume system found continue with a file system scan.
break
if not scan_context.HasScanNode(source_path_spec):
scan_node.scanned = True
scan_node = scan_context.AddScanNode(source_path_spec, scan_node)
if system_level_file_entry and system_level_file_entry.IsDevice():
source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE
else:
source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE
scan_context.SetSourceType(source_type)
if scan_node.IsVolumeSystemRoot():
self._ScanVolumeSystemRootNode(
scan_context, scan_node, auto_recurse=auto_recurse)
# We have already scanned for the file systems.
return
if not auto_recurse and scan_context.updated:
return
# Nothing new found.
if not scan_context.updated:
break
# In case we did not find a volume system type we keep looking
# since we could be dealing with a storage media image that contains
# a single volume.
# No need to scan the root of a volume system for a file system.
if scan_node.IsVolumeSystemRoot():
pass
elif scan_context.IsLockedScanNode(scan_node.path_spec):
# Scan node is locked, such as an encrypted volume, and we cannot
# scan it for a file system.
pass
elif (scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW and
auto_recurse and scan_node.path_spec != scan_path_spec):
# Since scanning for file systems in VSS snapshot volumes can
# be expensive we only do this when explicitly asked for.
pass
elif not scan_node.IsFileSystem():
source_path_spec = self.ScanForFileSystem(scan_node.path_spec)
if not source_path_spec:
# Since RAW storage media image can only be determined by naming schema
# we could have single file that is not a RAW storage media image yet
# matches the naming schema.
if scan_node.path_spec.type_indicator == definitions.TYPE_INDICATOR_RAW:
scan_node = scan_context.RemoveScanNode(scan_node.path_spec)
# Make sure to override the previously assigned source type.
scan_context.source_type = definitions.SOURCE_TYPE_FILE
else:
scan_context.SetSourceType(definitions.SOURCE_TYPE_FILE)
elif not scan_context.HasScanNode(source_path_spec):
scan_node.scanned = True
scan_node = scan_context.AddScanNode(source_path_spec, scan_node)
if system_level_file_entry and system_level_file_entry.IsDevice():
source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE
else:
source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE
scan_context.SetSourceType(source_type)
# If all scans failed mark the scan node as scanned so we do not scan it
# again.
if not scan_node.scanned:
scan_node.scanned = True |
def get_all_fields(cls, class_obj=None, fields=None):
"""
TODO: This needs to be properly used
"""
def return_fields(obj):
internal_fields = fields
if internal_fields is None:
internal_fields = {}
for attribute in dir(obj):
try:
attr_val = getattr(obj, attribute)
attr_cls = attr_val.__class__
# If it is a model field, call on init
if issubclass(attr_cls, ModelField):
internal_fields[attribute] = attr_val
except:
pass
return internal_fields
if class_obj is None:
class_obj = cls
fields = return_fields(class_obj)
for parent_class in cls.__bases__:
parent_fields = cls.get_all_fields(parent_class, fields)
for field_name, field_value in list(parent_fields.items()):
if not field_name in fields:
fields[field_name] = field_value
return fields
else:
if not isinstance(class_obj, CollectionModel):
return fields | TODO: This needs to be properly used | Below is the instruction that describes the task:
### Input:
TODO: This needs to be properly used
### Response:
def get_all_fields(cls, class_obj=None, fields=None):
"""
TODO: This needs to be properly used
"""
def return_fields(obj):
internal_fields = fields
if internal_fields is None:
internal_fields = {}
for attribute in dir(obj):
try:
attr_val = getattr(obj, attribute)
attr_cls = attr_val.__class__
# If it is a model field, call on init
if issubclass(attr_cls, ModelField):
internal_fields[attribute] = attr_val
except:
pass
return internal_fields
if class_obj is None:
class_obj = cls
fields = return_fields(class_obj)
for parent_class in cls.__bases__:
parent_fields = cls.get_all_fields(parent_class, fields)
for field_name, field_value in list(parent_fields.items()):
if not field_name in fields:
fields[field_name] = field_value
return fields
else:
if not isinstance(class_obj, CollectionModel):
return fields |
def createPedChr23UsingPlink(options):
"""Run Plink to create a ped format.
:param options: the options.
:type options: argparse.Namespace
Uses Plink to create a ``ped`` file of markers on the chromosome ``23``. It
uses the ``recodeA`` options to use additive coding. It also subsets the
data to keep only samples with sex problems.
"""
plinkCommand = ["plink", "--noweb", "--bfile", options.bfile, "--chr",
"23", "--recodeA", "--keep",
options.out + ".list_problem_sex_ids", "--out",
options.out + ".chr23_recodeA"]
runCommand(plinkCommand) | Run Plink to create a ped format.
:param options: the options.
:type options: argparse.Namespace
Uses Plink to create a ``ped`` file of markers on the chromosome ``23``. It
uses the ``recodeA`` options to use additive coding. It also subsets the
data to keep only samples with sex problems. | Below is the instruction that describes the task:
### Input:
Run Plink to create a ped format.
:param options: the options.
:type options: argparse.Namespace
Uses Plink to create a ``ped`` file of markers on the chromosome ``23``. It
uses the ``recodeA`` options to use additive coding. It also subsets the
data to keep only samples with sex problems.
### Response:
def createPedChr23UsingPlink(options):
"""Run Plink to create a ped format.
:param options: the options.
:type options: argparse.Namespace
Uses Plink to create a ``ped`` file of markers on the chromosome ``23``. It
uses the ``recodeA`` options to use additive coding. It also subsets the
data to keep only samples with sex problems.
"""
plinkCommand = ["plink", "--noweb", "--bfile", options.bfile, "--chr",
"23", "--recodeA", "--keep",
options.out + ".list_problem_sex_ids", "--out",
options.out + ".chr23_recodeA"]
runCommand(plinkCommand) |
def after_app_request(self, f):
"""Like :meth:`Flask.after_request` but for a blueprint. Such a function
is executed after each request, even if outside of the blueprint.
"""
self.record_once(lambda s: s.app.after_request_funcs
.setdefault(None, []).append(f))
return f | Like :meth:`Flask.after_request` but for a blueprint. Such a function
is executed after each request, even if outside of the blueprint. | Below is the instruction that describes the task:
### Input:
Like :meth:`Flask.after_request` but for a blueprint. Such a function
is executed after each request, even if outside of the blueprint.
### Response:
def after_app_request(self, f):
"""Like :meth:`Flask.after_request` but for a blueprint. Such a function
is executed after each request, even if outside of the blueprint.
"""
self.record_once(lambda s: s.app.after_request_funcs
.setdefault(None, []).append(f))
return f |
def get_internal_call_graph(fpath, with_doctests=False):
"""
CommandLine:
python -m utool.util_inspect get_internal_call_graph --show --modpath=~/code/ibeis/ibeis/init/main_helpers.py --show
python -m utool.util_inspect get_internal_call_graph --show --modpath=~/code/dtool/dtool/depcache_table.py --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> import utool as ut
>>> fpath = ut.get_argval('--modpath', default='.')
>>> with_doctests = ut.get_argflag('--with_doctests')
>>> G = get_internal_call_graph(fpath, with_doctests)
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> pt.qt4ensure()
>>> pt.show_nx(G, fontsize=8, as_directed=False)
>>> z = pt.zoom_factory()
>>> p = pt.pan_factory()
>>> ut.show_if_requested()
"""
import utool as ut
fpath = ut.truepath(fpath)
sourcecode = ut.readfrom(fpath)
self = ut.BaronWraper(sourcecode)
G = self.internal_call_graph(with_doctests=with_doctests)
return G | CommandLine:
python -m utool.util_inspect get_internal_call_graph --show --modpath=~/code/ibeis/ibeis/init/main_helpers.py --show
python -m utool.util_inspect get_internal_call_graph --show --modpath=~/code/dtool/dtool/depcache_table.py --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> import utool as ut
>>> fpath = ut.get_argval('--modpath', default='.')
>>> with_doctests = ut.get_argflag('--with_doctests')
>>> G = get_internal_call_graph(fpath, with_doctests)
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> pt.qt4ensure()
>>> pt.show_nx(G, fontsize=8, as_directed=False)
>>> z = pt.zoom_factory()
>>> p = pt.pan_factory()
>>> ut.show_if_requested() | Below is the instruction that describes the task:
### Input:
CommandLine:
python -m utool.util_inspect get_internal_call_graph --show --modpath=~/code/ibeis/ibeis/init/main_helpers.py --show
python -m utool.util_inspect get_internal_call_graph --show --modpath=~/code/dtool/dtool/depcache_table.py --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> import utool as ut
>>> fpath = ut.get_argval('--modpath', default='.')
>>> with_doctests = ut.get_argflag('--with_doctests')
>>> G = get_internal_call_graph(fpath, with_doctests)
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> pt.qt4ensure()
>>> pt.show_nx(G, fontsize=8, as_directed=False)
>>> z = pt.zoom_factory()
>>> p = pt.pan_factory()
>>> ut.show_if_requested()
### Response:
def get_internal_call_graph(fpath, with_doctests=False):
"""
CommandLine:
python -m utool.util_inspect get_internal_call_graph --show --modpath=~/code/ibeis/ibeis/init/main_helpers.py --show
python -m utool.util_inspect get_internal_call_graph --show --modpath=~/code/dtool/dtool/depcache_table.py --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> import utool as ut
>>> fpath = ut.get_argval('--modpath', default='.')
>>> with_doctests = ut.get_argflag('--with_doctests')
>>> G = get_internal_call_graph(fpath, with_doctests)
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> pt.qt4ensure()
>>> pt.show_nx(G, fontsize=8, as_directed=False)
>>> z = pt.zoom_factory()
>>> p = pt.pan_factory()
>>> ut.show_if_requested()
"""
import utool as ut
fpath = ut.truepath(fpath)
sourcecode = ut.readfrom(fpath)
self = ut.BaronWraper(sourcecode)
G = self.internal_call_graph(with_doctests=with_doctests)
return G |
def child_count(self, only_direct=True):
"""Returns how many children this node has, either only the direct
children of this node or inclusive of all children nodes of this node.
"""
if not only_direct:
count = 0
for _node in self.dfs_iter():
count += 1
return count
return len(self._children) | Returns how many children this node has, either only the direct
children of this node or inclusive of all children nodes of this node. | Below is the instruction that describes the task:
### Input:
Returns how many children this node has, either only the direct
children of this node or inclusive of all children nodes of this node.
### Response:
def child_count(self, only_direct=True):
"""Returns how many children this node has, either only the direct
children of this node or inclusive of all children nodes of this node.
"""
if not only_direct:
count = 0
for _node in self.dfs_iter():
count += 1
return count
return len(self._children) |
def left_to_right(self):
"""This is for text that flows Left to Right"""
self._entry_mode |= Command.MODE_INCREMENT
self.command(self._entry_mode) | This is for text that flows Left to Right | Below is the instruction that describes the task:
### Input:
This is for text that flows Left to Right
### Response:
def left_to_right(self):
"""This is for text that flows Left to Right"""
self._entry_mode |= Command.MODE_INCREMENT
self.command(self._entry_mode) |
def parse_te(header):
"""Parse the "TE" header."""
pos = 0
names = []
while pos < len(header):
name, pos = expect_re(re_token, header, pos)
_, pos = accept_ws(header, pos)
_, pos = accept_lit(';', header, pos)
_, pos = accept_ws(header, pos)
qvalue, pos = accept_re(re_qvalue, header, pos)
if name:
names.append((name, qvalue))
_, pos = accept_ws(header, pos)
_, pos = expect_lit(',', header, pos)
_, pos = accept_ws(header, pos)
return names | Parse the "TE" header. | Below is the instruction that describes the task:
### Input:
Parse the "TE" header.
### Response:
def parse_te(header):
"""Parse the "TE" header."""
pos = 0
names = []
while pos < len(header):
name, pos = expect_re(re_token, header, pos)
_, pos = accept_ws(header, pos)
_, pos = accept_lit(';', header, pos)
_, pos = accept_ws(header, pos)
qvalue, pos = accept_re(re_qvalue, header, pos)
if name:
names.append((name, qvalue))
_, pos = accept_ws(header, pos)
_, pos = expect_lit(',', header, pos)
_, pos = accept_ws(header, pos)
return names |
def get_cached_files(url, server_name="", document_root=None):
"""
Given a URL, return a list of paths of all cached variations of that file.
Doesn't include the original file.
"""
import glob
url_info = process_url(url, server_name, document_root, check_security=False)
# get path to cache directory with basename of file (no extension)
filedir = os.path.dirname(url_info['requested_file'])
fileglob = '{0}*{1}'.format(url_info['base_filename'], url_info['ext'])
return glob.glob(os.path.join(filedir, fileglob)) | Given a URL, return a list of paths of all cached variations of that file.
Doesn't include the original file. | Below is the instruction that describes the task:
### Input:
Given a URL, return a list of paths of all cached variations of that file.
Doesn't include the original file.
### Response:
def get_cached_files(url, server_name="", document_root=None):
"""
Given a URL, return a list of paths of all cached variations of that file.
Doesn't include the original file.
"""
import glob
url_info = process_url(url, server_name, document_root, check_security=False)
# get path to cache directory with basename of file (no extension)
filedir = os.path.dirname(url_info['requested_file'])
fileglob = '{0}*{1}'.format(url_info['base_filename'], url_info['ext'])
return glob.glob(os.path.join(filedir, fileglob)) |
def mount(self, app, script_path):
''' Mount a Bottle application to a specific URL prefix '''
if not isinstance(app, Bottle):
raise TypeError('Only Bottle instances are supported for now.')
script_path = '/'.join(filter(None, script_path.split('/')))
path_depth = script_path.count('/') + 1
if not script_path:
raise TypeError('Empty script_path. Perhaps you want a merge()?')
for other in self.mounts:
if other.startswith(script_path):
raise TypeError('Conflict with existing mount: %s' % other)
@self.route('/%s/:#.*#' % script_path, method="ANY")
def mountpoint():
request.path_shift(path_depth)
return app.handle(request.path, request.method)
self.mounts[script_path] = app | Mount a Bottle application to a specific URL prefix | Below is the instruction that describes the task:
### Input:
Mount a Bottle application to a specific URL prefix
### Response:
def mount(self, app, script_path):
''' Mount a Bottle application to a specific URL prefix '''
if not isinstance(app, Bottle):
raise TypeError('Only Bottle instances are supported for now.')
script_path = '/'.join(filter(None, script_path.split('/')))
path_depth = script_path.count('/') + 1
if not script_path:
raise TypeError('Empty script_path. Perhaps you want a merge()?')
for other in self.mounts:
if other.startswith(script_path):
raise TypeError('Conflict with existing mount: %s' % other)
@self.route('/%s/:#.*#' % script_path, method="ANY")
def mountpoint():
request.path_shift(path_depth)
return app.handle(request.path, request.method)
self.mounts[script_path] = app |
def RandomNormalInitializer(stddev=1e-2):
"""An initializer function for random normal coefficients."""
def init(shape, rng):
return (stddev * backend.random.normal(rng, shape)).astype('float32')
return init | An initializer function for random normal coefficients. | Below is the instruction that describes the task:
### Input:
An initializer function for random normal coefficients.
### Response:
def RandomNormalInitializer(stddev=1e-2):
"""An initializer function for random normal coefficients."""
def init(shape, rng):
return (stddev * backend.random.normal(rng, shape)).astype('float32')
return init |
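The backend and rng plumbing used above is not shown; a numpy-only sketch of the same closure pattern (a factory that returns an init(shape, rng) function) could look like this:

import numpy as np

def random_normal_initializer(stddev=1e-2):
    def init(shape, rng):
        return (stddev * rng.standard_normal(shape)).astype('float32')
    return init

init = random_normal_initializer(0.05)
w = init((3, 4), np.random.default_rng(0))
print(w.shape, w.dtype)   # (3, 4) float32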
def _add_vdsm_forbidden_paths(self):
"""Add confidential sysprep vfds under /var/run/vdsm to
forbidden paths """
for file_path in glob.glob("/var/run/vdsm/*"):
if file_path.endswith(('.vfd', '/isoUploader', '/storage')):
self.add_forbidden_path(file_path) | Add confidential sysprep vfds under /var/run/vdsm to
forbidden paths | Below is the instruction that describes the task:
### Input:
Add confidential sysprep vfds under /var/run/vdsm to
forbidden paths
### Response:
def _add_vdsm_forbidden_paths(self):
"""Add confidential sysprep vfds under /var/run/vdsm to
forbidden paths """
for file_path in glob.glob("/var/run/vdsm/*"):
if file_path.endswith(('.vfd', '/isoUploader', '/storage')):
self.add_forbidden_path(file_path) |
def resolve_sid(f):
"""View handler decorator that adds SID resolve and PID validation.
- For v1 calls, assume that ``did`` is a pid and raise NotFound exception if it's
not valid.
- For v2 calls, if DID is a valid PID, return it. If not, try to resolve it as a
SID and, if successful, return the new PID. Else, raise NotFound exception.
"""
@functools.wraps(f)
def wrapper(request, did, *args, **kwargs):
pid = resolve_sid_func(request, did)
return f(request, pid, *args, **kwargs)
return wrapper | View handler decorator that adds SID resolve and PID validation.
- For v1 calls, assume that ``did`` is a pid and raise NotFound exception if it's
not valid.
- For v2 calls, if DID is a valid PID, return it. If not, try to resolve it as a
SID and, if successful, return the new PID. Else, raise NotFound exception. | Below is the instruction that describes the task:
### Input:
View handler decorator that adds SID resolve and PID validation.
- For v1 calls, assume that ``did`` is a pid and raise NotFound exception if it's
not valid.
- For v2 calls, if DID is a valid PID, return it. If not, try to resolve it as a
SID and, if successful, return the new PID. Else, raise NotFound exception.
### Response:
def resolve_sid(f):
"""View handler decorator that adds SID resolve and PID validation.
- For v1 calls, assume that ``did`` is a pid and raise NotFound exception if it's
not valid.
- For v2 calls, if DID is a valid PID, return it. If not, try to resolve it as a
SID and, if successful, return the new PID. Else, raise NotFound exception.
"""
@functools.wraps(f)
def wrapper(request, did, *args, **kwargs):
pid = resolve_sid_func(request, did)
return f(request, pid, *args, **kwargs)
return wrapper |
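A self-contained sketch of the same decorator pattern; the SID_TO_PID mapping and the view function below are invented stand-ins for illustration, not the original project's resolver.

import functools

SID_TO_PID = {'my-series': 'obj-v2'}

def resolve_sid_sketch(f):
    @functools.wraps(f)
    def wrapper(request, did, *args, **kwargs):
        pid = SID_TO_PID.get(did, did)   # fall back to treating did as a pid
        return f(request, pid, *args, **kwargs)
    return wrapper

@resolve_sid_sketch
def get_object(request, pid):
    return 'resolved to %s' % pid

print(get_object(None, 'my-series'))   # resolved to obj-v2
print(get_object(None, 'obj-v1'))      # resolved to obj-v1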
def parse_man_page(command, platform):
"""Parse the man page and return the parsed lines."""
page_path = find_page_location(command, platform)
output_lines = parse_page(page_path)
return output_lines | Parse the man page and return the parsed lines. | Below is the instruction that describes the task:
### Input:
Parse the man page and return the parsed lines.
### Response:
def parse_man_page(command, platform):
"""Parse the man page and return the parsed lines."""
page_path = find_page_location(command, platform)
output_lines = parse_page(page_path)
return output_lines |
def create_payload(self):
"""Wrap submitted data within an extra dict."""
payload = super(JobTemplate, self).create_payload()
effective_user = payload.pop(u'effective_user', None)
if effective_user:
payload[u'ssh'] = {u'effective_user': effective_user}
return {u'job_template': payload} | Wrap submitted data within an extra dict. | Below is the instruction that describes the task:
### Input:
Wrap submitted data within an extra dict.
### Response:
def create_payload(self):
"""Wrap submitted data within an extra dict."""
payload = super(JobTemplate, self).create_payload()
effective_user = payload.pop(u'effective_user', None)
if effective_user:
payload[u'ssh'] = {u'effective_user': effective_user}
return {u'job_template': payload} |
def catalog(self, table='', column=''):
"""Lookup the values available for querying."""
lookup_table = self.lookup_table
if lookup_table is not None:
if table:
if column:
column = column.upper()
return lookup_table[table][column]
return lookup_table[table]
# Show what methods are available.
return self.lookup_methods
return None | Lookup the values available for querying. | Below is the instruction that describes the task:
### Input:
Lookup the values available for querying.
### Response:
def catalog(self, table='', column=''):
"""Lookup the values available for querying."""
lookup_table = self.lookup_table
if lookup_table is not None:
if table:
if column:
column = column.upper()
return lookup_table[table][column]
return lookup_table[table]
# Show what methods are available.
return self.lookup_methods
return None |
def getcpustat(self, process_name):
"""
get CPU stat for the given process name
@param process_name: Process name, ex: firefox-bin.
@type process_name: string
@return: cpu stat list on success, else empty list
If same process name, running multiple instance,
get the stat of all the process CPU usage
@rtype: list
"""
# Create an instance of process stat
_stat_inst = ProcessStats(process_name)
_stat_list = []
for p in _stat_inst.get_cpu_memory_stat():
try:
_stat_list.append(p.get_cpu_percent())
except psutil.AccessDenied:
pass
return _stat_list | get CPU stat for the given process name
@param process_name: Process name, ex: firefox-bin.
@type process_name: string
@return: cpu stat list on success, else empty list
If same process name, running multiple instance,
get the stat of all the process CPU usage
@rtype: list | Below is the instruction that describes the task:
### Input:
get CPU stat for the given process name
@param process_name: Process name, ex: firefox-bin.
@type process_name: string
@return: cpu stat list on success, else empty list
If same process name, running multiple instance,
get the stat of all the process CPU usage
@rtype: list
### Response:
def getcpustat(self, process_name):
"""
get CPU stat for the given process name
@param process_name: Process name, ex: firefox-bin.
@type process_name: string
@return: cpu stat list on success, else empty list
If same process name, running multiple instance,
get the stat of all the process CPU usage
@rtype: list
"""
# Create an instance of process stat
_stat_inst = ProcessStats(process_name)
_stat_list = []
for p in _stat_inst.get_cpu_memory_stat():
try:
_stat_list.append(p.get_cpu_percent())
except psutil.AccessDenied:
pass
return _stat_list |
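The ProcessStats helper used above is not shown; assuming psutil is installed, the same idea (collect the CPU percentage for every process matching a name) can be sketched directly:

import psutil

def cpu_percent_by_name(process_name):
    stats = []
    for proc in psutil.process_iter(['name']):
        try:
            if proc.info['name'] == process_name:
                stats.append(proc.cpu_percent(interval=0.1))
        except (psutil.AccessDenied, psutil.NoSuchProcess):
            pass
    return stats

print(cpu_percent_by_name('firefox-bin'))  # e.g. [2.0, 0.0] or [] if not running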
def add_completions(
replace_list: list, belstr: str, replace_span: Span, completion_text: str
) -> List[Mapping[str, Any]]:
"""Create completions to return given replacement list
Args:
replace_list: list of completion replacement values
belstr: BEL String
replace_span: start, stop of belstr to replace
completion_text: text to use for completion - used for creating highlight
Returns:
[{
"replacement": replacement,
"cursor_loc": cursor_loc,
"highlight": highlight,
"label": label,
}]
"""
completions = []
for r in replace_list:
# if '(' not in belstr:
# replacement = f'{r["replacement"]}()'
# cursor_loc = len(replacement) - 1 # inside parenthesis
# elif r['type'] == 'Function' and replace_span[1] == len(belstr) - 1:
if len(belstr) > 0:
belstr_end = len(belstr) - 1
else:
belstr_end = 0
log.debug(
f'Replace list {r} Replace_span {replace_span} BELstr: {belstr} Len: {belstr_end} Test1 {r["type"] == "Function"} Test2 {replace_span[1] + 1 == len(belstr)}'
)
# Put a space between comma and following function arg
if (
r["type"] == "Function"
and replace_span[0] > 0
and belstr[replace_span[0] - 1] == ","
):
log.debug("prior char is a comma")
replacement = (
belstr[0 : replace_span[0]]
+ " "
+ f"{r['replacement']}()"
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(
belstr[0 : replace_span[0]] + " " + f"{r['replacement']}()"
)
# Put a space between comma and following NSArg or StrArg
elif replace_span[0] > 0 and belstr[replace_span[0] - 1] == ",":
log.debug("prior char is a comma")
replacement = (
belstr[0 : replace_span[0]]
+ " "
+ r["replacement"]
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(belstr[0 : replace_span[0]] + " " + r["replacement"])
# Add function to end of belstr
elif r["type"] == "Function" and replace_span[1] >= belstr_end:
replacement = belstr[0 : replace_span[0]] + f"{r['replacement']}()"
cursor_loc = len(replacement) - 1 # inside parenthesis
log.debug(f"Replacement: {replacement}")
# Insert replacement in beginning or middle of belstr
else:
replacement = (
belstr[0 : replace_span[0]]
+ r["replacement"]
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(
belstr[0 : replace_span[0]] + r["replacement"]
) # move cursor just past replacement
completions.append(
{
"replacement": replacement,
"cursor_loc": cursor_loc,
"highlight": r["highlight"],
"label": r["label"],
}
)
return completions | Create completions to return given replacement list
Args:
replace_list: list of completion replacement values
belstr: BEL String
replace_span: start, stop of belstr to replace
completion_text: text to use for completion - used for creating highlight
Returns:
[{
"replacement": replacement,
"cursor_loc": cursor_loc,
"highlight": highlight,
"label": label,
}] | Below is the instruction that describes the task:
### Input:
Create completions to return given replacement list
Args:
replace_list: list of completion replacement values
belstr: BEL String
replace_span: start, stop of belstr to replace
completion_text: text to use for completion - used for creating highlight
Returns:
[{
"replacement": replacement,
"cursor_loc": cursor_loc,
"highlight": highlight,
"label": label,
}]
### Response:
def add_completions(
replace_list: list, belstr: str, replace_span: Span, completion_text: str
) -> List[Mapping[str, Any]]:
"""Create completions to return given replacement list
Args:
replace_list: list of completion replacement values
belstr: BEL String
replace_span: start, stop of belstr to replace
completion_text: text to use for completion - used for creating highlight
Returns:
[{
"replacement": replacement,
"cursor_loc": cursor_loc,
"highlight": highlight,
"label": label,
}]
"""
completions = []
for r in replace_list:
# if '(' not in belstr:
# replacement = f'{r["replacement"]}()'
# cursor_loc = len(replacement) - 1 # inside parenthesis
# elif r['type'] == 'Function' and replace_span[1] == len(belstr) - 1:
if len(belstr) > 0:
belstr_end = len(belstr) - 1
else:
belstr_end = 0
log.debug(
f'Replace list {r} Replace_span {replace_span} BELstr: {belstr} Len: {belstr_end} Test1 {r["type"] == "Function"} Test2 {replace_span[1] + 1 == len(belstr)}'
)
# Put a space between comma and following function arg
if (
r["type"] == "Function"
and replace_span[0] > 0
and belstr[replace_span[0] - 1] == ","
):
log.debug("prior char is a comma")
replacement = (
belstr[0 : replace_span[0]]
+ " "
+ f"{r['replacement']}()"
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(
belstr[0 : replace_span[0]] + " " + f"{r['replacement']}()"
)
# Put a space between comma and following NSArg or StrArg
elif replace_span[0] > 0 and belstr[replace_span[0] - 1] == ",":
log.debug("prior char is a comma")
replacement = (
belstr[0 : replace_span[0]]
+ " "
+ r["replacement"]
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(belstr[0 : replace_span[0]] + " " + r["replacement"])
# Add function to end of belstr
elif r["type"] == "Function" and replace_span[1] >= belstr_end:
replacement = belstr[0 : replace_span[0]] + f"{r['replacement']}()"
cursor_loc = len(replacement) - 1 # inside parenthesis
log.debug(f"Replacement: {replacement}")
# Insert replacement in beginning or middle of belstr
else:
replacement = (
belstr[0 : replace_span[0]]
+ r["replacement"]
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(
belstr[0 : replace_span[0]] + r["replacement"]
) # move cursor just past replacement
completions.append(
{
"replacement": replacement,
"cursor_loc": cursor_loc,
"highlight": r["highlight"],
"label": r["label"],
}
)
return completions |
def get_usernames_like(username,**kwargs):
"""
Return a list of usernames like the given string.
"""
checkname = "%%%s%%"%username
rs = db.DBSession.query(User.username).filter(User.username.like(checkname)).all()
return [r.username for r in rs] | Return a list of usernames like the given string. | Below is the instruction that describes the task:
### Input:
Return a list of usernames like the given string.
### Response:
def get_usernames_like(username,**kwargs):
"""
Return a list of usernames like the given string.
"""
checkname = "%%%s%%"%username
rs = db.DBSession.query(User.username).filter(User.username.like(checkname)).all()
return [r.username for r in rs] |
def call_proxy(self, engine, payload, method, analyze_json_error_param, retry_request_substr_variants,
stream=False):
"""
:param engine: System
:param payload: Request data
:param method: string May contain native_call | tsv | json_newline
:param analyze_json_error_param: Whether to analyze the error parameter in the proxy response
:param retry_request_substr_variants: List of substrings whose presence in the response triggers a re-request
:param stream:
:return:
"""
return self.__api_proxy_call(engine, payload, method, analyze_json_error_param, retry_request_substr_variants,
stream) | :param engine: System
:param payload: Request data
:param method: string May contain native_call | tsv | json_newline
:param analyze_json_error_param: Whether to analyze the error parameter in the proxy response
:param retry_request_substr_variants: List of substrings whose presence in the response triggers a re-request
:param stream:
:return: | Below is the instruction that describes the task:
### Input:
:param engine: System
:param payload: Request data
:param method: string May contain native_call | tsv | json_newline
:param analyze_json_error_param: Whether to analyze the error parameter in the proxy response
:param retry_request_substr_variants: List of substrings whose presence in the response triggers a re-request
:param stream:
:return:
### Response:
def call_proxy(self, engine, payload, method, analyze_json_error_param, retry_request_substr_variants,
stream=False):
"""
    :param engine: System
    :param payload: Request data
    :param method: string May contain native_call | tsv | json_newline
    :param analyze_json_error_param: Whether to analyze the error parameter in the proxy response
    :param retry_request_substr_variants: List of substrings that trigger a request retry when present in the response
:param stream:
:return:
"""
return self.__api_proxy_call(engine, payload, method, analyze_json_error_param, retry_request_substr_variants,
stream) |
def child(self, **kwargs):
'''set childSelector.'''
return AutomatorDeviceObject(
self.device,
self.selector.clone().child(**kwargs)
) | set childSelector. | Below is the the instruction that describes the task:
### Input:
set childSelector.
### Response:
def child(self, **kwargs):
'''set childSelector.'''
return AutomatorDeviceObject(
self.device,
self.selector.clone().child(**kwargs)
) |
def cli(env, identifier, keys, permissions, hardware, virtual, logins, events):
"""User details."""
mgr = SoftLayer.UserManager(env.client)
user_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'username')
object_mask = "userStatus[name], parent[id, username], apiAuthenticationKeys[authenticationKey], "\
"unsuccessfulLogins, successfulLogins"
user = mgr.get_user(user_id, object_mask)
env.fout(basic_info(user, keys))
if permissions:
perms = mgr.get_user_permissions(user_id)
env.fout(print_permissions(perms))
if hardware:
mask = "id, hardware, dedicatedHosts"
access = mgr.get_user(user_id, mask)
env.fout(print_dedicated_access(access.get('dedicatedHosts', [])))
env.fout(print_access(access.get('hardware', []), 'Hardware'))
if virtual:
mask = "id, virtualGuests"
access = mgr.get_user(user_id, mask)
env.fout(print_access(access.get('virtualGuests', []), 'Virtual Guests'))
if logins:
login_log = mgr.get_logins(user_id)
env.fout(print_logins(login_log))
if events:
event_log = mgr.get_events(user_id)
env.fout(print_events(event_log)) | User details. | Below is the the instruction that describes the task:
### Input:
User details.
### Response:
def cli(env, identifier, keys, permissions, hardware, virtual, logins, events):
"""User details."""
mgr = SoftLayer.UserManager(env.client)
user_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'username')
object_mask = "userStatus[name], parent[id, username], apiAuthenticationKeys[authenticationKey], "\
"unsuccessfulLogins, successfulLogins"
user = mgr.get_user(user_id, object_mask)
env.fout(basic_info(user, keys))
if permissions:
perms = mgr.get_user_permissions(user_id)
env.fout(print_permissions(perms))
if hardware:
mask = "id, hardware, dedicatedHosts"
access = mgr.get_user(user_id, mask)
env.fout(print_dedicated_access(access.get('dedicatedHosts', [])))
env.fout(print_access(access.get('hardware', []), 'Hardware'))
if virtual:
mask = "id, virtualGuests"
access = mgr.get_user(user_id, mask)
env.fout(print_access(access.get('virtualGuests', []), 'Virtual Guests'))
if logins:
login_log = mgr.get_logins(user_id)
env.fout(print_logins(login_log))
if events:
event_log = mgr.get_events(user_id)
env.fout(print_events(event_log)) |
def make_node(cls, operator, left, right, lineno, func=None,
type_=None):
""" Creates a binary node for a binary operation,
e.g. A + 6 => '+' (A, 6) in prefix notation.
Parameters:
-operator: the binary operation token. e.g. 'PLUS' for A + 6
-left: left operand
-right: right operand
-func: is a lambda function used when constant folding is applied
-type_: resulting type (to enforce it).
If no type_ is specified the resulting one will be guessed.
"""
if left is None or right is None:
return None
a, b = left, right # short form names
# Check for constant non-numeric operations
c_type = common_type(a, b) # Resulting operation type or None
if c_type: # there must be a common type for a and b
if is_numeric(a, b) and (is_const(a) or is_number(a)) and \
(is_const(b) or is_number(b)):
if func is not None:
a = SymbolTYPECAST.make_node(c_type, a, lineno) # ensure type
b = SymbolTYPECAST.make_node(c_type, b, lineno) # ensure type
return SymbolNUMBER(func(a.value, b.value), type_=type_, lineno=lineno)
if is_static(a, b):
a = SymbolTYPECAST.make_node(c_type, a, lineno) # ensure type
b = SymbolTYPECAST.make_node(c_type, b, lineno) # ensure type
return SymbolCONST(cls(operator, a, b, lineno, type_=type_, func=func), lineno=lineno)
if operator in ('BNOT', 'BAND', 'BOR', 'BXOR', 'NOT', 'AND', 'OR',
'XOR', 'MINUS', 'MULT', 'DIV', 'SHL', 'SHR') and \
not is_numeric(a, b):
syntax_error(lineno, 'Operator %s cannot be used with STRINGS' % operator)
return None
if is_string(a, b) and func is not None: # Are they STRING Constants?
if operator == 'PLUS':
return SymbolSTRING(func(a.value, b.value), lineno)
return SymbolNUMBER(int(func(a.text, b.text)), type_=TYPE.ubyte,
lineno=lineno) # Convert to u8 (boolean)
if operator in ('BNOT', 'BAND', 'BOR', 'BXOR'):
if TYPE.is_decimal(c_type):
c_type = TYPE.long_
if a.type_ != b.type_ and TYPE.string in (a.type_, b.type_):
        c_type = a.type_  # Will give an error based on the first operand
if operator not in ('SHR', 'SHL'):
a = SymbolTYPECAST.make_node(c_type, a, lineno)
b = SymbolTYPECAST.make_node(c_type, b, lineno)
if a is None or b is None:
return None
if type_ is None:
if operator in ('LT', 'GT', 'EQ', 'LE', 'GE', 'NE', 'AND', 'OR',
'XOR', 'NOT'):
type_ = TYPE.ubyte # Boolean type
else:
type_ = c_type
return cls(operator, a, b, type_=type_, lineno=lineno) | Creates a binary node for a binary operation,
e.g. A + 6 => '+' (A, 6) in prefix notation.
Parameters:
-operator: the binary operation token. e.g. 'PLUS' for A + 6
-left: left operand
-right: right operand
-func: is a lambda function used when constant folding is applied
-type_: resulting type (to enforce it).
If no type_ is specified the resulting one will be guessed. | Below is the the instruction that describes the task:
### Input:
Creates a binary node for a binary operation,
e.g. A + 6 => '+' (A, 6) in prefix notation.
Parameters:
-operator: the binary operation token. e.g. 'PLUS' for A + 6
-left: left operand
-right: right operand
-func: is a lambda function used when constant folding is applied
-type_: resulting type (to enforce it).
If no type_ is specified the resulting one will be guessed.
### Response:
def make_node(cls, operator, left, right, lineno, func=None,
type_=None):
""" Creates a binary node for a binary operation,
e.g. A + 6 => '+' (A, 6) in prefix notation.
Parameters:
-operator: the binary operation token. e.g. 'PLUS' for A + 6
-left: left operand
-right: right operand
-func: is a lambda function used when constant folding is applied
-type_: resulting type (to enforce it).
If no type_ is specified the resulting one will be guessed.
"""
if left is None or right is None:
return None
a, b = left, right # short form names
# Check for constant non-numeric operations
c_type = common_type(a, b) # Resulting operation type or None
if c_type: # there must be a common type for a and b
if is_numeric(a, b) and (is_const(a) or is_number(a)) and \
(is_const(b) or is_number(b)):
if func is not None:
a = SymbolTYPECAST.make_node(c_type, a, lineno) # ensure type
b = SymbolTYPECAST.make_node(c_type, b, lineno) # ensure type
return SymbolNUMBER(func(a.value, b.value), type_=type_, lineno=lineno)
if is_static(a, b):
a = SymbolTYPECAST.make_node(c_type, a, lineno) # ensure type
b = SymbolTYPECAST.make_node(c_type, b, lineno) # ensure type
return SymbolCONST(cls(operator, a, b, lineno, type_=type_, func=func), lineno=lineno)
if operator in ('BNOT', 'BAND', 'BOR', 'BXOR', 'NOT', 'AND', 'OR',
'XOR', 'MINUS', 'MULT', 'DIV', 'SHL', 'SHR') and \
not is_numeric(a, b):
syntax_error(lineno, 'Operator %s cannot be used with STRINGS' % operator)
return None
if is_string(a, b) and func is not None: # Are they STRING Constants?
if operator == 'PLUS':
return SymbolSTRING(func(a.value, b.value), lineno)
return SymbolNUMBER(int(func(a.text, b.text)), type_=TYPE.ubyte,
lineno=lineno) # Convert to u8 (boolean)
if operator in ('BNOT', 'BAND', 'BOR', 'BXOR'):
if TYPE.is_decimal(c_type):
c_type = TYPE.long_
if a.type_ != b.type_ and TYPE.string in (a.type_, b.type_):
        c_type = a.type_  # Will give an error based on the first operand
if operator not in ('SHR', 'SHL'):
a = SymbolTYPECAST.make_node(c_type, a, lineno)
b = SymbolTYPECAST.make_node(c_type, b, lineno)
if a is None or b is None:
return None
if type_ is None:
if operator in ('LT', 'GT', 'EQ', 'LE', 'GE', 'NE', 'AND', 'OR',
'XOR', 'NOT'):
type_ = TYPE.ubyte # Boolean type
else:
type_ = c_type
return cls(operator, a, b, type_=type_, lineno=lineno) |
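
A self-contained toy version of the constant-folding idea in make_node (these are not the compiler's real classes): when both operands are literal numbers and a folding lambda is supplied, the operation is evaluated at build time instead of emitting an operator node.

from dataclasses import dataclass

@dataclass
class Number:
    value: float

@dataclass
class BinOp:
    op: str
    left: object
    right: object

def make_bin_node(op, left, right, func=None):
    # Fold only when both operands are already literals and we know how to evaluate op
    if func is not None and isinstance(left, Number) and isinstance(right, Number):
        return Number(func(left.value, right.value))
    return BinOp(op, left, right)

print(make_bin_node('PLUS', Number(2), Number(3), func=lambda a, b: a + b))
# Number(value=5) -- folded at build time
print(make_bin_node('MULT', Number(2), BinOp('PLUS', Number(3), Number(4)), func=lambda a, b: a * b))
# BinOp(...) -- right operand is not a literal, so a real node is emitted
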
def do_edit(self, line):
"""edit Edit the queue of write operations."""
self._split_args(line, 0, 0)
self._command_processor.get_operation_queue().edit()
self._print_info_if_verbose("The write operation queue was successfully edited") | edit Edit the queue of write operations. | Below is the the instruction that describes the task:
### Input:
edit Edit the queue of write operations.
### Response:
def do_edit(self, line):
"""edit Edit the queue of write operations."""
self._split_args(line, 0, 0)
self._command_processor.get_operation_queue().edit()
self._print_info_if_verbose("The write operation queue was successfully edited") |
def get_field_by_name(self, name):
'''
:param name: name of field to get
:return: direct sub-field with the given name
:raises: :class:`~kitty.core.KittyException` if no direct subfield with this name
'''
if name in self._fields_dict:
return self._fields_dict[name]
raise KittyException('field named (%s) was not found in (%s)' % (self, name)) | :param name: name of field to get
:return: direct sub-field with the given name
:raises: :class:`~kitty.core.KittyException` if no direct subfield with this name | Below is the the instruction that describes the task:
### Input:
:param name: name of field to get
:return: direct sub-field with the given name
:raises: :class:`~kitty.core.KittyException` if no direct subfield with this name
### Response:
def get_field_by_name(self, name):
'''
:param name: name of field to get
:return: direct sub-field with the given name
:raises: :class:`~kitty.core.KittyException` if no direct subfield with this name
'''
if name in self._fields_dict:
return self._fields_dict[name]
raise KittyException('field named (%s) was not found in (%s)' % (self, name)) |
def create_geometry(self, input_geometry, dip, upper_depth, lower_depth,
mesh_spacing=1.0):
'''
If geometry is defined as a numpy array then create instance of
nhlib.geo.line.Line class, otherwise if already instance of class
accept class
:param input_geometry:
Trace (line) of the fault source as either
i) instance of nhlib.geo.line.Line class
ii) numpy.ndarray [Longitude, Latitude]
:param float dip:
Dip of fault surface (in degrees)
:param float upper_depth:
Upper seismogenic depth (km)
:param float lower_depth:
Lower seismogenic depth (km)
:param float mesh_spacing:
Spacing of the fault mesh (km) {default = 1.0}
'''
assert((dip > 0.) and (dip <= 90.))
self.dip = dip
self._check_seismogenic_depths(upper_depth, lower_depth)
if not isinstance(input_geometry, Line):
if not isinstance(input_geometry, np.ndarray):
raise ValueError('Unrecognised or unsupported geometry '
'definition')
else:
self.fault_trace = Line([Point(row[0], row[1]) for row in
input_geometry])
else:
self.fault_trace = input_geometry
# Build fault surface
self.geometry = SimpleFaultSurface.from_fault_data(self.fault_trace,
self.upper_depth,
self.lower_depth,
self.dip,
mesh_spacing) | If geometry is defined as a numpy array then create instance of
nhlib.geo.line.Line class, otherwise if already instance of class
accept class
:param input_geometry:
Trace (line) of the fault source as either
i) instance of nhlib.geo.line.Line class
ii) numpy.ndarray [Longitude, Latitude]
:param float dip:
Dip of fault surface (in degrees)
:param float upper_depth:
Upper seismogenic depth (km)
:param float lower_depth:
Lower seismogenic depth (km)
:param float mesh_spacing:
Spacing of the fault mesh (km) {default = 1.0} | Below is the the instruction that describes the task:
### Input:
If geometry is defined as a numpy array then create instance of
nhlib.geo.line.Line class, otherwise if already instance of class
accept class
:param input_geometry:
Trace (line) of the fault source as either
i) instance of nhlib.geo.line.Line class
ii) numpy.ndarray [Longitude, Latitude]
:param float dip:
Dip of fault surface (in degrees)
:param float upper_depth:
Upper seismogenic depth (km)
:param float lower_depth:
Lower seismogenic depth (km)
:param float mesh_spacing:
Spacing of the fault mesh (km) {default = 1.0}
### Response:
def create_geometry(self, input_geometry, dip, upper_depth, lower_depth,
mesh_spacing=1.0):
'''
If geometry is defined as a numpy array then create instance of
nhlib.geo.line.Line class, otherwise if already instance of class
accept class
:param input_geometry:
Trace (line) of the fault source as either
i) instance of nhlib.geo.line.Line class
ii) numpy.ndarray [Longitude, Latitude]
:param float dip:
Dip of fault surface (in degrees)
:param float upper_depth:
Upper seismogenic depth (km)
:param float lower_depth:
Lower seismogenic depth (km)
:param float mesh_spacing:
Spacing of the fault mesh (km) {default = 1.0}
'''
assert((dip > 0.) and (dip <= 90.))
self.dip = dip
self._check_seismogenic_depths(upper_depth, lower_depth)
if not isinstance(input_geometry, Line):
if not isinstance(input_geometry, np.ndarray):
raise ValueError('Unrecognised or unsupported geometry '
'definition')
else:
self.fault_trace = Line([Point(row[0], row[1]) for row in
input_geometry])
else:
self.fault_trace = input_geometry
# Build fault surface
self.geometry = SimpleFaultSurface.from_fault_data(self.fault_trace,
self.upper_depth,
self.lower_depth,
self.dip,
mesh_spacing) |
def restore_population_parameters(self, global_default=True):
"""Setup UI for population parameter page from setting.
:param global_default: If True, set to original default (from
the value in definitions).
:type global_default: bool
"""
if global_default:
data = generate_default_profile()
else:
data = setting('population_preference', generate_default_profile())
if not isinstance(data, dict):
LOGGER.debug(
'population parameter is not a dictionary. InaSAFE will use '
'the default one.')
data = generate_default_profile()
try:
self.profile_widget.data = data
except KeyError as e:
LOGGER.debug(
'Population parameter is not in correct format. InaSAFE will '
'use the default one.')
LOGGER.debug(e)
data = generate_default_profile()
self.profile_widget.data = data | Setup UI for population parameter page from setting.
:param global_default: If True, set to original default (from
the value in definitions).
:type global_default: bool | Below is the the instruction that describes the task:
### Input:
Setup UI for population parameter page from setting.
:param global_default: If True, set to original default (from
the value in definitions).
:type global_default: bool
### Response:
def restore_population_parameters(self, global_default=True):
"""Setup UI for population parameter page from setting.
:param global_default: If True, set to original default (from
the value in definitions).
:type global_default: bool
"""
if global_default:
data = generate_default_profile()
else:
data = setting('population_preference', generate_default_profile())
if not isinstance(data, dict):
LOGGER.debug(
'population parameter is not a dictionary. InaSAFE will use '
'the default one.')
data = generate_default_profile()
try:
self.profile_widget.data = data
except KeyError as e:
LOGGER.debug(
'Population parameter is not in correct format. InaSAFE will '
'use the default one.')
LOGGER.debug(e)
data = generate_default_profile()
self.profile_widget.data = data |
def get_records_from_basket(self, bskid, group_basket=False,
read_cache=True):
"""
Returns the records from the (public) basket with given bskid
"""
if bskid not in self.cached_baskets or not read_cache:
if self.user:
if group_basket:
group_basket = '&category=G'
else:
group_basket = ''
results = requests.get(
self.server_url + "/yourbaskets/display?of=xm&bskid=" +
str(bskid) + group_basket, cookies=self.cookies,
stream=True)
else:
results = requests.get(
self.server_url +
"/yourbaskets/display_public?of=xm&bskid=" + str(bskid),
stream=True)
else:
return self.cached_baskets[bskid]
parsed_records = self._parse_results(results.raw, self.cached_records)
self.cached_baskets[bskid] = parsed_records
return parsed_records | Returns the records from the (public) basket with given bskid | Below is the the instruction that describes the task:
### Input:
Returns the records from the (public) basket with given bskid
### Response:
def get_records_from_basket(self, bskid, group_basket=False,
read_cache=True):
"""
Returns the records from the (public) basket with given bskid
"""
if bskid not in self.cached_baskets or not read_cache:
if self.user:
if group_basket:
group_basket = '&category=G'
else:
group_basket = ''
results = requests.get(
self.server_url + "/yourbaskets/display?of=xm&bskid=" +
str(bskid) + group_basket, cookies=self.cookies,
stream=True)
else:
results = requests.get(
self.server_url +
"/yourbaskets/display_public?of=xm&bskid=" + str(bskid),
stream=True)
else:
return self.cached_baskets[bskid]
parsed_records = self._parse_results(results.raw, self.cached_records)
self.cached_baskets[bskid] = parsed_records
return parsed_records |
def condor_submit(cmd):
"""
Submits cmd to HTCondor queue
Parameters
----------
cmd: string
Command to be submitted
Returns
-------
int
returncode value from calling the submission command.
"""
is_running = subprocess.call('condor_status', shell=True) == 0
if not is_running:
        raise CalledProcessError(1, 'condor_status', 'HTCondor is not running.')
sub_cmd = 'condor_qsub -shell n -b y -r y -N ' \
+ cmd.split()[0] + ' -m n'
log.info('Calling: ' + sub_cmd)
return subprocess.call(sub_cmd + ' ' + cmd, shell=True) | Submits cmd to HTCondor queue
Parameters
----------
cmd: string
Command to be submitted
Returns
-------
int
returncode value from calling the submission command. | Below is the the instruction that describes the task:
### Input:
Submits cmd to HTCondor queue
Parameters
----------
cmd: string
Command to be submitted
Returns
-------
int
returncode value from calling the submission command.
### Response:
def condor_submit(cmd):
"""
Submits cmd to HTCondor queue
Parameters
----------
cmd: string
Command to be submitted
Returns
-------
int
returncode value from calling the submission command.
"""
is_running = subprocess.call('condor_status', shell=True) == 0
if not is_running:
        raise CalledProcessError(1, 'condor_status', 'HTCondor is not running.')
sub_cmd = 'condor_qsub -shell n -b y -r y -N ' \
+ cmd.split()[0] + ' -m n'
log.info('Calling: ' + sub_cmd)
return subprocess.call(sub_cmd + ' ' + cmd, shell=True) |
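
The submission line the function assembles can be previewed without an HTCondor installation; the wrapped command below is hypothetical.

cmd = 'python train.py --epochs 10'
sub_cmd = 'condor_qsub -shell n -b y -r y -N ' + cmd.split()[0] + ' -m n'
print(sub_cmd + ' ' + cmd)
# condor_qsub -shell n -b y -r y -N python -m n python train.py --epochs 10
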
def create_rule(self, title, symbolizer=None, MinScaleDenominator=None, MaxScaleDenominator=None):
"""
Create a L{Rule} object on this style. A rule requires a title and
symbolizer. If no symbolizer is specified, a PointSymbolizer will be
assigned to the rule.
@type title: string
@param title: The name of the new L{Rule}.
@type symbolizer: L{Symbolizer} I{class}
@param symbolizer: The symbolizer type. This is the class object (as
opposed to a class instance) of the symbolizer to use.
@rtype: L{Rule}
@return: A newly created rule, attached to this FeatureTypeStyle.
"""
elem = self._node.makeelement('{%s}Rule' % SLDNode._nsmap['sld'], nsmap=SLDNode._nsmap)
self._node.append(elem)
rule = Rule(self, len(self._node) - 1)
rule.Title = title
if MinScaleDenominator is not None:
rule.MinScaleDenominator = MinScaleDenominator
if MaxScaleDenominator is not None:
rule.MaxScaleDenominator = MaxScaleDenominator
if symbolizer is None:
symbolizer = PointSymbolizer
sym = symbolizer(rule)
if symbolizer == PointSymbolizer:
gph = Graphic(sym)
mrk = Mark(gph)
mrk.WellKnownName = 'square'
fill = Fill(mrk)
fill.create_cssparameter('fill', '#ff0000')
elif symbolizer == LineSymbolizer:
stroke = Stroke(sym)
stroke.create_cssparameter('stroke', '#0000ff')
elif symbolizer == PolygonSymbolizer:
fill = Fill(sym)
fill.create_cssparameter('fill', '#AAAAAA')
stroke = Stroke(sym)
stroke.create_cssparameter('stroke', '#000000')
stroke.create_cssparameter('stroke-width', '1')
return rule | Create a L{Rule} object on this style. A rule requires a title and
symbolizer. If no symbolizer is specified, a PointSymbolizer will be
assigned to the rule.
@type title: string
@param title: The name of the new L{Rule}.
@type symbolizer: L{Symbolizer} I{class}
@param symbolizer: The symbolizer type. This is the class object (as
opposed to a class instance) of the symbolizer to use.
@rtype: L{Rule}
@return: A newly created rule, attached to this FeatureTypeStyle. | Below is the the instruction that describes the task:
### Input:
Create a L{Rule} object on this style. A rule requires a title and
symbolizer. If no symbolizer is specified, a PointSymbolizer will be
assigned to the rule.
@type title: string
@param title: The name of the new L{Rule}.
@type symbolizer: L{Symbolizer} I{class}
@param symbolizer: The symbolizer type. This is the class object (as
opposed to a class instance) of the symbolizer to use.
@rtype: L{Rule}
@return: A newly created rule, attached to this FeatureTypeStyle.
### Response:
def create_rule(self, title, symbolizer=None, MinScaleDenominator=None, MaxScaleDenominator=None):
"""
Create a L{Rule} object on this style. A rule requires a title and
symbolizer. If no symbolizer is specified, a PointSymbolizer will be
assigned to the rule.
@type title: string
@param title: The name of the new L{Rule}.
@type symbolizer: L{Symbolizer} I{class}
@param symbolizer: The symbolizer type. This is the class object (as
opposed to a class instance) of the symbolizer to use.
@rtype: L{Rule}
@return: A newly created rule, attached to this FeatureTypeStyle.
"""
elem = self._node.makeelement('{%s}Rule' % SLDNode._nsmap['sld'], nsmap=SLDNode._nsmap)
self._node.append(elem)
rule = Rule(self, len(self._node) - 1)
rule.Title = title
if MinScaleDenominator is not None:
rule.MinScaleDenominator = MinScaleDenominator
if MaxScaleDenominator is not None:
rule.MaxScaleDenominator = MaxScaleDenominator
if symbolizer is None:
symbolizer = PointSymbolizer
sym = symbolizer(rule)
if symbolizer == PointSymbolizer:
gph = Graphic(sym)
mrk = Mark(gph)
mrk.WellKnownName = 'square'
fill = Fill(mrk)
fill.create_cssparameter('fill', '#ff0000')
elif symbolizer == LineSymbolizer:
stroke = Stroke(sym)
stroke.create_cssparameter('stroke', '#0000ff')
elif symbolizer == PolygonSymbolizer:
fill = Fill(sym)
fill.create_cssparameter('fill', '#AAAAAA')
stroke = Stroke(sym)
stroke.create_cssparameter('stroke', '#000000')
stroke.create_cssparameter('stroke-width', '1')
return rule |
def send_build_data(build_dir, data, secret,
response_url=None,clean_up=True):
'''finish build sends the build and data (response) to a response url
:param build_dir: the directory of the build
:response_url: where to send the response. If None, won't send
:param data: the data object to send as a post
:param clean_up: If true (default) removes build directory
'''
# Send with Authentication header
body = '%s|%s|%s|%s|%s' %(data['container_id'],
data['commit'],
data['branch'],
data['token'],
data['tag'])
signature = generate_header_signature(secret=secret,
payload=body,
request_type="push")
headers = {'Authorization': signature }
if response_url is not None:
finish = requests.post(response_url,data=data, headers=headers)
bot.debug("RECEIVE POST TO SINGULARITY HUB ---------------------")
bot.debug(finish.status_code)
bot.debug(finish.reason)
else:
bot.warning("response_url set to None, skipping sending of build.")
if clean_up == True:
shutil.rmtree(build_dir)
# Delay a bit, to give buffer between bringing instance down
time.sleep(20) | finish build sends the build and data (response) to a response url
:param build_dir: the directory of the build
:response_url: where to send the response. If None, won't send
:param data: the data object to send as a post
:param clean_up: If true (default) removes build directory | Below is the the instruction that describes the task:
### Input:
finish build sends the build and data (response) to a response url
:param build_dir: the directory of the build
:response_url: where to send the response. If None, won't send
:param data: the data object to send as a post
:param clean_up: If true (default) removes build directory
### Response:
def send_build_data(build_dir, data, secret,
response_url=None,clean_up=True):
'''finish build sends the build and data (response) to a response url
:param build_dir: the directory of the build
:response_url: where to send the response. If None, won't send
:param data: the data object to send as a post
:param clean_up: If true (default) removes build directory
'''
# Send with Authentication header
body = '%s|%s|%s|%s|%s' %(data['container_id'],
data['commit'],
data['branch'],
data['token'],
data['tag'])
signature = generate_header_signature(secret=secret,
payload=body,
request_type="push")
headers = {'Authorization': signature }
if response_url is not None:
finish = requests.post(response_url,data=data, headers=headers)
bot.debug("RECEIVE POST TO SINGULARITY HUB ---------------------")
bot.debug(finish.status_code)
bot.debug(finish.reason)
else:
bot.warning("response_url set to None, skipping sending of build.")
if clean_up == True:
shutil.rmtree(build_dir)
# Delay a bit, to give buffer between bringing instance down
time.sleep(20) |
def create_package(self):
"""
Ensure that the package can be properly configured,
and then create it.
"""
# Create the Lambda zip package (includes project and virtualenvironment)
# Also define the path the handler file so it can be copied to the zip
# root for Lambda.
current_file = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
handler_file = os.sep.join(current_file.split(os.sep)[
0:-2]) + os.sep + 'handler.py'
exclude = self.zappa_settings[self.api_stage].get('exclude', []) + ['static', 'media']
self.zip_path = self.zappa.create_lambda_zip(
self.lambda_name,
handler_file=handler_file,
use_precompiled_packages=self.zappa_settings[self.api_stage].get('use_precompiled_packages', True),
exclude=exclude
)
# Add this environment's Django settings to that zipfile
with open(self.settings_file, 'r') as f:
contents = f.read()
all_contents = contents
        if 'domain' not in self.zappa_settings[self.api_stage]:
script_name = self.api_stage
else:
script_name = ''
all_contents = all_contents + \
'\n# Automatically added by Zappa:\nSCRIPT_NAME=\'/' + script_name + '\'\n'
f.close()
with open('zappa_settings.py', 'w') as f:
f.write(all_contents)
with zipfile.ZipFile(self.zip_path, 'a') as lambda_zip:
lambda_zip.write('zappa_settings.py', 'zappa_settings.py')
lambda_zip.close()
os.unlink('zappa_settings.py') | Ensure that the package can be properly configured,
and then create it. | Below is the the instruction that describes the task:
### Input:
Ensure that the package can be properly configured,
and then create it.
### Response:
def create_package(self):
"""
Ensure that the package can be properly configured,
and then create it.
"""
# Create the Lambda zip package (includes project and virtualenvironment)
# Also define the path the handler file so it can be copied to the zip
# root for Lambda.
current_file = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
handler_file = os.sep.join(current_file.split(os.sep)[
0:-2]) + os.sep + 'handler.py'
exclude = self.zappa_settings[self.api_stage].get('exclude', []) + ['static', 'media']
self.zip_path = self.zappa.create_lambda_zip(
self.lambda_name,
handler_file=handler_file,
use_precompiled_packages=self.zappa_settings[self.api_stage].get('use_precompiled_packages', True),
exclude=exclude
)
# Add this environment's Django settings to that zipfile
with open(self.settings_file, 'r') as f:
contents = f.read()
all_contents = contents
        if 'domain' not in self.zappa_settings[self.api_stage]:
script_name = self.api_stage
else:
script_name = ''
all_contents = all_contents + \
'\n# Automatically added by Zappa:\nSCRIPT_NAME=\'/' + script_name + '\'\n'
f.close()
with open('zappa_settings.py', 'w') as f:
f.write(all_contents)
with zipfile.ZipFile(self.zip_path, 'a') as lambda_zip:
lambda_zip.write('zappa_settings.py', 'zappa_settings.py')
lambda_zip.close()
os.unlink('zappa_settings.py') |
def unignore_reports(self):
"""Remove ignoring of future reports on this object.
Undoes 'ignore_reports'. Future reports will now cause notifications
and appear in the various moderation listings.
"""
url = self.reddit_session.config['unignore_reports']
data = {'id': self.fullname}
return self.reddit_session.request_json(url, data=data) | Remove ignoring of future reports on this object.
Undoes 'ignore_reports'. Future reports will now cause notifications
and appear in the various moderation listings. | Below is the the instruction that describes the task:
### Input:
Remove ignoring of future reports on this object.
Undoes 'ignore_reports'. Future reports will now cause notifications
and appear in the various moderation listings.
### Response:
def unignore_reports(self):
"""Remove ignoring of future reports on this object.
Undoes 'ignore_reports'. Future reports will now cause notifications
and appear in the various moderation listings.
"""
url = self.reddit_session.config['unignore_reports']
data = {'id': self.fullname}
return self.reddit_session.request_json(url, data=data) |
def _take_batch(self, min_records):
'''If we have enough data to build a batch, returns all the data in the
buffer and then clears the buffer.'''
if not self._buffer:
return []
enough_messages = len(self._buffer) >= min_records
enough_time = time.time() - self.time_last_batch_sent >= self.batch_delay_seconds
ready = enough_messages or enough_time
if not ready:
return []
result = list(self._buffer)
self._buffer.clear()
return result | If we have enough data to build a batch, returns all the data in the
buffer and then clears the buffer. | Below is the the instruction that describes the task:
### Input:
If we have enough data to build a batch, returns all the data in the
buffer and then clears the buffer.
### Response:
def _take_batch(self, min_records):
'''If we have enough data to build a batch, returns all the data in the
buffer and then clears the buffer.'''
if not self._buffer:
return []
enough_messages = len(self._buffer) >= min_records
enough_time = time.time() - self.time_last_batch_sent >= self.batch_delay_seconds
ready = enough_messages or enough_time
if not ready:
return []
result = list(self._buffer)
self._buffer.clear()
return result |
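
The same "enough records or enough elapsed time" rule, pulled out of the class so it can be run directly (buffer contents and thresholds are made up):

import time
from collections import deque

buffer = deque(["a", "b", "c"])
time_last_batch_sent = time.time() - 10           # pretend the last batch left 10 s ago
min_records, batch_delay_seconds = 5, 3.0

enough_messages = len(buffer) >= min_records
enough_time = time.time() - time_last_batch_sent >= batch_delay_seconds
if buffer and (enough_messages or enough_time):
    batch = list(buffer)
    buffer.clear()
    print(batch)                                  # ['a', 'b', 'c'] -- released by the time rule
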
def vcmp_host(opt_vcmp_host, opt_username, opt_password, opt_port):
'''vcmp fixture'''
m = ManagementRoot(
opt_vcmp_host, opt_username, opt_password, port=opt_port)
return m | vcmp fixture | Below is the the instruction that describes the task:
### Input:
vcmp fixture
### Response:
def vcmp_host(opt_vcmp_host, opt_username, opt_password, opt_port):
'''vcmp fixture'''
m = ManagementRoot(
opt_vcmp_host, opt_username, opt_password, port=opt_port)
return m |
def stFeatureExtraction(signal, fs, win, step):
"""
    This function implements the short-term windowing process. For each short-term window a set of features is extracted.
    This results in a sequence of feature vectors, stored in a numpy matrix.
ARGUMENTS
signal: the input signal samples
fs: the sampling freq (in Hz)
win: the short-term window size (in samples)
step: the short-term window step (in samples)
RETURNS
st_features: a numpy array (n_feats x numOfShortTermWindows)
"""
win = int(win)
step = int(step)
# Signal normalization
signal = numpy.double(signal)
signal = signal / (2.0 ** 15)
DC = signal.mean()
MAX = (numpy.abs(signal)).max()
signal = (signal - DC) / (MAX + 0.0000000001)
N = len(signal) # total number of samples
cur_p = 0
count_fr = 0
nFFT = int(win / 2)
[fbank, freqs] = mfccInitFilterBanks(fs, nFFT) # compute the triangular filter banks used in the mfcc calculation
nChroma, nFreqsPerChroma = stChromaFeaturesInit(nFFT, fs)
n_time_spectral_feats = 8
n_harmonic_feats = 0
n_mfcc_feats = 13
n_chroma_feats = 13
n_total_feats = n_time_spectral_feats + n_mfcc_feats + n_harmonic_feats + n_chroma_feats
# n_total_feats = n_time_spectral_feats + n_mfcc_feats + n_harmonic_feats
feature_names = []
feature_names.append("zcr")
feature_names.append("energy")
feature_names.append("energy_entropy")
feature_names += ["spectral_centroid", "spectral_spread"]
feature_names.append("spectral_entropy")
feature_names.append("spectral_flux")
feature_names.append("spectral_rolloff")
feature_names += ["mfcc_{0:d}".format(mfcc_i)
for mfcc_i in range(1, n_mfcc_feats+1)]
feature_names += ["chroma_{0:d}".format(chroma_i)
for chroma_i in range(1, n_chroma_feats)]
feature_names.append("chroma_std")
st_features = []
while (cur_p + win - 1 < N): # for each short-term window until the end of signal
count_fr += 1
x = signal[cur_p:cur_p+win] # get current window
cur_p = cur_p + step # update window position
X = abs(fft(x)) # get fft magnitude
X = X[0:nFFT] # normalize fft
X = X / len(X)
if count_fr == 1:
X_prev = X.copy() # keep previous fft mag (used in spectral flux)
curFV = numpy.zeros((n_total_feats, 1))
curFV[0] = stZCR(x) # zero crossing rate
curFV[1] = stEnergy(x) # short-term energy
curFV[2] = stEnergyEntropy(x) # short-term entropy of energy
[curFV[3], curFV[4]] = stSpectralCentroidAndSpread(X, fs) # spectral centroid and spread
curFV[5] = stSpectralEntropy(X) # spectral entropy
curFV[6] = stSpectralFlux(X, X_prev) # spectral flux
curFV[7] = stSpectralRollOff(X, 0.90, fs) # spectral rolloff
curFV[n_time_spectral_feats:n_time_spectral_feats+n_mfcc_feats, 0] = \
stMFCC(X, fbank, n_mfcc_feats).copy() # MFCCs
chromaNames, chromaF = stChromaFeatures(X, fs, nChroma, nFreqsPerChroma)
curFV[n_time_spectral_feats + n_mfcc_feats:
n_time_spectral_feats + n_mfcc_feats + n_chroma_feats - 1] = \
chromaF
curFV[n_time_spectral_feats + n_mfcc_feats + n_chroma_feats - 1] = \
chromaF.std()
st_features.append(curFV)
# delta features
'''
if count_fr>1:
delta = curFV - prevFV
curFVFinal = numpy.concatenate((curFV, delta))
else:
curFVFinal = numpy.concatenate((curFV, curFV))
prevFV = curFV
st_features.append(curFVFinal)
'''
# end of delta
X_prev = X.copy()
st_features = numpy.concatenate(st_features, 1)
    return st_features, feature_names | This function implements the short-term windowing process. For each short-term window a set of features is extracted.
This results in a sequence of feature vectors, stored in a numpy matrix.
ARGUMENTS
signal: the input signal samples
fs: the sampling freq (in Hz)
win: the short-term window size (in samples)
step: the short-term window step (in samples)
RETURNS
st_features: a numpy array (n_feats x numOfShortTermWindows) | Below is the the instruction that describes the task:
### Input:
This function implements the short-term windowing process. For each short-term window a set of features is extracted.
This results in a sequence of feature vectors, stored in a numpy matrix.
ARGUMENTS
signal: the input signal samples
fs: the sampling freq (in Hz)
win: the short-term window size (in samples)
step: the short-term window step (in samples)
RETURNS
st_features: a numpy array (n_feats x numOfShortTermWindows)
### Response:
def stFeatureExtraction(signal, fs, win, step):
"""
    This function implements the short-term windowing process. For each short-term window a set of features is extracted.
    This results in a sequence of feature vectors, stored in a numpy matrix.
ARGUMENTS
signal: the input signal samples
fs: the sampling freq (in Hz)
win: the short-term window size (in samples)
step: the short-term window step (in samples)
RETURNS
st_features: a numpy array (n_feats x numOfShortTermWindows)
"""
win = int(win)
step = int(step)
# Signal normalization
signal = numpy.double(signal)
signal = signal / (2.0 ** 15)
DC = signal.mean()
MAX = (numpy.abs(signal)).max()
signal = (signal - DC) / (MAX + 0.0000000001)
N = len(signal) # total number of samples
cur_p = 0
count_fr = 0
nFFT = int(win / 2)
[fbank, freqs] = mfccInitFilterBanks(fs, nFFT) # compute the triangular filter banks used in the mfcc calculation
nChroma, nFreqsPerChroma = stChromaFeaturesInit(nFFT, fs)
n_time_spectral_feats = 8
n_harmonic_feats = 0
n_mfcc_feats = 13
n_chroma_feats = 13
n_total_feats = n_time_spectral_feats + n_mfcc_feats + n_harmonic_feats + n_chroma_feats
# n_total_feats = n_time_spectral_feats + n_mfcc_feats + n_harmonic_feats
feature_names = []
feature_names.append("zcr")
feature_names.append("energy")
feature_names.append("energy_entropy")
feature_names += ["spectral_centroid", "spectral_spread"]
feature_names.append("spectral_entropy")
feature_names.append("spectral_flux")
feature_names.append("spectral_rolloff")
feature_names += ["mfcc_{0:d}".format(mfcc_i)
for mfcc_i in range(1, n_mfcc_feats+1)]
feature_names += ["chroma_{0:d}".format(chroma_i)
for chroma_i in range(1, n_chroma_feats)]
feature_names.append("chroma_std")
st_features = []
while (cur_p + win - 1 < N): # for each short-term window until the end of signal
count_fr += 1
x = signal[cur_p:cur_p+win] # get current window
cur_p = cur_p + step # update window position
X = abs(fft(x)) # get fft magnitude
X = X[0:nFFT] # normalize fft
X = X / len(X)
if count_fr == 1:
X_prev = X.copy() # keep previous fft mag (used in spectral flux)
curFV = numpy.zeros((n_total_feats, 1))
curFV[0] = stZCR(x) # zero crossing rate
curFV[1] = stEnergy(x) # short-term energy
curFV[2] = stEnergyEntropy(x) # short-term entropy of energy
[curFV[3], curFV[4]] = stSpectralCentroidAndSpread(X, fs) # spectral centroid and spread
curFV[5] = stSpectralEntropy(X) # spectral entropy
curFV[6] = stSpectralFlux(X, X_prev) # spectral flux
curFV[7] = stSpectralRollOff(X, 0.90, fs) # spectral rolloff
curFV[n_time_spectral_feats:n_time_spectral_feats+n_mfcc_feats, 0] = \
stMFCC(X, fbank, n_mfcc_feats).copy() # MFCCs
chromaNames, chromaF = stChromaFeatures(X, fs, nChroma, nFreqsPerChroma)
curFV[n_time_spectral_feats + n_mfcc_feats:
n_time_spectral_feats + n_mfcc_feats + n_chroma_feats - 1] = \
chromaF
curFV[n_time_spectral_feats + n_mfcc_feats + n_chroma_feats - 1] = \
chromaF.std()
st_features.append(curFV)
# delta features
'''
if count_fr>1:
delta = curFV - prevFV
curFVFinal = numpy.concatenate((curFV, delta))
else:
curFVFinal = numpy.concatenate((curFV, curFV))
prevFV = curFV
st_features.append(curFVFinal)
'''
# end of delta
X_prev = X.copy()
st_features = numpy.concatenate(st_features, 1)
return st_features, feature_names |
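
A hedged usage sketch: it assumes the helpers the implementation calls (mfccInitFilterBanks, stZCR, stMFCC, stChromaFeatures, ...) are available in the same module, and uses synthetic noise in place of real audio.

import numpy

fs = 16000                                    # 16 kHz sampling rate
signal = numpy.random.randn(2 * fs)           # 2 seconds of noise standing in for audio
win, step = int(0.050 * fs), int(0.025 * fs)  # 50 ms windows with a 25 ms hop

features, names = stFeatureExtraction(signal, fs, win, step)
print(features.shape)                         # (34, number_of_short_term_windows)
print(names[:3])                              # ['zcr', 'energy', 'energy_entropy']
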
def _parse_date(self, raw_value):
"Convert raw data to datetime.date"
nday = bytes_to_bint(raw_value) + 678882
century = (4 * nday - 1) // 146097
nday = 4 * nday - 1 - 146097 * century
day = nday // 4
nday = (4 * day + 3) // 1461
day = 4 * day + 3 - 1461 * nday
day = (day + 4) // 4
month = (5 * day - 3) // 153
day = 5 * day - 3 - 153 * month
day = (day + 5) // 5
year = 100 * century + nday
if month < 10:
month += 3
else:
month -= 9
year += 1
return year, month, day | Convert raw data to datetime.date | Below is the the instruction that describes the task:
### Input:
Convert raw data to datetime.date
### Response:
def _parse_date(self, raw_value):
"Convert raw data to datetime.date"
nday = bytes_to_bint(raw_value) + 678882
century = (4 * nday - 1) // 146097
nday = 4 * nday - 1 - 146097 * century
day = nday // 4
nday = (4 * day + 3) // 1461
day = 4 * day + 3 - 1461 * nday
day = (day + 4) // 4
month = (5 * day - 3) // 153
day = 5 * day - 3 - 153 * month
day = (day + 5) // 5
year = 100 * century + nday
if month < 10:
month += 3
else:
month -= 9
year += 1
return year, month, day |
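
The day-number arithmetic can be checked on its own by feeding it a plain integer instead of the raw big-endian bytes; day zero of this encoding is the Modified Julian Day epoch. A standalone re-derivation (not the class method itself):

def parse_day_number(day_count):
    nday = day_count + 678882
    century = (4 * nday - 1) // 146097
    nday = 4 * nday - 1 - 146097 * century
    day = nday // 4
    nday = (4 * day + 3) // 1461
    day = 4 * day + 3 - 1461 * nday
    day = (day + 4) // 4
    month = (5 * day - 3) // 153
    day = 5 * day - 3 - 153 * month
    day = (day + 5) // 5
    year = 100 * century + nday
    if month < 10:
        month += 3
    else:
        month -= 9
        year += 1
    return year, month, day

print(parse_day_number(0))   # (1858, 11, 17) -- the Modified Julian Day epoch
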
def get_role_by_account_sis_id(self, account_sis_id, role_id):
"""
Get information about a single role, for the passed account SIS ID.
"""
return self.get_role(self._sis_id(account_sis_id, sis_field="account"),
role_id) | Get information about a single role, for the passed account SIS ID. | Below is the the instruction that describes the task:
### Input:
Get information about a single role, for the passed account SIS ID.
### Response:
def get_role_by_account_sis_id(self, account_sis_id, role_id):
"""
Get information about a single role, for the passed account SIS ID.
"""
return self.get_role(self._sis_id(account_sis_id, sis_field="account"),
role_id) |
def _contains_egg_info(
s, _egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)):
"""Determine whether the string looks like an egg_info.
:param s: The string to parse. E.g. foo-2.1
"""
return bool(_egg_info_re.search(s)) | Determine whether the string looks like an egg_info.
:param s: The string to parse. E.g. foo-2.1 | Below is the the instruction that describes the task:
### Input:
Determine whether the string looks like an egg_info.
:param s: The string to parse. E.g. foo-2.1
### Response:
def _contains_egg_info(
s, _egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)):
"""Determine whether the string looks like an egg_info.
:param s: The string to parse. E.g. foo-2.1
"""
return bool(_egg_info_re.search(s)) |
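
A quick check of the pattern in isolation: a name-dash-version string matches, a bare name does not.

import re

_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)
print(bool(_egg_info_re.search('foo-2.1')))   # True
print(bool(_egg_info_re.search('foo')))       # False
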
def declarative(member_class=None, parameter='members', add_init_kwargs=True, sort_key=default_sort_key, is_member=None):
"""
Class decorator to enable classes to be defined in the style of django models.
That is, @declarative classes will get an additional argument to constructor,
containing an OrderedDict with all class members matching the specified type.
:param class member_class: Class(es) to collect
:param is_member: Function to determine if an object should be collected
:param str parameter: Name of constructor parameter to inject
:param bool add_init_kwargs: If constructor parameter should be injected (Default: True)
:param sort_key: Function to invoke on members to obtain ordering (Default is to use ordering from `creation_ordered`)
:type is_member: (object) -> bool
:type sort_key: (object) -> object
"""
if member_class is None and is_member is None:
raise TypeError("The @declarative decorator needs either a member_class parameter or an is_member check function (or both)")
def decorator(class_to_decorate):
class DeclarativeMeta(class_to_decorate.__class__):
# noinspection PyTypeChecker
def __init__(cls, name, bases, dict):
members = get_members(cls, member_class=member_class, is_member=is_member, sort_key=sort_key, _parameter=parameter)
set_declared(cls, members, parameter)
super(DeclarativeMeta, cls).__init__(name, bases, dict)
new_class = DeclarativeMeta(class_to_decorate.__name__,
class_to_decorate.__bases__,
{k: v for k, v in class_to_decorate.__dict__.items() if k not in ['__dict__', '__weakref__']})
def get_extra_args_function(self):
declared = get_declared(self, parameter)
def copy_declared():
for k, v in declared.items():
try:
v = copy(v)
except TypeError:
pass # Not always possible to copy methods
yield (k, v)
copied_members = OrderedDict(copy_declared())
self.__dict__.update(copied_members)
return {parameter: copied_members}
if add_init_kwargs:
add_args_to_init_call(new_class, get_extra_args_function)
else:
add_init_call_hook(new_class, get_extra_args_function)
setattr(new_class, 'get_declared', classmethod(get_declared))
setattr(new_class, 'set_declared', classmethod(set_declared))
return new_class
return decorator | Class decorator to enable classes to be defined in the style of django models.
That is, @declarative classes will get an additional argument to constructor,
containing an OrderedDict with all class members matching the specified type.
:param class member_class: Class(es) to collect
:param is_member: Function to determine if an object should be collected
:param str parameter: Name of constructor parameter to inject
:param bool add_init_kwargs: If constructor parameter should be injected (Default: True)
:param sort_key: Function to invoke on members to obtain ordering (Default is to use ordering from `creation_ordered`)
:type is_member: (object) -> bool
:type sort_key: (object) -> object | Below is the the instruction that describes the task:
### Input:
Class decorator to enable classes to be defined in the style of django models.
That is, @declarative classes will get an additional argument to constructor,
containing an OrderedDict with all class members matching the specified type.
:param class member_class: Class(es) to collect
:param is_member: Function to determine if an object should be collected
:param str parameter: Name of constructor parameter to inject
:param bool add_init_kwargs: If constructor parameter should be injected (Default: True)
:param sort_key: Function to invoke on members to obtain ordering (Default is to use ordering from `creation_ordered`)
:type is_member: (object) -> bool
:type sort_key: (object) -> object
### Response:
def declarative(member_class=None, parameter='members', add_init_kwargs=True, sort_key=default_sort_key, is_member=None):
"""
Class decorator to enable classes to be defined in the style of django models.
That is, @declarative classes will get an additional argument to constructor,
containing an OrderedDict with all class members matching the specified type.
:param class member_class: Class(es) to collect
:param is_member: Function to determine if an object should be collected
:param str parameter: Name of constructor parameter to inject
:param bool add_init_kwargs: If constructor parameter should be injected (Default: True)
:param sort_key: Function to invoke on members to obtain ordering (Default is to use ordering from `creation_ordered`)
:type is_member: (object) -> bool
:type sort_key: (object) -> object
"""
if member_class is None and is_member is None:
raise TypeError("The @declarative decorator needs either a member_class parameter or an is_member check function (or both)")
def decorator(class_to_decorate):
class DeclarativeMeta(class_to_decorate.__class__):
# noinspection PyTypeChecker
def __init__(cls, name, bases, dict):
members = get_members(cls, member_class=member_class, is_member=is_member, sort_key=sort_key, _parameter=parameter)
set_declared(cls, members, parameter)
super(DeclarativeMeta, cls).__init__(name, bases, dict)
new_class = DeclarativeMeta(class_to_decorate.__name__,
class_to_decorate.__bases__,
{k: v for k, v in class_to_decorate.__dict__.items() if k not in ['__dict__', '__weakref__']})
def get_extra_args_function(self):
declared = get_declared(self, parameter)
def copy_declared():
for k, v in declared.items():
try:
v = copy(v)
except TypeError:
pass # Not always possible to copy methods
yield (k, v)
copied_members = OrderedDict(copy_declared())
self.__dict__.update(copied_members)
return {parameter: copied_members}
if add_init_kwargs:
add_args_to_init_call(new_class, get_extra_args_function)
else:
add_init_call_hook(new_class, get_extra_args_function)
setattr(new_class, 'get_declared', classmethod(get_declared))
setattr(new_class, 'set_declared', classmethod(set_declared))
return new_class
return decorator |
def _create_array(self, arr: np.ndarray) -> int:
"""Returns the handle of a RawArray created from the given numpy array.
Args:
arr: A numpy ndarray.
Returns:
The handle (int) of the array.
Raises:
ValueError: if arr is not a ndarray or of an unsupported dtype. If
the array is of an unsupported type, using a view of the array to
another dtype and then converting on get is often a work around.
"""
if not isinstance(arr, np.ndarray):
raise ValueError('Array is not a numpy ndarray.')
try:
c_arr = np.ctypeslib.as_ctypes(arr)
except (KeyError, NotImplementedError):
raise ValueError(
'Array has unsupported dtype {}.'.format(arr.dtype))
# pylint: disable=protected-access
raw_arr = RawArray(c_arr._type_, c_arr)
with self._lock:
if self._count >= len(self._arrays):
self._arrays += len(self._arrays) * [None]
self._get_next_free()
# Note storing the shape is a workaround for an issue encountered
# when upgrading to numpy 1.15.
# See https://github.com/numpy/numpy/issues/11636
self._arrays[self._current] = (raw_arr, arr.shape)
self._count += 1
return self._current | Returns the handle of a RawArray created from the given numpy array.
Args:
arr: A numpy ndarray.
Returns:
The handle (int) of the array.
Raises:
ValueError: if arr is not a ndarray or of an unsupported dtype. If
the array is of an unsupported type, using a view of the array to
another dtype and then converting on get is often a work around. | Below is the the instruction that describes the task:
### Input:
Returns the handle of a RawArray created from the given numpy array.
Args:
arr: A numpy ndarray.
Returns:
The handle (int) of the array.
Raises:
ValueError: if arr is not a ndarray or of an unsupported dtype. If
the array is of an unsupported type, using a view of the array to
another dtype and then converting on get is often a work around.
### Response:
def _create_array(self, arr: np.ndarray) -> int:
"""Returns the handle of a RawArray created from the given numpy array.
Args:
arr: A numpy ndarray.
Returns:
The handle (int) of the array.
Raises:
ValueError: if arr is not a ndarray or of an unsupported dtype. If
the array is of an unsupported type, using a view of the array to
another dtype and then converting on get is often a work around.
"""
if not isinstance(arr, np.ndarray):
raise ValueError('Array is not a numpy ndarray.')
try:
c_arr = np.ctypeslib.as_ctypes(arr)
except (KeyError, NotImplementedError):
raise ValueError(
'Array has unsupported dtype {}.'.format(arr.dtype))
# pylint: disable=protected-access
raw_arr = RawArray(c_arr._type_, c_arr)
with self._lock:
if self._count >= len(self._arrays):
self._arrays += len(self._arrays) * [None]
self._get_next_free()
# Note storing the shape is a workaround for an issue encountered
# when upgrading to numpy 1.15.
# See https://github.com/numpy/numpy/issues/11636
self._arrays[self._current] = (raw_arr, arr.shape)
self._count += 1
return self._current |
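
The core round trip in isolation (a sketch outside the handle-management class): copy a numpy array into a RawArray, then rebuild a numpy view from the shared buffer and the remembered shape.

import numpy as np
from multiprocessing.sharedctypes import RawArray

arr = np.arange(6, dtype=np.float64).reshape(2, 3)
c_arr = np.ctypeslib.as_ctypes(arr)
raw_arr = RawArray(c_arr._type_, c_arr)            # process-shareable copy of the data

restored = np.frombuffer(raw_arr, dtype=arr.dtype).reshape(2, 3)
print(np.array_equal(arr, restored))               # True
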
def getcomments(self):
"""
Returns an array of comment dictionaries for this bug
"""
comment_list = self.bugzilla.get_comments([self.bug_id])
return comment_list['bugs'][str(self.bug_id)]['comments'] | Returns an array of comment dictionaries for this bug | Below is the the instruction that describes the task:
### Input:
Returns an array of comment dictionaries for this bug
### Response:
def getcomments(self):
"""
Returns an array of comment dictionaries for this bug
"""
comment_list = self.bugzilla.get_comments([self.bug_id])
return comment_list['bugs'][str(self.bug_id)]['comments'] |
def _add_sub_elements_from_dict(parent, sub_dict):
"""
Add SubElements to the parent element.
:param parent: ElementTree.Element: The parent element for the newly created SubElement.
:param sub_dict: dict: Used to create a new SubElement. See `dict_to_xml_schema`
method docstring for more information. e.g.:
{"example": {
"attrs": {
"key1": "value1",
...
},
...
}}
"""
for key, value in sub_dict.items():
if isinstance(value, list):
for repeated_element in value:
sub_element = ET.SubElement(parent, key)
_add_element_attrs(sub_element, repeated_element.get("attrs", {}))
children = repeated_element.get("children", None)
if isinstance(children, dict):
_add_sub_elements_from_dict(sub_element, children)
elif isinstance(children, str):
sub_element.text = children
else:
sub_element = ET.SubElement(parent, key)
_add_element_attrs(sub_element, value.get("attrs", {}))
children = value.get("children", None)
if isinstance(children, dict):
_add_sub_elements_from_dict(sub_element, children)
elif isinstance(children, str):
sub_element.text = children | Add SubElements to the parent element.
:param parent: ElementTree.Element: The parent element for the newly created SubElement.
:param sub_dict: dict: Used to create a new SubElement. See `dict_to_xml_schema`
method docstring for more information. e.g.:
{"example": {
"attrs": {
"key1": "value1",
...
},
...
}} | Below is the the instruction that describes the task:
### Input:
Add SubElements to the parent element.
:param parent: ElementTree.Element: The parent element for the newly created SubElement.
:param sub_dict: dict: Used to create a new SubElement. See `dict_to_xml_schema`
method docstring for more information. e.g.:
{"example": {
"attrs": {
"key1": "value1",
...
},
...
}}
### Response:
def _add_sub_elements_from_dict(parent, sub_dict):
"""
Add SubElements to the parent element.
:param parent: ElementTree.Element: The parent element for the newly created SubElement.
:param sub_dict: dict: Used to create a new SubElement. See `dict_to_xml_schema`
method docstring for more information. e.g.:
{"example": {
"attrs": {
"key1": "value1",
...
},
...
}}
"""
for key, value in sub_dict.items():
if isinstance(value, list):
for repeated_element in value:
sub_element = ET.SubElement(parent, key)
_add_element_attrs(sub_element, repeated_element.get("attrs", {}))
children = repeated_element.get("children", None)
if isinstance(children, dict):
_add_sub_elements_from_dict(sub_element, children)
elif isinstance(children, str):
sub_element.text = children
else:
sub_element = ET.SubElement(parent, key)
_add_element_attrs(sub_element, value.get("attrs", {}))
children = value.get("children", None)
if isinstance(children, dict):
_add_sub_elements_from_dict(sub_element, children)
elif isinstance(children, str):
sub_element.text = children |
def create_df_file_with_query(self, query, output):
""" Dumps in df in chunks to avoid crashes.
"""
chunk_size = 100000
offset = 0
data = defaultdict(lambda : defaultdict(list))
with open(output, 'wb') as outfile:
query = query.replace(';', '')
query += """ LIMIT {chunk_size} OFFSET {offset};"""
while True:
print(offset)
            chunk_query = query.format(  # format a copy so the template keeps its placeholders
                chunk_size=chunk_size,
                offset=offset
            )
            df = pd.read_sql(chunk_query, self.engine)
pickle.dump(df, outfile)
offset += chunk_size
if len(df) < chunk_size:
break
outfile.close() | Dumps in df in chunks to avoid crashes. | Below is the the instruction that describes the task:
### Input:
Dumps in df in chunks to avoid crashes.
### Response:
def create_df_file_with_query(self, query, output):
""" Dumps in df in chunks to avoid crashes.
"""
chunk_size = 100000
offset = 0
data = defaultdict(lambda : defaultdict(list))
with open(output, 'wb') as outfile:
query = query.replace(';', '')
query += """ LIMIT {chunk_size} OFFSET {offset};"""
while True:
print(offset)
            chunk_query = query.format(  # format a copy so the template keeps its placeholders
                chunk_size=chunk_size,
                offset=offset
            )
            df = pd.read_sql(chunk_query, self.engine)
pickle.dump(df, outfile)
offset += chunk_size
if len(df) < chunk_size:
break
outfile.close() |
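
Reading the dump back mirrors the writing loop: every pickle.load call returns one DataFrame chunk, so iterate until EOF and concatenate (the path below is hypothetical).

import pickle
import pandas as pd

chunks = []
with open("query_dump.pkl", "rb") as infile:   # hypothetical output path
    while True:
        try:
            chunks.append(pickle.load(infile))
        except EOFError:
            break
df = pd.concat(chunks, ignore_index=True)
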
def decrypt(user=None,
text=None,
filename=None,
output=None,
use_passphrase=False,
gnupghome=None,
bare=False):
'''
Decrypt a message or file
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
text
The encrypted text to decrypt.
filename
The encrypted filename to decrypt.
output
The filename where the decrypted data will be written, default is standard out.
use_passphrase
Whether to use a passphrase with the signing key. Passphrase is received
from Pillar.
gnupghome
Specify the location where GPG keyring and related files are stored.
bare
If ``True``, return the (armored) decrypted block as a string without the
standard comment/res dict.
CLI Example:
.. code-block:: bash
salt '*' gpg.decrypt filename='/path/to/important.file.gpg'
salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True
'''
ret = {
'res': True,
'comment': ''
}
gpg = _create_gpg(user, gnupghome)
if use_passphrase:
gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
if not gpg_passphrase:
raise SaltInvocationError('gpg_passphrase not available in pillar.')
gpg_passphrase = gpg_passphrase['gpg_passphrase']
else:
gpg_passphrase = None
if text:
result = gpg.decrypt(text, passphrase=gpg_passphrase)
elif filename:
with salt.utils.files.flopen(filename, 'rb') as _fp:
if output:
result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
else:
result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
else:
raise SaltInvocationError('filename or text must be passed.')
if result.ok:
if not bare:
if output:
ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
else:
ret['comment'] = result.data
else:
ret = result.data
else:
if not bare:
ret['res'] = False
ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
else:
ret = False
log.error(result.stderr)
return ret | Decrypt a message or file
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
text
The encrypted text to decrypt.
filename
The encrypted filename to decrypt.
output
The filename where the decrypted data will be written, default is standard out.
use_passphrase
Whether to use a passphrase with the signing key. Passphrase is received
from Pillar.
gnupghome
Specify the location where GPG keyring and related files are stored.
bare
If ``True``, return the (armored) decrypted block as a string without the
standard comment/res dict.
CLI Example:
.. code-block:: bash
salt '*' gpg.decrypt filename='/path/to/important.file.gpg'
salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True | Below is the the instruction that describes the task:
### Input:
Decrypt a message or file
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
text
The encrypted text to decrypt.
filename
The encrypted filename to decrypt.
output
The filename where the decrypted data will be written, default is standard out.
use_passphrase
Whether to use a passphrase with the signing key. Passphrase is received
from Pillar.
gnupghome
Specify the location where GPG keyring and related files are stored.
bare
If ``True``, return the (armored) decrypted block as a string without the
standard comment/res dict.
CLI Example:
.. code-block:: bash
salt '*' gpg.decrypt filename='/path/to/important.file.gpg'
salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True
### Response:
def decrypt(user=None,
text=None,
filename=None,
output=None,
use_passphrase=False,
gnupghome=None,
bare=False):
'''
Decrypt a message or file
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
text
The encrypted text to decrypt.
filename
The encrypted filename to decrypt.
output
The filename where the decrypted data will be written, default is standard out.
use_passphrase
Whether to use a passphrase with the signing key. Passphrase is received
from Pillar.
gnupghome
Specify the location where GPG keyring and related files are stored.
bare
If ``True``, return the (armored) decrypted block as a string without the
standard comment/res dict.
CLI Example:
.. code-block:: bash
salt '*' gpg.decrypt filename='/path/to/important.file.gpg'
salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True
'''
ret = {
'res': True,
'comment': ''
}
gpg = _create_gpg(user, gnupghome)
if use_passphrase:
gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
if not gpg_passphrase:
raise SaltInvocationError('gpg_passphrase not available in pillar.')
gpg_passphrase = gpg_passphrase['gpg_passphrase']
else:
gpg_passphrase = None
if text:
result = gpg.decrypt(text, passphrase=gpg_passphrase)
elif filename:
with salt.utils.files.flopen(filename, 'rb') as _fp:
if output:
result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
else:
result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
else:
raise SaltInvocationError('filename or text must be passed.')
if result.ok:
if not bare:
if output:
ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
else:
ret['comment'] = result.data
else:
ret = result.data
else:
if not bare:
ret['res'] = False
ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
else:
ret = False
log.error(result.stderr)
return ret |
def p_common_scalar_magic_dir(p):
'common_scalar : DIR'
value = getattr(p.lexer, 'filename', None)
if value is not None:
value = os.path.dirname(value)
p[0] = ast.MagicConstant(p[1].upper(), value, lineno=p.lineno(1)) | common_scalar : DIR | Below is the the instruction that describes the task:
### Input:
common_scalar : DIR
### Response:
def p_common_scalar_magic_dir(p):
'common_scalar : DIR'
value = getattr(p.lexer, 'filename', None)
if value is not None:
value = os.path.dirname(value)
p[0] = ast.MagicConstant(p[1].upper(), value, lineno=p.lineno(1)) |
def _getCosimulation(self, func, **kwargs):
''' Returns a co-simulation instance of func.
Uses the _simulator specified by self._simulator.
Enables traces if self._trace is True
func - MyHDL function to be simulated
kwargs - dict of func interface assignments: for signals and parameters
'''
vals = {}
vals['topname'] = func.func_name
vals['unitname'] = func.func_name.lower()
hdlsim = self._simulator
if not hdlsim:
raise ValueError("No _simulator specified")
if not self.sim_reg.has_key(hdlsim):
raise ValueError("Simulator {} is not registered".format(hdlsim))
hdl, analyze_cmd, elaborate_cmd, simulate_cmd = self.sim_reg[hdlsim]
# Convert to HDL
if hdl == "verilog":
toVerilog(func, **kwargs)
if self._trace:
self._enableTracesVerilog("./tb_{topname}.v".format(**vals))
elif hdl == "vhdl":
toVHDL(func, **kwargs)
# Analyze HDL
os.system(analyze_cmd.format(**vals))
# Elaborate
if elaborate_cmd:
os.system(elaborate_cmd.format(**vals))
# Simulate
return Cosimulation(simulate_cmd.format(**vals), **kwargs) | Returns a co-simulation instance of func.
Uses the _simulator specified by self._simulator.
Enables traces if self._trace is True
func - MyHDL function to be simulated
kwargs - dict of func interface assignments: for signals and parameters | Below is the the instruction that describes the task:
### Input:
Returns a co-simulation instance of func.
Uses the _simulator specified by self._simulator.
Enables traces if self._trace is True
func - MyHDL function to be simulated
kwargs - dict of func interface assignments: for signals and parameters
### Response:
def _getCosimulation(self, func, **kwargs):
''' Returns a co-simulation instance of func.
Uses the _simulator specified by self._simulator.
Enables traces if self._trace is True
func - MyHDL function to be simulated
kwargs - dict of func interface assignments: for signals and parameters
'''
vals = {}
vals['topname'] = func.func_name
vals['unitname'] = func.func_name.lower()
hdlsim = self._simulator
if not hdlsim:
raise ValueError("No _simulator specified")
if not self.sim_reg.has_key(hdlsim):
raise ValueError("Simulator {} is not registered".format(hdlsim))
hdl, analyze_cmd, elaborate_cmd, simulate_cmd = self.sim_reg[hdlsim]
# Convert to HDL
if hdl == "verilog":
toVerilog(func, **kwargs)
if self._trace:
self._enableTracesVerilog("./tb_{topname}.v".format(**vals))
elif hdl == "vhdl":
toVHDL(func, **kwargs)
# Analyze HDL
os.system(analyze_cmd.format(**vals))
# Elaborate
if elaborate_cmd:
os.system(elaborate_cmd.format(**vals))
# Simulate
return Cosimulation(simulate_cmd.format(**vals), **kwargs) |
def create(self, body=values.unset, media_url=values.unset):
"""
Create a new MessageInteractionInstance
:param unicode body: Message body
:param unicode media_url: Reserved
:returns: Newly created MessageInteractionInstance
:rtype: twilio.rest.proxy.v1.service.session.participant.message_interaction.MessageInteractionInstance
"""
data = values.of({'Body': body, 'MediaUrl': serialize.map(media_url, lambda e: e), })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return MessageInteractionInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
session_sid=self._solution['session_sid'],
participant_sid=self._solution['participant_sid'],
) | Create a new MessageInteractionInstance
:param unicode body: Message body
:param unicode media_url: Reserved
:returns: Newly created MessageInteractionInstance
:rtype: twilio.rest.proxy.v1.service.session.participant.message_interaction.MessageInteractionInstance | Below is the the instruction that describes the task:
### Input:
Create a new MessageInteractionInstance
:param unicode body: Message body
:param unicode media_url: Reserved
:returns: Newly created MessageInteractionInstance
:rtype: twilio.rest.proxy.v1.service.session.participant.message_interaction.MessageInteractionInstance
### Response:
def create(self, body=values.unset, media_url=values.unset):
"""
Create a new MessageInteractionInstance
:param unicode body: Message body
:param unicode media_url: Reserved
:returns: Newly created MessageInteractionInstance
:rtype: twilio.rest.proxy.v1.service.session.participant.message_interaction.MessageInteractionInstance
"""
data = values.of({'Body': body, 'MediaUrl': serialize.map(media_url, lambda e: e), })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return MessageInteractionInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
session_sid=self._solution['session_sid'],
participant_sid=self._solution['participant_sid'],
) |
def merge_pres_feats(pres, features):
"""
Helper function to merge pres and features to support legacy features argument
"""
sub = []
for psub, fsub in zip(pres, features):
exp = []
for pexp, fexp in zip(psub, fsub):
lst = []
for p, f in zip(pexp, fexp):
p.update(f)
lst.append(p)
exp.append(lst)
sub.append(exp)
return sub | Helper function to merge pres and features to support legacy features argument | Below is the the instruction that describes the task:
### Input:
Helper function to merge pres and features to support legacy features argument
### Response:
def merge_pres_feats(pres, features):
"""
Helper function to merge pres and features to support legacy features argument
"""
sub = []
for psub, fsub in zip(pres, features):
exp = []
for pexp, fexp in zip(psub, fsub):
lst = []
for p, f in zip(pexp, fexp):
p.update(f)
lst.append(p)
exp.append(lst)
sub.append(exp)
return sub |
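For illustration, a minimal made-up input shows what the merge produces: each presentation dict is updated in place with its matching feature dict.
pres = [[[{'item': 'cat'}, {'item': 'dog'}]]]        # one subject, one experiment, two items
features = [[[{'size': 'small'}, {'size': 'big'}]]]  # matching feature dicts
merged = merge_pres_feats(pres, features)
# merged == [[[{'item': 'cat', 'size': 'small'}, {'item': 'dog', 'size': 'big'}]]]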
def reverse_reference(self):
'''Changes the coordinates as if the reference sequence has been reverse complemented'''
self.ref_start = self.ref_length - self.ref_start - 1
self.ref_end = self.ref_length - self.ref_end - 1 | Changes the coordinates as if the reference sequence has been reverse complemented | Below is the the instruction that describes the task:
### Input:
Changes the coordinates as if the reference sequence has been reverse complemented
### Response:
def reverse_reference(self):
'''Changes the coordinates as if the reference sequence has been reverse complemented'''
self.ref_start = self.ref_length - self.ref_start - 1
self.ref_end = self.ref_length - self.ref_end - 1 |
def apdu_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
if _debug: APCISequence._debug("apdu_contents use_dict=%r as_class=%r", use_dict, as_class)
# make/extend the dictionary of content
if use_dict is None:
use_dict = as_class()
# set the function based on the class name
use_dict.__setitem__('function', self.__class__.__name__)
# fill in from the sequence contents
Sequence.dict_contents(self, use_dict=use_dict, as_class=as_class)
# return what we built/updated
return use_dict | Return the contents of an object as a dict. | Below is the the instruction that describes the task:
### Input:
Return the contents of an object as a dict.
### Response:
def apdu_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
if _debug: APCISequence._debug("apdu_contents use_dict=%r as_class=%r", use_dict, as_class)
# make/extend the dictionary of content
if use_dict is None:
use_dict = as_class()
# set the function based on the class name
use_dict.__setitem__('function', self.__class__.__name__)
# fill in from the sequence contents
Sequence.dict_contents(self, use_dict=use_dict, as_class=as_class)
# return what we built/updated
return use_dict |
def pack(self):
""" Pack message to binary stream. """
payload = io.BytesIO()
# Advance num bytes equal to header size - the header is written later
# after the payload of all segments and parts has been written:
payload.seek(self.header_size, io.SEEK_CUR)
# Write out payload of segments and parts:
self.build_payload(payload)
packet_length = len(payload.getvalue()) - self.header_size
self.header = MessageHeader(self.session_id, self.packet_count, packet_length, constants.MAX_SEGMENT_SIZE,
num_segments=len(self.segments), packet_options=0)
packed_header = self.header_struct.pack(*self.header)
    # Go back to beginning of payload for writing message header:
payload.seek(0)
payload.write(packed_header)
payload.seek(0, io.SEEK_END)
trace(self)
return payload | Pack message to binary stream. | Below is the the instruction that describes the task:
### Input:
Pack message to binary stream.
### Response:
def pack(self):
""" Pack message to binary stream. """
payload = io.BytesIO()
# Advance num bytes equal to header size - the header is written later
# after the payload of all segments and parts has been written:
payload.seek(self.header_size, io.SEEK_CUR)
# Write out payload of segments and parts:
self.build_payload(payload)
packet_length = len(payload.getvalue()) - self.header_size
self.header = MessageHeader(self.session_id, self.packet_count, packet_length, constants.MAX_SEGMENT_SIZE,
num_segments=len(self.segments), packet_options=0)
packed_header = self.header_struct.pack(*self.header)
# Go back to begining of payload for writing message header:
payload.seek(0)
payload.write(packed_header)
payload.seek(0, io.SEEK_END)
trace(self)
return payload |
def TDL(AN, BN):
"""
Helper function for MT2Plane.
Adapted from MATLAB script
`bb.m <http://www.ceri.memphis.edu/people/olboyd/Software/Software.html>`_
written by Andy Michael and Oliver Boyd.
"""
XN = AN[0]
YN = AN[1]
ZN = AN[2]
XE = BN[0]
YE = BN[1]
ZE = BN[2]
AAA = 1.0 / (1000000)
CON = 57.2957795
if np.fabs(ZN) < AAA:
FD = 90.
AXN = np.fabs(XN)
if AXN > 1.0:
AXN = 1.0
FT = np.arcsin(AXN) * CON
ST = -XN
CT = YN
if ST >= 0. and CT < 0:
FT = 180. - FT
if ST < 0. and CT <= 0:
FT = 180. + FT
if ST < 0. and CT > 0:
FT = 360. - FT
FL = np.arcsin(abs(ZE)) * CON
SL = -ZE
if np.fabs(XN) < AAA:
CL = XE / YN
else:
CL = -YE / XN
if SL >= 0. and CL < 0:
FL = 180. - FL
if SL < 0. and CL <= 0:
FL = FL - 180.
if SL < 0. and CL > 0:
FL = -FL
else:
if - ZN > 1.0:
ZN = -1.0
FDH = np.arccos(-ZN)
FD = FDH * CON
SD = np.sin(FDH)
if SD == 0:
return
ST = -XN / SD
CT = YN / SD
SX = np.fabs(ST)
if SX > 1.0:
SX = 1.0
FT = np.arcsin(SX) * CON
if ST >= 0. and CT < 0:
FT = 180. - FT
if ST < 0. and CT <= 0:
FT = 180. + FT
if ST < 0. and CT > 0:
FT = 360. - FT
SL = -ZE / SD
SX = np.fabs(SL)
if SX > 1.0:
SX = 1.0
FL = np.arcsin(SX) * CON
if ST == 0:
CL = XE / CT
else:
XXX = YN * ZN * ZE / SD / SD + YE
CL = -SD * XXX / XN
if CT == 0:
CL = YE / ST
if SL >= 0. and CL < 0:
FL = 180. - FL
if SL < 0. and CL <= 0:
FL = FL - 180.
if SL < 0. and CL > 0:
FL = -FL
return (FT, FD, FL) | Helper function for MT2Plane.
Adapted from MATLAB script
`bb.m <http://www.ceri.memphis.edu/people/olboyd/Software/Software.html>`_
written by Andy Michael and Oliver Boyd. | Below is the the instruction that describes the task:
### Input:
Helper function for MT2Plane.
Adapted from MATLAB script
`bb.m <http://www.ceri.memphis.edu/people/olboyd/Software/Software.html>`_
written by Andy Michael and Oliver Boyd.
### Response:
def TDL(AN, BN):
"""
Helper function for MT2Plane.
Adapted from MATLAB script
`bb.m <http://www.ceri.memphis.edu/people/olboyd/Software/Software.html>`_
written by Andy Michael and Oliver Boyd.
"""
XN = AN[0]
YN = AN[1]
ZN = AN[2]
XE = BN[0]
YE = BN[1]
ZE = BN[2]
AAA = 1.0 / (1000000)
CON = 57.2957795
if np.fabs(ZN) < AAA:
FD = 90.
AXN = np.fabs(XN)
if AXN > 1.0:
AXN = 1.0
FT = np.arcsin(AXN) * CON
ST = -XN
CT = YN
if ST >= 0. and CT < 0:
FT = 180. - FT
if ST < 0. and CT <= 0:
FT = 180. + FT
if ST < 0. and CT > 0:
FT = 360. - FT
FL = np.arcsin(abs(ZE)) * CON
SL = -ZE
if np.fabs(XN) < AAA:
CL = XE / YN
else:
CL = -YE / XN
if SL >= 0. and CL < 0:
FL = 180. - FL
if SL < 0. and CL <= 0:
FL = FL - 180.
if SL < 0. and CL > 0:
FL = -FL
else:
if - ZN > 1.0:
ZN = -1.0
FDH = np.arccos(-ZN)
FD = FDH * CON
SD = np.sin(FDH)
if SD == 0:
return
ST = -XN / SD
CT = YN / SD
SX = np.fabs(ST)
if SX > 1.0:
SX = 1.0
FT = np.arcsin(SX) * CON
if ST >= 0. and CT < 0:
FT = 180. - FT
if ST < 0. and CT <= 0:
FT = 180. + FT
if ST < 0. and CT > 0:
FT = 360. - FT
SL = -ZE / SD
SX = np.fabs(SL)
if SX > 1.0:
SX = 1.0
FL = np.arcsin(SX) * CON
if ST == 0:
CL = XE / CT
else:
XXX = YN * ZN * ZE / SD / SD + YE
CL = -SD * XXX / XN
if CT == 0:
CL = YE / ST
if SL >= 0. and CL < 0:
FL = 180. - FL
if SL < 0. and CL <= 0:
FL = FL - 180.
if SL < 0. and CL > 0:
FL = -FL
return (FT, FD, FL) |
def get_full_psd_matrix(self):
"""Function that returns the tf graph corresponding to the entire matrix M.
Returns:
matrix_h: unrolled version of tf matrix corresponding to H
matrix_m: unrolled tf matrix corresponding to M
"""
if self.matrix_m is not None:
return self.matrix_h, self.matrix_m
# Computing the matrix term
h_columns = []
for i in range(self.nn_params.num_hidden_layers + 1):
current_col_elems = []
for j in range(i):
current_col_elems.append(
tf.zeros([self.nn_params.sizes[j], self.nn_params.sizes[i]]))
# For the first layer, there is no relu constraint
if i == 0:
current_col_elems.append(utils.diag(self.lambda_lu[i]))
else:
current_col_elems.append(
utils.diag(self.lambda_lu[i] + self.lambda_quad[i]))
if i < self.nn_params.num_hidden_layers:
current_col_elems.append(tf.matmul(
utils.diag(-1 * self.lambda_quad[i + 1]),
self.nn_params.weights[i]))
for j in range(i + 2, self.nn_params.num_hidden_layers + 1):
current_col_elems.append(
tf.zeros([self.nn_params.sizes[j], self.nn_params.sizes[i]]))
current_column = tf.concat(current_col_elems, 0)
h_columns.append(current_column)
self.matrix_h = tf.concat(h_columns, 1)
self.matrix_h = (self.matrix_h + tf.transpose(self.matrix_h))
self.matrix_m = tf.concat(
[
tf.concat([tf.reshape(self.nu, (1, 1)), tf.transpose(self.vector_g)], axis=1),
tf.concat([self.vector_g, self.matrix_h], axis=1)
],
axis=0)
return self.matrix_h, self.matrix_m | Function that returns the tf graph corresponding to the entire matrix M.
Returns:
matrix_h: unrolled version of tf matrix corresponding to H
matrix_m: unrolled tf matrix corresponding to M | Below is the the instruction that describes the task:
### Input:
Function that returns the tf graph corresponding to the entire matrix M.
Returns:
matrix_h: unrolled version of tf matrix corresponding to H
matrix_m: unrolled tf matrix corresponding to M
### Response:
def get_full_psd_matrix(self):
"""Function that returns the tf graph corresponding to the entire matrix M.
Returns:
matrix_h: unrolled version of tf matrix corresponding to H
matrix_m: unrolled tf matrix corresponding to M
"""
if self.matrix_m is not None:
return self.matrix_h, self.matrix_m
# Computing the matrix term
h_columns = []
for i in range(self.nn_params.num_hidden_layers + 1):
current_col_elems = []
for j in range(i):
current_col_elems.append(
tf.zeros([self.nn_params.sizes[j], self.nn_params.sizes[i]]))
# For the first layer, there is no relu constraint
if i == 0:
current_col_elems.append(utils.diag(self.lambda_lu[i]))
else:
current_col_elems.append(
utils.diag(self.lambda_lu[i] + self.lambda_quad[i]))
if i < self.nn_params.num_hidden_layers:
current_col_elems.append(tf.matmul(
utils.diag(-1 * self.lambda_quad[i + 1]),
self.nn_params.weights[i]))
for j in range(i + 2, self.nn_params.num_hidden_layers + 1):
current_col_elems.append(
tf.zeros([self.nn_params.sizes[j], self.nn_params.sizes[i]]))
current_column = tf.concat(current_col_elems, 0)
h_columns.append(current_column)
self.matrix_h = tf.concat(h_columns, 1)
self.matrix_h = (self.matrix_h + tf.transpose(self.matrix_h))
self.matrix_m = tf.concat(
[
tf.concat([tf.reshape(self.nu, (1, 1)), tf.transpose(self.vector_g)], axis=1),
tf.concat([self.vector_g, self.matrix_h], axis=1)
],
axis=0)
return self.matrix_h, self.matrix_m |
def receive(self):
"""
Read and return a message from the stream. If `None` is returned, then
the socket is considered closed/errored.
"""
if self._closed:
raise WebSocketError("Connection is already closed")
try:
return self.read_message()
except UnicodeError as e:
logger.info('websocket.receive: UnicodeError {}'.format(e))
self.close(1007)
except WebSocketError as e:
logger.info('websocket.receive: WebSocketError {}'.format(e))
self.close(1002)
except Exception as e:
logger.info('websocket.receive: Unknown error {}'.format(e))
raise e | Read and return a message from the stream. If `None` is returned, then
the socket is considered closed/errored. | Below is the the instruction that describes the task:
### Input:
Read and return a message from the stream. If `None` is returned, then
the socket is considered closed/errored.
### Response:
def receive(self):
"""
Read and return a message from the stream. If `None` is returned, then
the socket is considered closed/errored.
"""
if self._closed:
raise WebSocketError("Connection is already closed")
try:
return self.read_message()
except UnicodeError as e:
logger.info('websocket.receive: UnicodeError {}'.format(e))
self.close(1007)
except WebSocketError as e:
logger.info('websocket.receive: WebSocketError {}'.format(e))
self.close(1002)
except Exception as e:
logger.info('websocket.receive: Unknown error {}'.format(e))
raise e |
def _build_ssh_client(self):
"""Prepare for Paramiko SSH connection."""
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Load host_keys for better SSH security
if self.system_host_keys:
remote_conn_pre.load_system_host_keys()
if self.alt_host_keys and path.isfile(self.alt_key_file):
remote_conn_pre.load_host_keys(self.alt_key_file)
# Default is to automatically add untrusted hosts (make sure appropriate for your env)
remote_conn_pre.set_missing_host_key_policy(self.key_policy)
return remote_conn_pre | Prepare for Paramiko SSH connection. | Below is the the instruction that describes the task:
### Input:
Prepare for Paramiko SSH connection.
### Response:
def _build_ssh_client(self):
"""Prepare for Paramiko SSH connection."""
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Load host_keys for better SSH security
if self.system_host_keys:
remote_conn_pre.load_system_host_keys()
if self.alt_host_keys and path.isfile(self.alt_key_file):
remote_conn_pre.load_host_keys(self.alt_key_file)
# Default is to automatically add untrusted hosts (make sure appropriate for your env)
remote_conn_pre.set_missing_host_key_policy(self.key_policy)
return remote_conn_pre |
async def digital_read(self, command):
"""
This method reads and returns the last reported value for a digital pin.
Normally not used since digital pin updates will be provided automatically
as they occur with the digital_message_reply being sent to the client after set_pin_mode is called..
(see enable_digital_reporting for message format)
:param command: {"method": "digital_read", "params": [PIN]}
:returns: {"method": "digital_read_reply", "params": [PIN, DIGITAL_DATA_VALUE]}
"""
pin = int(command[0])
data_val = await self.core.digital_read(pin)
reply = json.dumps({"method": "digital_read_reply", "params": [pin, data_val]})
await self.websocket.send(reply) | This method reads and returns the last reported value for a digital pin.
Normally not used since digital pin updates will be provided automatically
as they occur with the digital_message_reply being sent to the client after set_pin_mode is called..
(see enable_digital_reporting for message format)
:param command: {"method": "digital_read", "params": [PIN]}
:returns: {"method": "digital_read_reply", "params": [PIN, DIGITAL_DATA_VALUE]} | Below is the the instruction that describes the task:
### Input:
This method reads and returns the last reported value for a digital pin.
Normally not used since digital pin updates will be provided automatically
as they occur with the digital_message_reply being sent to the client after set_pin_mode is called..
(see enable_digital_reporting for message format)
:param command: {"method": "digital_read", "params": [PIN]}
:returns: {"method": "digital_read_reply", "params": [PIN, DIGITAL_DATA_VALUE]}
### Response:
async def digital_read(self, command):
"""
This method reads and returns the last reported value for a digital pin.
Normally not used since digital pin updates will be provided automatically
as they occur with the digital_message_reply being sent to the client after set_pin_mode is called..
(see enable_digital_reporting for message format)
:param command: {"method": "digital_read", "params": [PIN]}
:returns: {"method": "digital_read_reply", "params": [PIN, DIGITAL_DATA_VALUE]}
"""
pin = int(command[0])
data_val = await self.core.digital_read(pin)
reply = json.dumps({"method": "digital_read_reply", "params": [pin, data_val]})
await self.websocket.send(reply) |
def dump_index(args, idx):
"""Create a metatab file for the index"""
import csv
import sys
from metatab import MetatabDoc
doc = MetatabDoc()
pack_section = doc.new_section('Packages', ['Identifier', 'Name', 'Nvname', 'Version', 'Format'])
r = doc['Root']
r.new_term('Root.Title', 'Package Index')
for p in idx.list():
pack_section.new_term('Package',
p['url'],
identifier=p['ident'],
name=p['name'],
nvname=p['nvname'],
version=p['version'],
format=p['format'])
doc.write_csv(args.dump) | Create a metatab file for the index | Below is the the instruction that describes the task:
### Input:
Create a metatab file for the index
### Response:
def dump_index(args, idx):
"""Create a metatab file for the index"""
import csv
import sys
from metatab import MetatabDoc
doc = MetatabDoc()
pack_section = doc.new_section('Packages', ['Identifier', 'Name', 'Nvname', 'Version', 'Format'])
r = doc['Root']
r.new_term('Root.Title', 'Package Index')
for p in idx.list():
pack_section.new_term('Package',
p['url'],
identifier=p['ident'],
name=p['name'],
nvname=p['nvname'],
version=p['version'],
format=p['format'])
doc.write_csv(args.dump) |
def gen_tau(S, K, delta):
"""The Robust part of the RSD, we precompute an
array for speed
"""
pivot = floor(K/S)
return [S/K * 1/d for d in range(1, pivot)] \
+ [S/K * log(S/delta)] \
+ [0 for d in range(pivot, K)] | The Robust part of the RSD, we precompute an
array for speed | Below is the the instruction that describes the task:
### Input:
The Robust part of the RSD, we precompute an
array for speed
### Response:
def gen_tau(S, K, delta):
"""The Robust part of the RSD, we precompute an
array for speed
"""
pivot = floor(K/S)
return [S/K * 1/d for d in range(1, pivot)] \
+ [S/K * log(S/delta)] \
+ [0 for d in range(pivot, K)] |
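A quick sanity check with hypothetical values (assuming floor and log are imported from math, as the function requires): the returned list always has K entries, with the spike at index pivot - 1.
from math import floor, log

tau = gen_tau(S=10, K=100, delta=0.05)
print(len(tau))                    # 100 entries, one per degree d = 1..K
print(tau[0])                      # S/(K*1) = 0.1 for degree 1
print(tau[floor(100 / 10) - 1])    # the spike S/K * log(S/delta) at d = K/S
print(tau[50])                     # 0 beyond the pivot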
def remove(self, force=False):
"""
Remove this volume.
Args:
force (bool): Force removal of volumes that were already removed
out of band by the volume driver plugin.
Raises:
:py:class:`docker.errors.APIError`
If volume failed to remove.
"""
return self.client.api.remove_volume(self.id, force=force) | Remove this volume.
Args:
force (bool): Force removal of volumes that were already removed
out of band by the volume driver plugin.
Raises:
:py:class:`docker.errors.APIError`
If volume failed to remove. | Below is the the instruction that describes the task:
### Input:
Remove this volume.
Args:
force (bool): Force removal of volumes that were already removed
out of band by the volume driver plugin.
Raises:
:py:class:`docker.errors.APIError`
If volume failed to remove.
### Response:
def remove(self, force=False):
"""
Remove this volume.
Args:
force (bool): Force removal of volumes that were already removed
out of band by the volume driver plugin.
Raises:
:py:class:`docker.errors.APIError`
If volume failed to remove.
"""
return self.client.api.remove_volume(self.id, force=force) |
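End to end, usage via the Docker SDK looks roughly like this (assumes the docker package and a reachable daemon; the volume name is arbitrary):
import docker

client = docker.from_env()
vol = client.volumes.create(name='scratch-data')
vol.remove(force=True)   # force also clears volumes the driver already dropped out of band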
def get_color_scheme(name):
"""Get syntax color scheme"""
color_scheme = {}
for key in sh.COLOR_SCHEME_KEYS:
color_scheme[key] = CONF.get("appearance", "%s/%s" % (name, key))
return color_scheme | Get syntax color scheme | Below is the the instruction that describes the task:
### Input:
Get syntax color scheme
### Response:
def get_color_scheme(name):
"""Get syntax color scheme"""
color_scheme = {}
for key in sh.COLOR_SCHEME_KEYS:
color_scheme[key] = CONF.get("appearance", "%s/%s" % (name, key))
return color_scheme |
async def delete(self):
"""Delete this Bcache."""
await self._handler.delete(
system_id=self.node.system_id, id=self.id) | Delete this Bcache. | Below is the the instruction that describes the task:
### Input:
Delete this Bcache.
### Response:
async def delete(self):
"""Delete this Bcache."""
await self._handler.delete(
system_id=self.node.system_id, id=self.id) |
def get_index_by_name(self, name):
"""Find the index number by block name"""
for i in range(self.n_blocks):
if self.get_block_name(i) == name:
return i
raise KeyError('Block name ({}) not found'.format(name)) | Find the index number by block name | Below is the the instruction that describes the task:
### Input:
Find the index number by block name
### Response:
def get_index_by_name(self, name):
"""Find the index number by block name"""
for i in range(self.n_blocks):
if self.get_block_name(i) == name:
return i
raise KeyError('Block name ({}) not found'.format(name)) |
def get_adapter(self, url):
"""
Returns the appropriate connection adapter for the given URL.
:rtype: requests.adapters.BaseAdapter
"""
for (prefix, adapter) in self.adapters.items():
if url.lower().startswith(prefix.lower()):
return adapter
# Nothing matches :-/
raise InvalidSchema("No connection adapters were found for '%s'" % url) | Returns the appropriate connection adapter for the given URL.
:rtype: requests.adapters.BaseAdapter | Below is the the instruction that describes the task:
### Input:
Returns the appropriate connection adapter for the given URL.
:rtype: requests.adapters.BaseAdapter
### Response:
def get_adapter(self, url):
"""
Returns the appropriate connection adapter for the given URL.
:rtype: requests.adapters.BaseAdapter
"""
for (prefix, adapter) in self.adapters.items():
if url.lower().startswith(prefix.lower()):
return adapter
# Nothing matches :-/
raise InvalidSchema("No connection adapters were found for '%s'" % url) |
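For reference, a default requests Session already has adapters mounted for 'http://' and 'https://', so a lookup simply returns the first mounted prefix that matches:
import requests

s = requests.Session()
adapter = s.get_adapter('https://example.com/resource')
print(type(adapter).__name__)   # HTTPAdapter, mounted for the 'https://' prefix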
def has(self, character):
'''
Get if character (or character code point) is contained by any range on
this range group.
:param character: character or unicode code point to look for
:type character: str or int
:returns: True if character is contained by any range, False otherwise
:rtype: bool
'''
if not self:
return False
character = character if isinstance(character, int) else ord(character)
last = self[-1][-1]
start, end = self[bisect.bisect_right(self, (character, last)) - 1]
return start <= character < end | Get if character (or character code point) is contained by any range on
this range group.
:param character: character or unicode code point to look for
:type character: str or int
:returns: True if character is contained by any range, False otherwise
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Get if character (or character code point) is contained by any range on
this range group.
:param character: character or unicode code point to look for
:type character: str or int
:returns: True if character is contained by any range, False otherwise
:rtype: bool
### Response:
def has(self, character):
'''
Get if character (or character code point) is contained by any range on
this range group.
:param character: character or unicode code point to look for
:type character: str or int
:returns: True if character is contained by any range, False otherwise
:rtype: bool
'''
if not self:
return False
character = character if isinstance(character, int) else ord(character)
last = self[-1][-1]
start, end = self[bisect.bisect_right(self, (character, last)) - 1]
return start <= character < end |
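As a rough usage sketch (the constructor call below is an assumption; it just needs a sorted list of half-open (start, end) code-point pairs):
group = RangeGroup([(48, 58), (97, 123)])  # digits 0-9 and lowercase a-z
print(group.has('5'))    # True: 48 <= 53 < 58
print(group.has('A'))    # False: 65 falls between the two ranges
print(group.has(0x61))   # True: code points are accepted directly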
def request(self, method, url, **kwargs):
"""
Overrides ``requests.Session.request`` to renew the cookie and then
retry the original request (if required).
"""
resp = super(CookieSession, self).request(method, url, **kwargs)
if not self._auto_renew:
return resp
is_expired = any((
resp.status_code == 403 and
response_to_json_dict(resp).get('error') == 'credentials_expired',
resp.status_code == 401
))
if is_expired:
self.login()
resp = super(CookieSession, self).request(method, url, **kwargs)
return resp | Overrides ``requests.Session.request`` to renew the cookie and then
retry the original request (if required). | Below is the the instruction that describes the task:
### Input:
Overrides ``requests.Session.request`` to renew the cookie and then
retry the original request (if required).
### Response:
def request(self, method, url, **kwargs):
"""
Overrides ``requests.Session.request`` to renew the cookie and then
retry the original request (if required).
"""
resp = super(CookieSession, self).request(method, url, **kwargs)
if not self._auto_renew:
return resp
is_expired = any((
resp.status_code == 403 and
response_to_json_dict(resp).get('error') == 'credentials_expired',
resp.status_code == 401
))
if is_expired:
self.login()
resp = super(CookieSession, self).request(method, url, **kwargs)
return resp |
def add_hookspecs(self, module_or_class):
""" add new hook specifications defined in the given module_or_class.
Functions are recognized if they have been decorated accordingly. """
names = []
for name in dir(module_or_class):
spec_opts = self.parse_hookspec_opts(module_or_class, name)
if spec_opts is not None:
hc = getattr(self.hook, name, None)
if hc is None:
hc = _HookCaller(name, self._hookexec, module_or_class, spec_opts)
setattr(self.hook, name, hc)
else:
# plugins registered this hook without knowing the spec
hc.set_specification(module_or_class, spec_opts)
for hookfunction in hc.get_hookimpls():
self._verify_hook(hc, hookfunction)
names.append(name)
if not names:
raise ValueError(
"did not find any %r hooks in %r" % (self.project_name, module_or_class)
) | add new hook specifications defined in the given module_or_class.
Functions are recognized if they have been decorated accordingly. | Below is the the instruction that describes the task:
### Input:
add new hook specifications defined in the given module_or_class.
Functions are recognized if they have been decorated accordingly.
### Response:
def add_hookspecs(self, module_or_class):
""" add new hook specifications defined in the given module_or_class.
Functions are recognized if they have been decorated accordingly. """
names = []
for name in dir(module_or_class):
spec_opts = self.parse_hookspec_opts(module_or_class, name)
if spec_opts is not None:
hc = getattr(self.hook, name, None)
if hc is None:
hc = _HookCaller(name, self._hookexec, module_or_class, spec_opts)
setattr(self.hook, name, hc)
else:
# plugins registered this hook without knowing the spec
hc.set_specification(module_or_class, spec_opts)
for hookfunction in hc.get_hookimpls():
self._verify_hook(hc, hookfunction)
names.append(name)
if not names:
raise ValueError(
"did not find any %r hooks in %r" % (self.project_name, module_or_class)
) |
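For context, typical caller-side usage looks roughly like this (assuming the pluggy package; the project and hook names are made up):
import pluggy

hookspec = pluggy.HookspecMarker("myproject")

class MySpecs(object):
    @hookspec
    def myhook(self, arg):
        """A hook that plugins may implement."""

pm = pluggy.PluginManager("myproject")
pm.add_hookspecs(MySpecs)   # registers 'myhook' so pm.hook.myhook becomes callable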
def _list_to_complex_array(complex_list):
"""Convert nested list of shape (..., 2) to complex numpy array with shape (...)
Args:
complex_list (list): List to convert.
Returns:
        np.ndarray: Complex numpy array
Raises:
QiskitError: If inner most array of input nested list is not of length 2.
"""
arr = np.asarray(complex_list, dtype=np.complex_)
if not arr.shape[-1] == 2:
raise QiskitError('Inner most nested list is not of length 2.')
return arr[..., 0] + 1j*arr[..., 1] | Convert nested list of shape (..., 2) to complex numpy array with shape (...)
Args:
complex_list (list): List to convert.
Returns:
        np.ndarray: Complex numpy array
Raises:
QiskitError: If inner most array of input nested list is not of length 2. | Below is the the instruction that describes the task:
### Input:
Convert nested list of shape (..., 2) to complex numpy array with shape (...)
Args:
complex_list (list): List to convert.
Returns:
        np.ndarray: Complex numpy array
Raises:
QiskitError: If inner most array of input nested list is not of length 2.
### Response:
def _list_to_complex_array(complex_list):
"""Convert nested list of shape (..., 2) to complex numpy array with shape (...)
Args:
complex_list (list): List to convert.
Returns:
        np.ndarray: Complex numpy array
Raises:
QiskitError: If inner most array of input nested list is not of length 2.
"""
arr = np.asarray(complex_list, dtype=np.complex_)
if not arr.shape[-1] == 2:
raise QiskitError('Inner most nested list is not of length 2.')
return arr[..., 0] + 1j*arr[..., 1] |
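A minimal example of the conversion (assumes numpy is imported as np in the module, as the function requires): each innermost [real, imag] pair becomes one complex number, dropping the trailing axis.
pairs = [[1.0, 2.0], [3.0, -4.0]]
print(_list_to_complex_array(pairs))          # [1.+2.j 3.-4.j]
nested = [[[0.0, 1.0]], [[1.0, 0.0]]]
print(_list_to_complex_array(nested).shape)   # (2, 1)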
def all_pods_are_ready(self, app_name):
"""
Check if all pods are ready for specific app
:param app_name: str, name of the app
:return: bool
"""
app_pod_exists = False
for pod in self.list_pods(namespace=self.project):
if app_name in pod.name and 'build' not in pod.name and 'deploy' not in pod.name:
app_pod_exists = True
if not pod.is_ready():
return False
if app_pod_exists:
logger.info("All pods are ready!")
return True
return False | Check if all pods are ready for specific app
:param app_name: str, name of the app
:return: bool | Below is the the instruction that describes the task:
### Input:
Check if all pods are ready for specific app
:param app_name: str, name of the app
:return: bool
### Response:
def all_pods_are_ready(self, app_name):
"""
Check if all pods are ready for specific app
:param app_name: str, name of the app
:return: bool
"""
app_pod_exists = False
for pod in self.list_pods(namespace=self.project):
if app_name in pod.name and 'build' not in pod.name and 'deploy' not in pod.name:
app_pod_exists = True
if not pod.is_ready():
return False
if app_pod_exists:
logger.info("All pods are ready!")
return True
return False |
def force_extrapolation(self):
"""Force the underlying model to extrapolate.
An example where this is useful: You create a source spectrum
with non-default extrapolation behavior and you wish to force
the underlying empirical model to extrapolate based on nearest point.
.. note::
This is only applicable to `~synphot.models.Empirical1D` model
and should still work even if the source spectrum has been
redshifted.
Returns
-------
is_forced : bool
`True` if the model is successfully forced to be extrapolated,
else `False`.
"""
# We use _model here in case the spectrum is redshifted.
if isinstance(self._model, Empirical1D):
self._model.fill_value = np.nan
is_forced = True
else:
is_forced = False
return is_forced | Force the underlying model to extrapolate.
An example where this is useful: You create a source spectrum
with non-default extrapolation behavior and you wish to force
the underlying empirical model to extrapolate based on nearest point.
.. note::
This is only applicable to `~synphot.models.Empirical1D` model
and should still work even if the source spectrum has been
redshifted.
Returns
-------
is_forced : bool
`True` if the model is successfully forced to be extrapolated,
else `False`. | Below is the the instruction that describes the task:
### Input:
Force the underlying model to extrapolate.
An example where this is useful: You create a source spectrum
with non-default extrapolation behavior and you wish to force
the underlying empirical model to extrapolate based on nearest point.
.. note::
This is only applicable to `~synphot.models.Empirical1D` model
and should still work even if the source spectrum has been
redshifted.
Returns
-------
is_forced : bool
`True` if the model is successfully forced to be extrapolated,
else `False`.
### Response:
def force_extrapolation(self):
"""Force the underlying model to extrapolate.
An example where this is useful: You create a source spectrum
with non-default extrapolation behavior and you wish to force
the underlying empirical model to extrapolate based on nearest point.
.. note::
This is only applicable to `~synphot.models.Empirical1D` model
and should still work even if the source spectrum has been
redshifted.
Returns
-------
is_forced : bool
`True` if the model is successfully forced to be extrapolated,
else `False`.
"""
# We use _model here in case the spectrum is redshifted.
if isinstance(self._model, Empirical1D):
self._model.fill_value = np.nan
is_forced = True
else:
is_forced = False
return is_forced |
def render(self, name, value, attrs=None):
"""
Render the ``icekit_events/recurrence_rule_widget/render.html``
template with the following context:
rendered_widgets
The rendered widgets.
id
The ``id`` attribute from the ``attrs`` keyword argument.
recurrence_rules
A JSON object mapping recurrence rules to their primary keys.
The default template adds JavaScript event handlers that update the
``Textarea`` and ``Select`` widgets when they are updated.
"""
rendered_widgets = super(RecurrenceRuleWidget, self).render(
name, value, attrs)
template = loader.get_template(
'icekit_events/recurrence_rule_widget/render.html')
recurrence_rules = json.dumps(dict(
self.queryset.values_list('pk', 'recurrence_rule')))
context = Context({
'rendered_widgets': rendered_widgets,
'id': attrs['id'],
'recurrence_rules': recurrence_rules,
})
return template.render(context) | Render the ``icekit_events/recurrence_rule_widget/render.html``
template with the following context:
rendered_widgets
The rendered widgets.
id
The ``id`` attribute from the ``attrs`` keyword argument.
recurrence_rules
A JSON object mapping recurrence rules to their primary keys.
The default template adds JavaScript event handlers that update the
``Textarea`` and ``Select`` widgets when they are updated. | Below is the the instruction that describes the task:
### Input:
Render the ``icekit_events/recurrence_rule_widget/render.html``
template with the following context:
rendered_widgets
The rendered widgets.
id
The ``id`` attribute from the ``attrs`` keyword argument.
recurrence_rules
A JSON object mapping recurrence rules to their primary keys.
The default template adds JavaScript event handlers that update the
``Textarea`` and ``Select`` widgets when they are updated.
### Response:
def render(self, name, value, attrs=None):
"""
Render the ``icekit_events/recurrence_rule_widget/render.html``
template with the following context:
rendered_widgets
The rendered widgets.
id
The ``id`` attribute from the ``attrs`` keyword argument.
recurrence_rules
A JSON object mapping recurrence rules to their primary keys.
The default template adds JavaScript event handlers that update the
``Textarea`` and ``Select`` widgets when they are updated.
"""
rendered_widgets = super(RecurrenceRuleWidget, self).render(
name, value, attrs)
template = loader.get_template(
'icekit_events/recurrence_rule_widget/render.html')
recurrence_rules = json.dumps(dict(
self.queryset.values_list('pk', 'recurrence_rule')))
context = Context({
'rendered_widgets': rendered_widgets,
'id': attrs['id'],
'recurrence_rules': recurrence_rules,
})
return template.render(context) |
def get_all_language_codes():
"""
Returns all language codes defined in settings.LANGUAGES and also the
settings.MSGID_LANGUAGE if defined.
>>> from django.conf import settings
>>> settings.MSGID_LANGUAGE = 'en-us'
>>> settings.LANGUAGES = (('en','English'),('de','German'),('nl-be','Belgium dutch'),('fr-be','Belgium french'),)
    >>> sorted( get_all_language_codes() )
    ['de', 'en', 'en-us', 'fr-be', 'nl-be']
:rtype: A :class:`list` of language codes.
"""
languages = get_language_codes()
if hasattr(settings, 'MSGID_LANGUAGE'):
if not settings.MSGID_LANGUAGE in languages:
languages.insert(0, settings.MSGID_LANGUAGE)
return languages | Returns all language codes defined in settings.LANGUAGES and also the
settings.MSGID_LANGUAGE if defined.
>>> from django.conf import settings
>>> settings.MSGID_LANGUAGE = 'en-us'
>>> settings.LANGUAGES = (('en','English'),('de','German'),('nl-be','Belgium dutch'),('fr-be','Belgium french'),)
    >>> sorted( get_all_language_codes() )
    ['de', 'en', 'en-us', 'fr-be', 'nl-be']
:rtype: A :class:`list` of language codes. | Below is the the instruction that describes the task:
### Input:
Returns all language codes defined in settings.LANGUAGES and also the
settings.MSGID_LANGUAGE if defined.
>>> from django.conf import settings
>>> settings.MSGID_LANGUAGE = 'en-us'
>>> settings.LANGUAGES = (('en','English'),('de','German'),('nl-be','Belgium dutch'),('fr-be','Belgium french'),)
    >>> sorted( get_all_language_codes() )
    ['de', 'en', 'en-us', 'fr-be', 'nl-be']
:rtype: A :class:`list` of language codes.
### Response:
def get_all_language_codes():
"""
Returns all language codes defined in settings.LANGUAGES and also the
settings.MSGID_LANGUAGE if defined.
>>> from django.conf import settings
>>> settings.MSGID_LANGUAGE = 'en-us'
>>> settings.LANGUAGES = (('en','English'),('de','German'),('nl-be','Belgium dutch'),('fr-be','Belgium french'),)
    >>> sorted( get_all_language_codes() )
    ['de', 'en', 'en-us', 'fr-be', 'nl-be']
:rtype: A :class:`list` of language codes.
"""
languages = get_language_codes()
if hasattr(settings, 'MSGID_LANGUAGE'):
if not settings.MSGID_LANGUAGE in languages:
languages.insert(0, settings.MSGID_LANGUAGE)
return languages |
def tick(self):
"""Return the time cost string as expect."""
string = self.passed
if self.rounding:
string = round(string)
if self.readable:
string = self.readable(string)
    return string | Return the time cost string as expected. | Below is the the instruction that describes the task:
### Input:
Return the time cost string as expected.
### Response:
def tick(self):
"""Return the time cost string as expect."""
string = self.passed
if self.rounding:
string = round(string)
if self.readable:
string = self.readable(string)
return string |
def output_extras(self, output_file):
"""
Returns dict mapping local path to remote name.
"""
output_directory = dirname(output_file)
def local_path(name):
return join(output_directory, self.path_helper.local_name(name))
files_directory = "%s_files%s" % (basename(output_file)[0:-len(".dat")], self.path_helper.separator)
names = filter(lambda o: o.startswith(files_directory), self.output_directory_contents)
return dict(map(lambda name: (local_path(name), name), names)) | Returns dict mapping local path to remote name. | Below is the the instruction that describes the task:
### Input:
Returns dict mapping local path to remote name.
### Response:
def output_extras(self, output_file):
"""
Returns dict mapping local path to remote name.
"""
output_directory = dirname(output_file)
def local_path(name):
return join(output_directory, self.path_helper.local_name(name))
files_directory = "%s_files%s" % (basename(output_file)[0:-len(".dat")], self.path_helper.separator)
names = filter(lambda o: o.startswith(files_directory), self.output_directory_contents)
return dict(map(lambda name: (local_path(name), name), names)) |
def usage(asked_for=0):
'''Exit with a usage string, used for bad argument or with -h'''
exit = fsq.const('FSQ_SUCCESS') if asked_for else\
fsq.const('FSQ_FAIL_PERM')
f = sys.stdout if asked_for else sys.stderr
shout('{0} [opts] src_queue trg_queue host item_id [item_id [...]]'.format(
os.path.basename(_PROG)), f)
if asked_for:
shout('{0} [-p|--protocol=jsonrpc] [-L|--no-lock] [-t|--trigger] '\
'[-i|--ignore-listener] <proto>://<host>:<port>/url'\
.format(os.path.basename(_PROG)), f)
shout('{0} [-p|--protocol=jsonrpc] [-L|--no-lock] [-t|--trigger]'\
'[-i|--ignore-listener] unix://var/sock/foo.sock'\
.format(os.path.basename(_PROG)), f)
shout(' src_queue trg_queue host_queue item [item [...]]', f)
return exit | Exit with a usage string, used for bad argument or with -h | Below is the the instruction that describes the task:
### Input:
Exit with a usage string, used for bad argument or with -h
### Response:
def usage(asked_for=0):
'''Exit with a usage string, used for bad argument or with -h'''
exit = fsq.const('FSQ_SUCCESS') if asked_for else\
fsq.const('FSQ_FAIL_PERM')
f = sys.stdout if asked_for else sys.stderr
shout('{0} [opts] src_queue trg_queue host item_id [item_id [...]]'.format(
os.path.basename(_PROG)), f)
if asked_for:
shout('{0} [-p|--protocol=jsonrpc] [-L|--no-lock] [-t|--trigger] '\
'[-i|--ignore-listener] <proto>://<host>:<port>/url'\
.format(os.path.basename(_PROG)), f)
shout('{0} [-p|--protocol=jsonrpc] [-L|--no-lock] [-t|--trigger]'\
'[-i|--ignore-listener] unix://var/sock/foo.sock'\
.format(os.path.basename(_PROG)), f)
shout(' src_queue trg_queue host_queue item [item [...]]', f)
return exit |
def get_all_parent_edges(self):
"""Return tuples for all parent GO IDs, containing current GO ID and parent GO ID."""
all_parent_edges = set()
for parent in self.parents:
all_parent_edges.add((self.item_id, parent.item_id))
all_parent_edges |= parent.get_all_parent_edges()
return all_parent_edges | Return tuples for all parent GO IDs, containing current GO ID and parent GO ID. | Below is the the instruction that describes the task:
### Input:
Return tuples for all parent GO IDs, containing current GO ID and parent GO ID.
### Response:
def get_all_parent_edges(self):
"""Return tuples for all parent GO IDs, containing current GO ID and parent GO ID."""
all_parent_edges = set()
for parent in self.parents:
all_parent_edges.add((self.item_id, parent.item_id))
all_parent_edges |= parent.get_all_parent_edges()
return all_parent_edges |
def committer(cls, config_reader=None):
"""
:return: Actor instance corresponding to the configured committer. It behaves
similar to the git implementation, such that the environment will override
configuration values of config_reader. If no value is set at all, it will be
generated
:param config_reader: ConfigReader to use to retrieve the values from in case
they are not set in the environment"""
return cls._main_actor(cls.env_committer_name, cls.env_committer_email, config_reader) | :return: Actor instance corresponding to the configured committer. It behaves
similar to the git implementation, such that the environment will override
configuration values of config_reader. If no value is set at all, it will be
generated
:param config_reader: ConfigReader to use to retrieve the values from in case
they are not set in the environment | Below is the the instruction that describes the task:
### Input:
:return: Actor instance corresponding to the configured committer. It behaves
similar to the git implementation, such that the environment will override
configuration values of config_reader. If no value is set at all, it will be
generated
:param config_reader: ConfigReader to use to retrieve the values from in case
they are not set in the environment
### Response:
def committer(cls, config_reader=None):
"""
:return: Actor instance corresponding to the configured committer. It behaves
similar to the git implementation, such that the environment will override
configuration values of config_reader. If no value is set at all, it will be
generated
:param config_reader: ConfigReader to use to retrieve the values from in case
they are not set in the environment"""
return cls._main_actor(cls.env_committer_name, cls.env_committer_email, config_reader) |
def _check_release_cmp(name):
'''
Helper function to compare release codename versions to the minion's current
Salt version.
If release codename isn't found, the function returns None. Otherwise, it
returns the results of the version comparison as documented by the
``versions_cmp`` function in ``salt.utils.versions.py``.
'''
map_version = get_release_number(name)
if map_version is None:
log.info('Release codename %s was not found.', name)
return None
current_version = six.text_type(salt.version.SaltStackVersion(
*salt.version.__version_info__))
current_version = current_version.rsplit('.', 1)[0]
version_cmp = salt.utils.versions.version_cmp(map_version, current_version)
return version_cmp | Helper function to compare release codename versions to the minion's current
Salt version.
If release codename isn't found, the function returns None. Otherwise, it
returns the results of the version comparison as documented by the
``versions_cmp`` function in ``salt.utils.versions.py``. | Below is the the instruction that describes the task:
### Input:
Helper function to compare release codename versions to the minion's current
Salt version.
If release codename isn't found, the function returns None. Otherwise, it
returns the results of the version comparison as documented by the
``versions_cmp`` function in ``salt.utils.versions.py``.
### Response:
def _check_release_cmp(name):
'''
Helper function to compare release codename versions to the minion's current
Salt version.
If release codename isn't found, the function returns None. Otherwise, it
returns the results of the version comparison as documented by the
``versions_cmp`` function in ``salt.utils.versions.py``.
'''
map_version = get_release_number(name)
if map_version is None:
log.info('Release codename %s was not found.', name)
return None
current_version = six.text_type(salt.version.SaltStackVersion(
*salt.version.__version_info__))
current_version = current_version.rsplit('.', 1)[0]
version_cmp = salt.utils.versions.version_cmp(map_version, current_version)
return version_cmp |
def reset(self):
'''
Reset the state of the container and its internal fields
'''
super(TakeFrom, self).reset()
self.random.seed(self.seed * self.max_elements + self.min_elements) | Reset the state of the container and its internal fields | Below is the the instruction that describes the task:
### Input:
Reset the state of the container and its internal fields
### Response:
def reset(self):
'''
Reset the state of the container and its internal fields
'''
super(TakeFrom, self).reset()
self.random.seed(self.seed * self.max_elements + self.min_elements) |
def json_serializer(pid, data, *args):
"""Build a JSON Flask response using the given data.
:param pid: The `invenio_pidstore.models.PersistentIdentifier` of the
record.
:param data: The record metadata.
:returns: A Flask response with JSON data.
:rtype: :py:class:`flask.Response`.
"""
if data is not None:
response = Response(
json.dumps(data.dumps()),
mimetype='application/json'
)
else:
response = Response(mimetype='application/json')
return response | Build a JSON Flask response using the given data.
:param pid: The `invenio_pidstore.models.PersistentIdentifier` of the
record.
:param data: The record metadata.
:returns: A Flask response with JSON data.
:rtype: :py:class:`flask.Response`. | Below is the the instruction that describes the task:
### Input:
Build a JSON Flask response using the given data.
:param pid: The `invenio_pidstore.models.PersistentIdentifier` of the
record.
:param data: The record metadata.
:returns: A Flask response with JSON data.
:rtype: :py:class:`flask.Response`.
### Response:
def json_serializer(pid, data, *args):
"""Build a JSON Flask response using the given data.
:param pid: The `invenio_pidstore.models.PersistentIdentifier` of the
record.
:param data: The record metadata.
:returns: A Flask response with JSON data.
:rtype: :py:class:`flask.Response`.
"""
if data is not None:
response = Response(
json.dumps(data.dumps()),
mimetype='application/json'
)
else:
response = Response(mimetype='application/json')
return response |
def geturl(urllib2_resp):
"""
Use instead of urllib.addinfourl.geturl(), which appears to have
some issues with dropping the double slash for certain schemes
(e.g. file://). This implementation is probably over-eager, as it
always restores '://' if it is missing, and it appears some url
schemata aren't always followed by '//' after the colon, but as
far as I know pip doesn't need any of those.
The URI RFC can be found at: http://tools.ietf.org/html/rfc1630
This function assumes that
scheme:/foo/bar
is the same as
scheme:///foo/bar
"""
url = urllib2_resp.geturl()
scheme, rest = url.split(':', 1)
if rest.startswith('//'):
return url
else:
# FIXME: write a good test to cover it
return '%s://%s' % (scheme, rest) | Use instead of urllib.addinfourl.geturl(), which appears to have
some issues with dropping the double slash for certain schemes
(e.g. file://). This implementation is probably over-eager, as it
always restores '://' if it is missing, and it appears some url
schemata aren't always followed by '//' after the colon, but as
far as I know pip doesn't need any of those.
The URI RFC can be found at: http://tools.ietf.org/html/rfc1630
This function assumes that
scheme:/foo/bar
is the same as
scheme:///foo/bar | Below is the the instruction that describes the task:
### Input:
Use instead of urllib.addinfourl.geturl(), which appears to have
some issues with dropping the double slash for certain schemes
(e.g. file://). This implementation is probably over-eager, as it
always restores '://' if it is missing, and it appears some url
schemata aren't always followed by '//' after the colon, but as
far as I know pip doesn't need any of those.
The URI RFC can be found at: http://tools.ietf.org/html/rfc1630
This function assumes that
scheme:/foo/bar
is the same as
scheme:///foo/bar
### Response:
def geturl(urllib2_resp):
"""
Use instead of urllib.addinfourl.geturl(), which appears to have
some issues with dropping the double slash for certain schemes
(e.g. file://). This implementation is probably over-eager, as it
always restores '://' if it is missing, and it appears some url
schemata aren't always followed by '//' after the colon, but as
far as I know pip doesn't need any of those.
The URI RFC can be found at: http://tools.ietf.org/html/rfc1630
This function assumes that
scheme:/foo/bar
is the same as
scheme:///foo/bar
"""
url = urllib2_resp.geturl()
scheme, rest = url.split(':', 1)
if rest.startswith('//'):
return url
else:
# FIXME: write a good test to cover it
return '%s://%s' % (scheme, rest) |
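To see the slash restoration in isolation, a stand-in response object (not part of pip) is enough:
class _FakeResp(object):
    def __init__(self, url):
        self._url = url
    def geturl(self):
        return self._url

print(geturl(_FakeResp('file:/tmp/archive.tar.gz')))  # file:///tmp/archive.tar.gz
print(geturl(_FakeResp('https://example.com/pkg')))   # unchanged: https://example.com/pkg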
def get_log_rhos(target_action_log_probs, behaviour_action_log_probs):
"""With the selected log_probs for multi-discrete actions of behaviour
and target policies we compute the log_rhos for calculating the vtrace."""
t = tf.stack(target_action_log_probs)
b = tf.stack(behaviour_action_log_probs)
log_rhos = tf.reduce_sum(t - b, axis=0)
return log_rhos | With the selected log_probs for multi-discrete actions of behaviour
and target policies we compute the log_rhos for calculating the vtrace. | Below is the instruction that describes the task:
### Input:
With the selected log_probs for multi-discrete actions of behaviour
and target policies we compute the log_rhos for calculating the vtrace.
### Response:
def get_log_rhos(target_action_log_probs, behaviour_action_log_probs):
"""With the selected log_probs for multi-discrete actions of behaviour
and target policies we compute the log_rhos for calculating the vtrace."""
t = tf.stack(target_action_log_probs)
b = tf.stack(behaviour_action_log_probs)
log_rhos = tf.reduce_sum(t - b, axis=0)
return log_rhos |
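A hedged sketch of a call, assuming TensorFlow 2 with eager execution so the result can be inspected directly; the numbers are invented and stand for per-dimension log-probabilities over a batch of three steps.
import tensorflow as tf

# Two action dimensions, three timesteps each.
target_logps = [tf.constant([-0.1, -0.2, -0.3]), tf.constant([-0.5, -0.4, -0.6])]
behaviour_logps = [tf.constant([-0.2, -0.2, -0.4]), tf.constant([-0.5, -0.5, -0.5])]

log_rhos = get_log_rhos(target_logps, behaviour_logps)
print(log_rhos.numpy())  # shape (3,): log importance ratios summed over action dims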
def local_renderer(self):
"""
Retrieves the cached local renderer.
"""
if not self._local_renderer:
r = self.create_local_renderer()
self._local_renderer = r
        return self._local_renderer | Retrieves the cached local renderer. | Below is the instruction that describes the task:
### Input:
Retrieves the cached local renderer.
### Response:
def local_renderer(self):
"""
Retrieves the cached local renderer.
"""
if not self._local_renderer:
r = self.create_local_renderer()
self._local_renderer = r
return self._local_renderer |
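The property is a lazy-initialisation cache: the renderer is built on first access and reused afterwards. A self-contained toy version of the same pattern, with object() standing in for the real renderer factory (class and names here are illustrative only):
class _Cached:
    """Illustrative reimplementation of the lazy-cache pattern."""
    def __init__(self):
        self._local_renderer = None

    def create_local_renderer(self):
        return object()  # stand-in for the real renderer factory

    @property
    def local_renderer(self):
        if not self._local_renderer:
            self._local_renderer = self.create_local_renderer()
        return self._local_renderer

obj = _Cached()
assert obj.local_renderer is obj.local_renderer  # built once, then cached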
def sflow_profile_profile_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
sflow_profile = ET.SubElement(config, "sflow-profile", xmlns="urn:brocade.com:mgmt:brocade-sflow")
profile_name = ET.SubElement(sflow_profile, "profile-name")
profile_name.text = kwargs.pop('profile_name')
callback = kwargs.pop('callback', self._callback)
        return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def sflow_profile_profile_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
sflow_profile = ET.SubElement(config, "sflow-profile", xmlns="urn:brocade.com:mgmt:brocade-sflow")
profile_name = ET.SubElement(sflow_profile, "profile-name")
profile_name.text = kwargs.pop('profile_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
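The method only assembles a NETCONF-style XML payload and hands it to a callback. A standalone sketch of the document it builds, using a hypothetical profile name and an explicit ElementTree import:
import xml.etree.ElementTree as ET

config = ET.Element("config")
sflow_profile = ET.SubElement(config, "sflow-profile",
                              xmlns="urn:brocade.com:mgmt:brocade-sflow")
profile_name = ET.SubElement(sflow_profile, "profile-name")
profile_name.text = "sflow-default"  # hypothetical profile name

print(ET.tostring(config).decode())
# -> <config><sflow-profile xmlns="urn:brocade.com:mgmt:brocade-sflow"><profile-name>sflow-default</profile-name></sflow-profile></config>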
def gene_filter(self, query, mongo_query):
""" Adds gene-related filters to the query object
Args:
query(dict): a dictionary of query filters specified by the users
mongo_query(dict): the query that is going to be submitted to the database
Returns:
mongo_query(dict): returned object contains gene and panel-related filters
"""
LOG.debug('Adding panel and genes-related parameters to the query')
gene_query = []
if query.get('hgnc_symbols') and query.get('gene_panels'):
gene_query.append({'hgnc_symbols': {'$in': query['hgnc_symbols']}})
gene_query.append({'panels': {'$in': query['gene_panels']}})
mongo_query['$or']=gene_query
else:
if query.get('hgnc_symbols'):
hgnc_symbols = query['hgnc_symbols']
mongo_query['hgnc_symbols'] = {'$in': hgnc_symbols}
LOG.debug("Adding hgnc_symbols: %s to query" %
', '.join(hgnc_symbols))
if query.get('gene_panels'):
gene_panels = query['gene_panels']
mongo_query['panels'] = {'$in': gene_panels}
return gene_query | Adds gene-related filters to the query object
Args:
query(dict): a dictionary of query filters specified by the users
mongo_query(dict): the query that is going to be submitted to the database
Returns:
mongo_query(dict): returned object contains gene and panel-related filters | Below is the instruction that describes the task:
### Input:
Adds gene-related filters to the query object
Args:
query(dict): a dictionary of query filters specified by the users
mongo_query(dict): the query that is going to be submitted to the database
Returns:
mongo_query(dict): returned object contains gene and panel-related filters
### Response:
def gene_filter(self, query, mongo_query):
""" Adds gene-related filters to the query object
Args:
query(dict): a dictionary of query filters specified by the users
mongo_query(dict): the query that is going to be submitted to the database
Returns:
mongo_query(dict): returned object contains gene and panel-related filters
"""
LOG.debug('Adding panel and genes-related parameters to the query')
gene_query = []
if query.get('hgnc_symbols') and query.get('gene_panels'):
gene_query.append({'hgnc_symbols': {'$in': query['hgnc_symbols']}})
gene_query.append({'panels': {'$in': query['gene_panels']}})
mongo_query['$or']=gene_query
else:
if query.get('hgnc_symbols'):
hgnc_symbols = query['hgnc_symbols']
mongo_query['hgnc_symbols'] = {'$in': hgnc_symbols}
LOG.debug("Adding hgnc_symbols: %s to query" %
', '.join(hgnc_symbols))
if query.get('gene_panels'):
gene_panels = query['gene_panels']
mongo_query['panels'] = {'$in': gene_panels}
return gene_query |
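To make the combined branch concrete, the filter shape produced when both HGNC symbols and gene panels are supplied looks like the following; the values are hypothetical and the snippet rebuilds the $or clause outside the adapter class:
query = {'hgnc_symbols': ['POT1'], 'gene_panels': ['cancer']}
mongo_query = {}

gene_query = [
    {'hgnc_symbols': {'$in': query['hgnc_symbols']}},
    {'panels': {'$in': query['gene_panels']}},
]
mongo_query['$or'] = gene_query

print(mongo_query)
# {'$or': [{'hgnc_symbols': {'$in': ['POT1']}}, {'panels': {'$in': ['cancer']}}]}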
def describe(id_or_link, **kwargs):
'''
:param id_or_link: String containing an object ID or dict containing a DXLink,
or a list of object IDs or dicts containing a DXLink.
Given an object ID, calls :meth:`~dxpy.bindings.DXDataObject.describe` on the object.
Example::
describe("file-1234")
Given a list of object IDs, calls :meth:`~dxpy.api.system_describe_data_objects`.
Example::
describe(["file-1234", "workflow-5678"])
Note: If id_or_link is a list and **kwargs contains a "fields" parameter, these
fields will be returned in the response for each data object in addition to the
fields included by default. Additionally, describe options can be provided for
each data object class in the "classDescribeOptions" kwargs argument. See
https://wiki.dnanexus.com/API-Specification-v1.0.0/System-Methods#API-method:-/system/describeDataObjects
for input parameters used with the multiple object describe method.
'''
# If this is a list, extract the ids.
# TODO: modify the procedure to use project ID when possible
if isinstance(id_or_link, basestring) or is_dxlink(id_or_link):
handler = get_handler(id_or_link)
return handler.describe(**kwargs)
else:
links = []
for link in id_or_link:
# If this entry is a dxlink, then get the id.
if is_dxlink(link):
# Guaranteed by is_dxlink that one of the following will work
if isinstance(link['$dnanexus_link'], basestring):
link = link['$dnanexus_link']
else:
link = link['$dnanexus_link']['id']
links.append(link)
# Prepare input to system_describe_data_objects, the same fields will be passed
# for all data object classes; if a class doesn't include a field in its describe
# output, it will be ignored
describe_input = \
dict([(field, True) for field in kwargs['fields']]) if kwargs.get('fields', []) else True
describe_links_input = [{'id': link, 'describe': describe_input} for link in links]
bulk_describe_input = {'objects': describe_links_input}
if 'classDescribeOptions' in kwargs:
bulk_describe_input['classDescribeOptions'] = kwargs['classDescribeOptions']
data_object_descriptions = dxpy.api.system_describe_data_objects(bulk_describe_input)
return [desc['describe'] for desc in data_object_descriptions['results']] | :param id_or_link: String containing an object ID or dict containing a DXLink,
or a list of object IDs or dicts containing a DXLink.
Given an object ID, calls :meth:`~dxpy.bindings.DXDataObject.describe` on the object.
Example::
describe("file-1234")
Given a list of object IDs, calls :meth:`~dxpy.api.system_describe_data_objects`.
Example::
describe(["file-1234", "workflow-5678"])
Note: If id_or_link is a list and **kwargs contains a "fields" parameter, these
fields will be returned in the response for each data object in addition to the
fields included by default. Additionally, describe options can be provided for
each data object class in the "classDescribeOptions" kwargs argument. See
https://wiki.dnanexus.com/API-Specification-v1.0.0/System-Methods#API-method:-/system/describeDataObjects
for input parameters used with the multiple object describe method. | Below is the instruction that describes the task:
### Input:
:param id_or_link: String containing an object ID or dict containing a DXLink,
or a list of object IDs or dicts containing a DXLink.
Given an object ID, calls :meth:`~dxpy.bindings.DXDataObject.describe` on the object.
Example::
describe("file-1234")
Given a list of object IDs, calls :meth:`~dxpy.api.system_describe_data_objects`.
Example::
describe(["file-1234", "workflow-5678"])
Note: If id_or_link is a list and **kwargs contains a "fields" parameter, these
fields will be returned in the response for each data object in addition to the
fields included by default. Additionally, describe options can be provided for
each data object class in the "classDescribeOptions" kwargs argument. See
https://wiki.dnanexus.com/API-Specification-v1.0.0/System-Methods#API-method:-/system/describeDataObjects
for input parameters used with the multiple object describe method.
### Response:
def describe(id_or_link, **kwargs):
'''
:param id_or_link: String containing an object ID or dict containing a DXLink,
or a list of object IDs or dicts containing a DXLink.
Given an object ID, calls :meth:`~dxpy.bindings.DXDataObject.describe` on the object.
Example::
describe("file-1234")
Given a list of object IDs, calls :meth:`~dxpy.api.system_describe_data_objects`.
Example::
describe(["file-1234", "workflow-5678"])
Note: If id_or_link is a list and **kwargs contains a "fields" parameter, these
fields will be returned in the response for each data object in addition to the
fields included by default. Additionally, describe options can be provided for
each data object class in the "classDescribeOptions" kwargs argument. See
https://wiki.dnanexus.com/API-Specification-v1.0.0/System-Methods#API-method:-/system/describeDataObjects
for input parameters used with the multiple object describe method.
'''
# If this is a list, extract the ids.
# TODO: modify the procedure to use project ID when possible
if isinstance(id_or_link, basestring) or is_dxlink(id_or_link):
handler = get_handler(id_or_link)
return handler.describe(**kwargs)
else:
links = []
for link in id_or_link:
# If this entry is a dxlink, then get the id.
if is_dxlink(link):
# Guaranteed by is_dxlink that one of the following will work
if isinstance(link['$dnanexus_link'], basestring):
link = link['$dnanexus_link']
else:
link = link['$dnanexus_link']['id']
links.append(link)
# Prepare input to system_describe_data_objects, the same fields will be passed
# for all data object classes; if a class doesn't include a field in its describe
# output, it will be ignored
describe_input = \
dict([(field, True) for field in kwargs['fields']]) if kwargs.get('fields', []) else True
describe_links_input = [{'id': link, 'describe': describe_input} for link in links]
bulk_describe_input = {'objects': describe_links_input}
if 'classDescribeOptions' in kwargs:
bulk_describe_input['classDescribeOptions'] = kwargs['classDescribeOptions']
data_object_descriptions = dxpy.api.system_describe_data_objects(bulk_describe_input)
return [desc['describe'] for desc in data_object_descriptions['results']] |
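A hedged usage sketch built on the function above; the object IDs are placeholders and an authenticated DNAnexus session is assumed, so the helper only shows how the bulk path with extra fields might be driven.
def names_and_sizes(object_ids):
    """Describe several objects at once, requesting two extra fields."""
    descriptions = describe(object_ids, fields=["name", "size"])
    return [(d.get("name"), d.get("size")) for d in descriptions]

# names_and_sizes(["file-1234", "file-5678"])  # placeholder IDs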
def create_column(self, name):
"""
calls: `POST https://developer.github.com/v3/projects/columns/#create-a-project-column>`_
:param name: string
"""
assert isinstance(name, (str, unicode)), name
post_parameters = {"name": name}
import_header = {"Accept": Consts.mediaTypeProjectsPreview}
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/columns",
headers=import_header,
input=post_parameters
)
return github.ProjectColumn.ProjectColumn(self._requester, headers, data, completed=True) | calls: `POST https://developer.github.com/v3/projects/columns/#create-a-project-column>`_
:param name: string | Below is the instruction that describes the task:
### Input:
calls: `POST https://developer.github.com/v3/projects/columns/#create-a-project-column>`_
:param name: string
### Response:
def create_column(self, name):
"""
calls: `POST https://developer.github.com/v3/projects/columns/#create-a-project-column>`_
:param name: string
"""
assert isinstance(name, (str, unicode)), name
post_parameters = {"name": name}
import_header = {"Accept": Consts.mediaTypeProjectsPreview}
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/columns",
headers=import_header,
input=post_parameters
)
return github.ProjectColumn.ProjectColumn(self._requester, headers, data, completed=True) |
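An illustrative wrapper showing where this call sits in a PyGithub workflow; the token and repository name are placeholders, and the repository is assumed to already have at least one project.
from github import Github

def add_todo_column(token, repo_full_name):
    """Hypothetical helper: create a 'To do' column on the repo's first project."""
    gh = Github(token)
    project = gh.get_repo(repo_full_name).get_projects()[0]
    return project.create_column("To do")

# add_todo_column("<personal-access-token>", "octocat/Hello-World")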
def stream_subsegments(self):
"""
Stream all closed subsegments to the daemon
and remove reference to the parent segment.
No-op for a not sampled segment.
"""
segment = self.current_segment()
if self.streaming.is_eligible(segment):
self.streaming.stream(segment, self._stream_subsegment_out) | Stream all closed subsegments to the daemon
and remove reference to the parent segment.
No-op for a not sampled segment. | Below is the instruction that describes the task:
### Input:
Stream all closed subsegments to the daemon
and remove reference to the parent segment.
No-op for a not sampled segment.
### Response:
def stream_subsegments(self):
"""
Stream all closed subsegments to the daemon
and remove reference to the parent segment.
No-op for a not sampled segment.
"""
segment = self.current_segment()
if self.streaming.is_eligible(segment):
self.streaming.stream(segment, self._stream_subsegment_out) |