code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k)
---|---|---|
def printable_name(column, path=None):
"""Provided for debug output when rendering conditions.
User.name[3]["foo"][0]["bar"] -> name[3].foo[0].bar
"""
pieces = [column.name]
path = path or path_of(column)
for segment in path:
if isinstance(segment, str):
pieces.append(segment)
else:
pieces[-1] += "[{}]".format(segment)
return ".".join(pieces) | Provided for debug output when rendering conditions.
User.name[3]["foo"][0]["bar"] -> name[3].foo[0].bar | Below is the the instruction that describes the task:
### Input:
Provided for debug output when rendering conditions.
User.name[3]["foo"][0]["bar"] -> name[3].foo[0].bar
### Response:
def printable_name(column, path=None):
"""Provided for debug output when rendering conditions.
User.name[3]["foo"][0]["bar"] -> name[3].foo[0].bar
"""
pieces = [column.name]
path = path or path_of(column)
for segment in path:
if isinstance(segment, str):
pieces.append(segment)
else:
pieces[-1] += "[{}]".format(segment)
return ".".join(pieces) |
def train(self, s, path="spelling.txt"):
""" Counts the words in the given string and saves the probabilities at the given path.
This can be used to generate a new model for the Spelling() constructor.
"""
model = {}
for w in re.findall("[a-z]+", s.lower()):
model[w] = w in model and model[w] + 1 or 1
model = ("%s %s" % (k, v) for k, v in sorted(model.items()))
model = "\n".join(model)
f = open(path, "w")
f.write(model)
f.close() | Counts the words in the given string and saves the probabilities at the given path.
This can be used to generate a new model for the Spelling() constructor. | Below is the instruction that describes the task:
### Input:
Counts the words in the given string and saves the probabilities at the given path.
This can be used to generate a new model for the Spelling() constructor.
### Response:
def train(self, s, path="spelling.txt"):
""" Counts the words in the given string and saves the probabilities at the given path.
This can be used to generate a new model for the Spelling() constructor.
"""
model = {}
for w in re.findall("[a-z]+", s.lower()):
model[w] = w in model and model[w] + 1 or 1
model = ("%s %s" % (k, v) for k, v in sorted(model.items()))
model = "\n".join(model)
f = open(path, "w")
f.write(model)
f.close() |
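A quick sketch of what train produces, assuming re is imported in the module; since the body never touches self, passing None for it is enough for a demo, and the file ends up with one "word count" pair per line:
import re  # required by train()

train(None, "The cat sat. The cat ran.", path="spelling.txt")  # self is unused, so None suffices for this demo
print(open("spelling.txt").read())
# cat 2
# ran 1
# sat 1
# the 2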
def _rr_line(self, section):
"""Process one line from the text format answer, authority, or
additional data sections.
"""
deleting = None
# Name
token = self.tok.get(want_leading = True)
if not token.is_whitespace():
self.last_name = dns.name.from_text(token.value, None)
name = self.last_name
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
# TTL
try:
ttl = int(token.value, 0)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.exception.SyntaxError:
raise dns.exception.SyntaxError
except Exception:
ttl = 0
# Class
try:
rdclass = dns.rdataclass.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
if rdclass == dns.rdataclass.ANY or rdclass == dns.rdataclass.NONE:
deleting = rdclass
rdclass = self.zone_rdclass
except dns.exception.SyntaxError:
raise dns.exception.SyntaxError
except Exception:
rdclass = dns.rdataclass.IN
# Type
rdtype = dns.rdatatype.from_text(token.value)
token = self.tok.get()
if not token.is_eol_or_eof():
self.tok.unget(token)
rd = dns.rdata.from_text(rdclass, rdtype, self.tok, None)
covers = rd.covers()
else:
rd = None
covers = dns.rdatatype.NONE
rrset = self.message.find_rrset(section, name,
rdclass, rdtype, covers,
deleting, True, self.updating)
if rd is not None:
rrset.add(rd, ttl) | Process one line from the text format answer, authority, or
additional data sections. | Below is the instruction that describes the task:
### Input:
Process one line from the text format answer, authority, or
additional data sections.
### Response:
def _rr_line(self, section):
"""Process one line from the text format answer, authority, or
additional data sections.
"""
deleting = None
# Name
token = self.tok.get(want_leading = True)
if not token.is_whitespace():
self.last_name = dns.name.from_text(token.value, None)
name = self.last_name
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
# TTL
try:
ttl = int(token.value, 0)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.exception.SyntaxError:
raise dns.exception.SyntaxError
except Exception:
ttl = 0
# Class
try:
rdclass = dns.rdataclass.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
if rdclass == dns.rdataclass.ANY or rdclass == dns.rdataclass.NONE:
deleting = rdclass
rdclass = self.zone_rdclass
except dns.exception.SyntaxError:
raise dns.exception.SyntaxError
except Exception:
rdclass = dns.rdataclass.IN
# Type
rdtype = dns.rdatatype.from_text(token.value)
token = self.tok.get()
if not token.is_eol_or_eof():
self.tok.unget(token)
rd = dns.rdata.from_text(rdclass, rdtype, self.tok, None)
covers = rd.covers()
else:
rd = None
covers = dns.rdatatype.NONE
rrset = self.message.find_rrset(section, name,
rdclass, rdtype, covers,
deleting, True, self.updating)
if rd is not None:
rrset.add(rd, ttl) |
def get_op(self):
"""Returns all symmetry operations (including inversions and
subtranslations), but unlike get_symop(), they are returned as
two ndarrays."""
if self.centrosymmetric:
rot = np.tile(np.vstack((self.rotations, -self.rotations)),
(self.nsubtrans, 1, 1))
trans = np.repeat(self.subtrans, 2*len(self.rotations), axis=0)
else:
rot = np.tile(self.rotations, (self.nsubtrans, 1, 1))
trans = np.repeat(self.subtrans, len(self.rotations), axis=0)
return rot, trans | Returns all symmetry operations (including inversions and
subtranslations), but unlike get_symop(), they are returned as
two ndarrays. | Below is the instruction that describes the task:
### Input:
Returns all symmetry operations (including inversions and
subtranslations), but unlike get_symop(), they are returned as
two ndarrays.
### Response:
def get_op(self):
"""Returns all symmetry operations (including inversions and
subtranslations), but unlike get_symop(), they are returned as
two ndarrays."""
if self.centrosymmetric:
rot = np.tile(np.vstack((self.rotations, -self.rotations)),
(self.nsubtrans, 1, 1))
trans = np.repeat(self.subtrans, 2*len(self.rotations), axis=0)
else:
rot = np.tile(self.rotations, (self.nsubtrans, 1, 1))
trans = np.repeat(self.subtrans, len(self.rotations), axis=0)
return rot, trans |
def readerWalker(self):
"""Create an xmltextReader for a preparsed document. """
ret = libxml2mod.xmlReaderWalker(self._o)
if ret is None: raise treeError('xmlReaderWalker() failed')
__tmp = xmlTextReader(_obj=ret)
return __tmp | Create an xmltextReader for a preparsed document. | Below is the instruction that describes the task:
### Input:
Create an xmltextReader for a preparsed document.
### Response:
def readerWalker(self):
"""Create an xmltextReader for a preparsed document. """
ret = libxml2mod.xmlReaderWalker(self._o)
if ret is None: raise treeError('xmlReaderWalker() failed')
__tmp = xmlTextReader(_obj=ret)
return __tmp |
def mosaicMethod(self, value):
"""
get/set the mosaic method
"""
if value in self.__allowedMosaicMethods and \
self._mosaicMethod != value:
self._mosaicMethod = value | get/set the mosaic method | Below is the instruction that describes the task:
### Input:
get/set the mosaic method
### Response:
def mosaicMethod(self, value):
"""
get/set the mosaic method
"""
if value in self.__allowedMosaicMethods and \
self._mosaicMethod != value:
self._mosaicMethod = value |
def _parse_peer_address(self, config):
"""Scans the config block and parses the peer-address value
Args:
config (str): The config block to scan
Returns:
dict: A dict object that is intended to be merged into the
resource dict
"""
match = re.search(r'peer-address ([^\s]+)', config)
value = match.group(1) if match else None
return dict(peer_address=value) | Scans the config block and parses the peer-address value
Args:
config (str): The config block to scan
Returns:
dict: A dict object that is intended to be merged into the
resource dict | Below is the instruction that describes the task:
### Input:
Scans the config block and parses the peer-address value
Args:
config (str): The config block to scan
Returns:
dict: A dict object that is intended to be merged into the
resource dict
### Response:
def _parse_peer_address(self, config):
"""Scans the config block and parses the peer-address value
Args:
config (str): The config block to scan
Returns:
dict: A dict object that is intended to be merged into the
resource dict
"""
match = re.search(r'peer-address ([^\s]+)', config)
value = match.group(1) if match else None
return dict(peer_address=value) |
def intersection(L1, L2):
"""Intersects two line segments
Args:
L1 ([float, float]): x and y coordinates
L2 ([float, float]): x and y coordinates
Returns:
bool: if they intersect
(float, float): x and y of intersection, if they do
"""
D = L1[0] * L2[1] - L1[1] * L2[0]
Dx = L1[2] * L2[1] - L1[1] * L2[2]
Dy = L1[0] * L2[2] - L1[2] * L2[0]
if D != 0:
x = Dx / D
y = Dy / D
return x, y
else:
return False | Intersects two line segments
Args:
L1 ([float, float]): x and y coordinates
L2 ([float, float]): x and y coordinates
Returns:
bool: if they intersect
(float, float): x and y of intersection, if they do | Below is the instruction that describes the task:
### Input:
Intersects two line segments
Args:
L1 ([float, float]): x and y coordinates
L2 ([float, float]): x and y coordinates
Returns:
bool: if they intersect
(float, float): x and y of intersection, if they do
### Response:
def intersection(L1, L2):
"""Intersects two line segments
Args:
L1 ([float, float]): x and y coordinates
L2 ([float, float]): x and y coordinates
Returns:
bool: if they intersect
(float, float): x and y of intersection, if they do
"""
D = L1[0] * L2[1] - L1[1] * L2[0]
Dx = L1[2] * L2[1] - L1[1] * L2[2]
Dy = L1[0] * L2[2] - L1[2] * L2[0]
if D != 0:
x = Dx / D
y = Dy / D
return x, y
else:
return False |
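Despite the docstring describing L1 and L2 as coordinate pairs, the body indexes three elements, so each argument is the (a, b, c) coefficient form of a line, as in the classic Cramer's-rule recipe. The line() helper below is an assumption for illustration, not part of the source:
def line(p1, p2):
    """Assumed helper: coefficients (a, b, c) of the line through points p1 and p2."""
    a = p1[1] - p2[1]
    b = p2[0] - p1[0]
    c = -(p1[0] * p2[1] - p2[0] * p1[1])
    return a, b, c

L1 = line((0, 0), (4, 4))    # y = x
L2 = line((0, 4), (4, 0))    # y = 4 - x
print(intersection(L1, L2))  # (2.0, 2.0)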
def getNonDefaultsDict(self):
""" Recursively retrieves values as a dictionary to be used for persistence.
Does not save defaultData and other properties, only stores values if they differ from
the defaultData. If the CTI and none of its children differ from their default, a
completely empty dictionary is returned. This is to achieve a smaller json
representation.
Typically descendants should override _nodeGetNonDefaultsDict instead of this function.
"""
dct = self._nodeGetNonDefaultsDict()
childList = []
for childCti in self.childItems:
childDct = childCti.getNonDefaultsDict()
if childDct:
childList.append(childDct)
if childList:
dct['childItems'] = childList
if dct:
dct['nodeName'] = self.nodeName
return dct | Recursively retrieves values as a dictionary to be used for persistence.
Does not save defaultData and other properties, only stores values if they differ from
the defaultData. If the CTI and none of its children differ from their default, a
completely empty dictionary is returned. This is to achieve a smaller json
representation.
Typically descendants should override _nodeGetNonDefaultsDict instead of this function. | Below is the instruction that describes the task:
### Input:
Recursively retrieves values as a dictionary to be used for persistence.
Does not save defaultData and other properties, only stores values if they differ from
the defaultData. If the CTI and none of its children differ from their default, a
completely empty dictionary is returned. This is to achieve a smaller json
representation.
Typically descendants should override _nodeGetNonDefaultsDict instead of this function.
### Response:
def getNonDefaultsDict(self):
""" Recursively retrieves values as a dictionary to be used for persistence.
Does not save defaultData and other properties, only stores values if they differ from
the defaultData. If the CTI and none of its children differ from their default, a
completely empty dictionary is returned. This is to achieve a smaller json
representation.
Typically descendants should override _nodeGetNonDefaultsDict instead of this function.
"""
dct = self._nodeGetNonDefaultsDict()
childList = []
for childCti in self.childItems:
childDct = childCti.getNonDefaultsDict()
if childDct:
childList.append(childDct)
if childList:
dct['childItems'] = childList
if dct:
dct['nodeName'] = self.nodeName
return dct |
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, LegipyModel):
return obj.to_json()
elif isinstance(obj, (datetime.date, datetime.datetime)):
return obj.isoformat()
raise TypeError("Type {0} not serializable".format(repr(type(obj)))) | JSON serializer for objects not serializable by default json code | Below is the the instruction that describes the task:
### Input:
JSON serializer for objects not serializable by default json code
### Response:
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, LegipyModel):
return obj.to_json()
elif isinstance(obj, (datetime.date, datetime.datetime)):
return obj.isoformat()
raise TypeError("Type {0} not serializable".format(repr(type(obj)))) |
def add_message(self, text, type=None):
"""Add a message with an optional type."""
key = self._msg_key
self.setdefault(key, [])
self[key].append(message(type, text))
self.save() | Add a message with an optional type. | Below is the instruction that describes the task:
### Input:
Add a message with an optional type.
### Response:
def add_message(self, text, type=None):
"""Add a message with an optional type."""
key = self._msg_key
self.setdefault(key, [])
self[key].append(message(type, text))
self.save() |
def list_projects(root, backend=os.listdir):
"""List projects at `root`
Arguments:
root (str): Absolute path to the `be` root directory,
typically the current working directory.
"""
projects = list()
for project in sorted(backend(root)):
abspath = os.path.join(root, project)
if not isproject(abspath):
continue
projects.append(project)
return projects | List projects at `root`
Arguments:
root (str): Absolute path to the `be` root directory,
typically the current working directory. | Below is the instruction that describes the task:
### Input:
List projects at `root`
Arguments:
root (str): Absolute path to the `be` root directory,
typically the current working directory.
### Response:
def list_projects(root, backend=os.listdir):
"""List projects at `root`
Arguments:
root (str): Absolute path to the `be` root directory,
typically the current working directory.
"""
projects = list()
for project in sorted(backend(root)):
abspath = os.path.join(root, project)
if not isproject(abspath):
continue
projects.append(project)
return projects |
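Because the directory listing is injected through the backend parameter, list_projects can be exercised without touching the filesystem. The sketch below stubs both the listing and the module-level isproject() check, which the function relies on but which is not shown here:
import os

def fake_listdir(root):
    # Stand-in for os.listdir: pretend `root` holds these entries.
    return ["projectB", "projectA", "notes.txt"]

# isproject() is assumed to take an absolute path; stub it for the demo.
isproject = lambda abspath: not abspath.endswith(".txt")

print(list_projects("/projects", backend=fake_listdir))  # ['projectA', 'projectB']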
def _get_snapshot(vm, snapshot_name):
"""
Returns snapshot object by its name
:param vm:
:param snapshot_name:
:type snapshot_name: str
:return: Snapshot by its name
:rtype vim.vm.Snapshot
"""
snapshots = SnapshotRetriever.get_vm_snapshots(vm)
if snapshot_name not in snapshots:
raise SnapshotNotFoundException('Snapshot {0} was not found'.format(snapshot_name))
return snapshots[snapshot_name] | Returns snapshot object by its name
:param vm:
:param snapshot_name:
:type snapshot_name: str
:return: Snapshot by its name
:rtype vim.vm.Snapshot | Below is the instruction that describes the task:
### Input:
Returns snapshot object by its name
:param vm:
:param snapshot_name:
:type snapshot_name: str
:return: Snapshot by its name
:rtype vim.vm.Snapshot
### Response:
def _get_snapshot(vm, snapshot_name):
"""
Returns snapshot object by its name
:param vm:
:param snapshot_name:
:type snapshot_name: str
:return: Snapshot by its name
:rtype vim.vm.Snapshot
"""
snapshots = SnapshotRetriever.get_vm_snapshots(vm)
if snapshot_name not in snapshots:
raise SnapshotNotFoundException('Snapshot {0} was not found'.format(snapshot_name))
return snapshots[snapshot_name] |
def _constructClient(client_version, username, user_domain, password, project_name, project_domain,
auth_url):
"""Return a novaclient from the given args."""
loader = loading.get_plugin_loader('password')
# These only work with v3
if user_domain is not None or project_domain is not None:
auth = loader.load_from_options(auth_url=auth_url, username=username, user_domain_name=user_domain,
password=password, project_name=project_name, project_domain_name=project_domain)
else:
auth = loader.load_from_options(auth_url=auth_url, username=username,
password=password, project_name=project_name)
sess = session.Session(auth=auth)
return client.Client(client_version, session=sess) | Return a novaclient from the given args. | Below is the instruction that describes the task:
### Input:
Return a novaclient from the given args.
### Response:
def _constructClient(client_version, username, user_domain, password, project_name, project_domain,
auth_url):
"""Return a novaclient from the given args."""
loader = loading.get_plugin_loader('password')
# These only work with v3
if user_domain is not None or project_domain is not None:
auth = loader.load_from_options(auth_url=auth_url, username=username, user_domain_name=user_domain,
password=password, project_name=project_name, project_domain_name=project_domain)
else:
auth = loader.load_from_options(auth_url=auth_url, username=username,
password=password, project_name=project_name)
sess = session.Session(auth=auth)
return client.Client(client_version, session=sess) |
def get_transactions(self, account_id, **params):
"""https://developers.coinbase.com/api/v2#list-transactions"""
response = self._get('v2', 'accounts', account_id, 'transactions', params=params)
return self._make_api_object(response, Transaction) | https://developers.coinbase.com/api/v2#list-transactions | Below is the instruction that describes the task:
### Input:
https://developers.coinbase.com/api/v2#list-transactions
### Response:
def get_transactions(self, account_id, **params):
"""https://developers.coinbase.com/api/v2#list-transactions"""
response = self._get('v2', 'accounts', account_id, 'transactions', params=params)
return self._make_api_object(response, Transaction) |
def serialize(pca, **kwargs):
"""
Serialize an orientation object to a dict suitable
for JSON
"""
strike, dip, rake = pca.strike_dip_rake()
hyp_axes = sampling_axes(pca)
return dict(
**kwargs,
principal_axes = pca.axes.tolist(),
hyperbolic_axes = hyp_axes.tolist(),
n_samples = pca.n,
strike=strike, dip=dip, rake=rake,
angular_errors=[2*N.degrees(i)
for i in angular_errors(hyp_axes)]) | Serialize an orientation object to a dict suitable
for JSON | Below is the instruction that describes the task:
### Input:
Serialize an orientation object to a dict suitable
for JSON
### Response:
def serialize(pca, **kwargs):
"""
Serialize an orientation object to a dict suitable
for JSON
"""
strike, dip, rake = pca.strike_dip_rake()
hyp_axes = sampling_axes(pca)
return dict(
**kwargs,
principal_axes = pca.axes.tolist(),
hyperbolic_axes = hyp_axes.tolist(),
n_samples = pca.n,
strike=strike, dip=dip, rake=rake,
angular_errors=[2*N.degrees(i)
for i in angular_errors(hyp_axes)]) |
def data(self, index, role):
"""Get the information of the levels."""
if not index.isValid():
return None
if role == Qt.FontRole:
return self._font
label = ''
if index.column() == self.model.header_shape[1] - 1:
label = str(self.model.name(0, index.row()))
elif index.row() == self.model.header_shape[0] - 1:
label = str(self.model.name(1, index.column()))
if role == Qt.DisplayRole and label:
return label
elif role == Qt.ForegroundRole:
return self._foreground
elif role == Qt.BackgroundRole:
return self._background
elif role == Qt.BackgroundRole:
return self._palette.window()
return None | Get the information of the levels. | Below is the instruction that describes the task:
### Input:
Get the information of the levels.
### Response:
def data(self, index, role):
"""Get the information of the levels."""
if not index.isValid():
return None
if role == Qt.FontRole:
return self._font
label = ''
if index.column() == self.model.header_shape[1] - 1:
label = str(self.model.name(0, index.row()))
elif index.row() == self.model.header_shape[0] - 1:
label = str(self.model.name(1, index.column()))
if role == Qt.DisplayRole and label:
return label
elif role == Qt.ForegroundRole:
return self._foreground
elif role == Qt.BackgroundRole:
return self._background
elif role == Qt.BackgroundRole:
return self._palette.window()
return None |
def path_join(*args):
"""Join path parts to single path."""
return SEP.join((x for x in args if x not in (None, ''))).strip(SEP) | Join path parts to single path. | Below is the instruction that describes the task:
### Input:
Join path parts to single path.
### Response:
def path_join(*args):
"""Join path parts to single path."""
return SEP.join((x for x in args if x not in (None, ''))).strip(SEP) |
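A couple of illustrative calls, assuming the module-level SEP constant is "/" (its definition is not shown in this snippet); None and empty parts are skipped and outer separators are trimmed:
SEP = "/"  # assumed value of the module constant

print(path_join("api", None, "v2", "", "users"))  # api/v2/users
print(path_join("/", "media", "avatars"))         # media/avatars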
def on_start(self):
"""
start publisher
"""
LOGGER.debug("zeromq.Publisher.on_start")
try:
self.zmqsocket.bind(self.zmqbind_url)
except Exception as e:
LOGGER.error("zeromq.Publisher.on_start - error while binding publisher ! " + e.__cause__)
raise e | start publisher | Below is the instruction that describes the task:
### Input:
start publisher
### Response:
def on_start(self):
"""
start publisher
"""
LOGGER.debug("zeromq.Publisher.on_start")
try:
self.zmqsocket.bind(self.zmqbind_url)
except Exception as e:
LOGGER.error("zeromq.Publisher.on_start - error while binding publisher ! " + e.__cause__)
raise e |
def solve(self, value, filter_):
"""Get slice or entry defined by an index from the given value.
Arguments
---------
value : ?
A value to solve in combination with the given filter.
filter_ : dataql.resource.SliceFilter
An instance of ``SliceFilter`` to solve with the given value.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> solver = SliceSolver(registry)
>>> solver.solve([1, 2, 3], SliceFilter(1))
2
>>> solver.solve([1, 2, 3], SliceFilter(slice(1, None, None)))
[2, 3]
>>> solver.solve([1, 2, 3], SliceFilter(slice(0, 2, 2)))
[1]
>>> solver.solve([1, 2, 3], SliceFilter(4))
"""
try:
return value[filter_.slice or filter_.index]
except IndexError:
return None | Get slice or entry defined by an index from the given value.
Arguments
---------
value : ?
A value to solve in combination with the given filter.
filter_ : dataql.resource.SliceFilter
An instance of ``SliceFilter`` to solve with the given value.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> solver = SliceSolver(registry)
>>> solver.solve([1, 2, 3], SliceFilter(1))
2
>>> solver.solve([1, 2, 3], SliceFilter(slice(1, None, None)))
[2, 3]
>>> solver.solve([1, 2, 3], SliceFilter(slice(0, 2, 2)))
[1]
>>> solver.solve([1, 2, 3], SliceFilter(4)) | Below is the instruction that describes the task:
### Input:
Get slice or entry defined by an index from the given value.
Arguments
---------
value : ?
A value to solve in combination with the given filter.
filter_ : dataql.resource.SliceFilter
An instance of ``SliceFilter`` to solve with the given value.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> solver = SliceSolver(registry)
>>> solver.solve([1, 2, 3], SliceFilter(1))
2
>>> solver.solve([1, 2, 3], SliceFilter(slice(1, None, None)))
[2, 3]
>>> solver.solve([1, 2, 3], SliceFilter(slice(0, 2, 2)))
[1]
>>> solver.solve([1, 2, 3], SliceFilter(4))
### Response:
def solve(self, value, filter_):
"""Get slice or entry defined by an index from the given value.
Arguments
---------
value : ?
A value to solve in combination with the given filter.
filter_ : dataql.resource.SliceFilter
An instance of ``SliceFilter`` to solve with the given value.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> solver = SliceSolver(registry)
>>> solver.solve([1, 2, 3], SliceFilter(1))
2
>>> solver.solve([1, 2, 3], SliceFilter(slice(1, None, None)))
[2, 3]
>>> solver.solve([1, 2, 3], SliceFilter(slice(0, 2, 2)))
[1]
>>> solver.solve([1, 2, 3], SliceFilter(4))
"""
try:
return value[filter_.slice or filter_.index]
except IndexError:
return None |
def path(self):
"Return a list of nodes forming the path from the root to this node."
node, path_back = self, []
while node:
path_back.append(node)
node = node.parent
return list(reversed(path_back)) | Return a list of nodes forming the path from the root to this node. | Below is the instruction that describes the task:
### Input:
Return a list of nodes forming the path from the root to this node.
### Response:
def path(self):
"Return a list of nodes forming the path from the root to this node."
node, path_back = self, []
while node:
path_back.append(node)
node = node.parent
return list(reversed(path_back)) |
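path() just walks parent links back to the root; a minimal sketch with a throwaway Node class (the real node type is not shown in this snippet):
class Node:
    def __init__(self, state, parent=None):
        self.state = state
        self.parent = parent

Node.path = path  # attach the function above as a method, purely for this demo

root = Node("A")
leaf = Node("C", parent=Node("B", parent=root))
print([n.state for n in leaf.path()])  # ['A', 'B', 'C']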
def save(self, name, content, max_length=None):
"""
Saves the given content with the given name using the local
storage. If the :attr:`~queued_storage.backends.QueuedStorage.delayed`
attribute is ``True`` this will automatically call the
:meth:`~queued_storage.backends.QueuedStorage.transfer` method
queuing the transfer from local to remote storage.
:param name: file name
:type name: str
:param content: content of the file specified by name
:type content: :class:`~django:django.core.files.File`
:rtype: str
"""
cache_key = self.get_cache_key(name)
cache.set(cache_key, False)
# Use a name that is available on both the local and remote storage
# systems and save locally.
name = self.get_available_name(name)
try:
name = self.local.save(name, content, max_length=max_length)
except TypeError:
# Django < 1.10
name = self.local.save(name, content)
# Pass on the cache key to prevent duplicate cache key creation,
# we save the result in the storage to be able to test for it
if not self.delayed:
self.result = self.transfer(name, cache_key=cache_key)
return name | Saves the given content with the given name using the local
storage. If the :attr:`~queued_storage.backends.QueuedStorage.delayed`
attribute is ``True`` this will automatically call the
:meth:`~queued_storage.backends.QueuedStorage.transfer` method
queuing the transfer from local to remote storage.
:param name: file name
:type name: str
:param content: content of the file specified by name
:type content: :class:`~django:django.core.files.File`
:rtype: str | Below is the instruction that describes the task:
### Input:
Saves the given content with the given name using the local
storage. If the :attr:`~queued_storage.backends.QueuedStorage.delayed`
attribute is ``True`` this will automatically call the
:meth:`~queued_storage.backends.QueuedStorage.transfer` method
queuing the transfer from local to remote storage.
:param name: file name
:type name: str
:param content: content of the file specified by name
:type content: :class:`~django:django.core.files.File`
:rtype: str
### Response:
def save(self, name, content, max_length=None):
"""
Saves the given content with the given name using the local
storage. If the :attr:`~queued_storage.backends.QueuedStorage.delayed`
attribute is ``True`` this will automatically call the
:meth:`~queued_storage.backends.QueuedStorage.transfer` method
queuing the transfer from local to remote storage.
:param name: file name
:type name: str
:param content: content of the file specified by name
:type content: :class:`~django:django.core.files.File`
:rtype: str
"""
cache_key = self.get_cache_key(name)
cache.set(cache_key, False)
# Use a name that is available on both the local and remote storage
# systems and save locally.
name = self.get_available_name(name)
try:
name = self.local.save(name, content, max_length=max_length)
except TypeError:
# Django < 1.10
name = self.local.save(name, content)
# Pass on the cache key to prevent duplicate cache key creation,
# we save the result in the storage to be able to test for it
if not self.delayed:
self.result = self.transfer(name, cache_key=cache_key)
return name |
def toProtocolElement(self):
"""
Returns the GA4GH protocol representation of this ReadGroup.
"""
# TODO this is very incomplete, but we don't have the
# implementation to fill out the rest of the fields currently
readGroup = protocol.ReadGroup()
readGroup.id = self.getId()
readGroup.created = self._creationTime
readGroup.updated = self._updateTime
dataset = self.getParentContainer().getParentContainer()
readGroup.dataset_id = dataset.getId()
readGroup.name = self.getLocalId()
readGroup.predicted_insert_size = pb.int(self.getPredictedInsertSize())
referenceSet = self._parentContainer.getReferenceSet()
readGroup.sample_name = pb.string(self.getSampleName())
readGroup.biosample_id = pb.string(self.getBiosampleId())
if referenceSet is not None:
readGroup.reference_set_id = referenceSet.getId()
readGroup.stats.CopyFrom(self.getStats())
readGroup.programs.extend(self.getPrograms())
readGroup.description = pb.string(self.getDescription())
readGroup.experiment.CopyFrom(self.getExperiment())
self.serializeAttributes(readGroup)
return readGroup | Returns the GA4GH protocol representation of this ReadGroup. | Below is the instruction that describes the task:
### Input:
Returns the GA4GH protocol representation of this ReadGroup.
### Response:
def toProtocolElement(self):
"""
Returns the GA4GH protocol representation of this ReadGroup.
"""
# TODO this is very incomplete, but we don't have the
# implementation to fill out the rest of the fields currently
readGroup = protocol.ReadGroup()
readGroup.id = self.getId()
readGroup.created = self._creationTime
readGroup.updated = self._updateTime
dataset = self.getParentContainer().getParentContainer()
readGroup.dataset_id = dataset.getId()
readGroup.name = self.getLocalId()
readGroup.predicted_insert_size = pb.int(self.getPredictedInsertSize())
referenceSet = self._parentContainer.getReferenceSet()
readGroup.sample_name = pb.string(self.getSampleName())
readGroup.biosample_id = pb.string(self.getBiosampleId())
if referenceSet is not None:
readGroup.reference_set_id = referenceSet.getId()
readGroup.stats.CopyFrom(self.getStats())
readGroup.programs.extend(self.getPrograms())
readGroup.description = pb.string(self.getDescription())
readGroup.experiment.CopyFrom(self.getExperiment())
self.serializeAttributes(readGroup)
return readGroup |
def get_assessment_offered(self):
"""Gets the ``AssessmentOffered``.
return: (osid.assessment.AssessmentOffered) - the assessment
offered
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_objective
if not bool(self._my_map['assessmentOfferedId']):
raise errors.IllegalState('assessment_offered empty')
mgr = self._get_provider_manager('ASSESSMENT')
if not mgr.supports_assessment_offered_lookup():
raise errors.OperationFailed('Assessment does not support AssessmentOffered lookup')
lookup_session = mgr.get_assessment_offered_lookup_session(proxy=getattr(self, "_proxy", None))
lookup_session.use_federated_bank_view()
return lookup_session.get_assessment_offered(self.get_assessment_offered_id()) | Gets the ``AssessmentOffered``.
return: (osid.assessment.AssessmentOffered) - the assessment
offered
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets the ``AssessmentOffered``.
return: (osid.assessment.AssessmentOffered) - the assessment
offered
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_assessment_offered(self):
"""Gets the ``AssessmentOffered``.
return: (osid.assessment.AssessmentOffered) - the assessment
offered
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_objective
if not bool(self._my_map['assessmentOfferedId']):
raise errors.IllegalState('assessment_offered empty')
mgr = self._get_provider_manager('ASSESSMENT')
if not mgr.supports_assessment_offered_lookup():
raise errors.OperationFailed('Assessment does not support AssessmentOffered lookup')
lookup_session = mgr.get_assessment_offered_lookup_session(proxy=getattr(self, "_proxy", None))
lookup_session.use_federated_bank_view()
return lookup_session.get_assessment_offered(self.get_assessment_offered_id()) |
def _call_api_single_related_resource(self, resource, full_resource_url,
method_name, **kwargs):
"""
For HypermediaResource - make an API call to a known URL
"""
url = full_resource_url
params = {
'headers': self.get_http_headers(
resource.Meta.name, method_name, **kwargs),
'url': url
}
prepared_request = self.prepare_http_request(
'GET', params, **kwargs)
response = self.session.send(prepared_request)
return self._handle_response(
response, resource.Meta.valid_status_codes, resource) | For HypermediaResource - make an API call to a known URL | Below is the instruction that describes the task:
### Input:
For HypermediaResource - make an API call to a known URL
### Response:
def _call_api_single_related_resource(self, resource, full_resource_url,
method_name, **kwargs):
"""
For HypermediaResource - make an API call to a known URL
"""
url = full_resource_url
params = {
'headers': self.get_http_headers(
resource.Meta.name, method_name, **kwargs),
'url': url
}
prepared_request = self.prepare_http_request(
'GET', params, **kwargs)
response = self.session.send(prepared_request)
return self._handle_response(
response, resource.Meta.valid_status_codes, resource) |
def start_event_stream(self):
""" Start streaming events from `gerrit stream-events`. """
if not self._stream:
self._stream = GerritStream(self, ssh_client=self._ssh_client)
self._stream.start() | Start streaming events from `gerrit stream-events`. | Below is the instruction that describes the task:
### Input:
Start streaming events from `gerrit stream-events`.
### Response:
def start_event_stream(self):
""" Start streaming events from `gerrit stream-events`. """
if not self._stream:
self._stream = GerritStream(self, ssh_client=self._ssh_client)
self._stream.start() |
def offsets_for_times(self, timestamps):
"""Look up the offsets for the given partitions by timestamp. The
returned offset for each partition is the earliest offset whose
timestamp is greater than or equal to the given timestamp in the
corresponding partition.
This is a blocking call. The consumer does not have to be assigned the
partitions.
If the message format version in a partition is before 0.10.0, i.e.
the messages do not have timestamps, ``None`` will be returned for that
partition. ``None`` will also be returned for the partition if there
are no messages in it.
Note:
This method may block indefinitely if the partition does not exist.
Arguments:
timestamps (dict): ``{TopicPartition: int}`` mapping from partition
to the timestamp to look up. Unit should be milliseconds since
beginning of the epoch (midnight Jan 1, 1970 (UTC))
Returns:
``{TopicPartition: OffsetAndTimestamp}``: mapping from partition
to the timestamp and offset of the first message with timestamp
greater than or equal to the target timestamp.
Raises:
ValueError: If the target timestamp is negative
UnsupportedVersionError: If the broker does not support looking
up the offsets by timestamp.
KafkaTimeoutError: If fetch failed in request_timeout_ms
"""
if self.config['api_version'] <= (0, 10, 0):
raise UnsupportedVersionError(
"offsets_for_times API not supported for cluster version {}"
.format(self.config['api_version']))
for tp, ts in six.iteritems(timestamps):
timestamps[tp] = int(ts)
if ts < 0:
raise ValueError(
"The target time for partition {} is {}. The target time "
"cannot be negative.".format(tp, ts))
return self._fetcher.get_offsets_by_times(
timestamps, self.config['request_timeout_ms']) | Look up the offsets for the given partitions by timestamp. The
returned offset for each partition is the earliest offset whose
timestamp is greater than or equal to the given timestamp in the
corresponding partition.
This is a blocking call. The consumer does not have to be assigned the
partitions.
If the message format version in a partition is before 0.10.0, i.e.
the messages do not have timestamps, ``None`` will be returned for that
partition. ``None`` will also be returned for the partition if there
are no messages in it.
Note:
This method may block indefinitely if the partition does not exist.
Arguments:
timestamps (dict): ``{TopicPartition: int}`` mapping from partition
to the timestamp to look up. Unit should be milliseconds since
beginning of the epoch (midnight Jan 1, 1970 (UTC))
Returns:
``{TopicPartition: OffsetAndTimestamp}``: mapping from partition
to the timestamp and offset of the first message with timestamp
greater than or equal to the target timestamp.
Raises:
ValueError: If the target timestamp is negative
UnsupportedVersionError: If the broker does not support looking
up the offsets by timestamp.
KafkaTimeoutError: If fetch failed in request_timeout_ms | Below is the instruction that describes the task:
### Input:
Look up the offsets for the given partitions by timestamp. The
returned offset for each partition is the earliest offset whose
timestamp is greater than or equal to the given timestamp in the
corresponding partition.
This is a blocking call. The consumer does not have to be assigned the
partitions.
If the message format version in a partition is before 0.10.0, i.e.
the messages do not have timestamps, ``None`` will be returned for that
partition. ``None`` will also be returned for the partition if there
are no messages in it.
Note:
This method may block indefinitely if the partition does not exist.
Arguments:
timestamps (dict): ``{TopicPartition: int}`` mapping from partition
to the timestamp to look up. Unit should be milliseconds since
beginning of the epoch (midnight Jan 1, 1970 (UTC))
Returns:
``{TopicPartition: OffsetAndTimestamp}``: mapping from partition
to the timestamp and offset of the first message with timestamp
greater than or equal to the target timestamp.
Raises:
ValueError: If the target timestamp is negative
UnsupportedVersionError: If the broker does not support looking
up the offsets by timestamp.
KafkaTimeoutError: If fetch failed in request_timeout_ms
### Response:
def offsets_for_times(self, timestamps):
"""Look up the offsets for the given partitions by timestamp. The
returned offset for each partition is the earliest offset whose
timestamp is greater than or equal to the given timestamp in the
corresponding partition.
This is a blocking call. The consumer does not have to be assigned the
partitions.
If the message format version in a partition is before 0.10.0, i.e.
the messages do not have timestamps, ``None`` will be returned for that
partition. ``None`` will also be returned for the partition if there
are no messages in it.
Note:
This method may block indefinitely if the partition does not exist.
Arguments:
timestamps (dict): ``{TopicPartition: int}`` mapping from partition
to the timestamp to look up. Unit should be milliseconds since
beginning of the epoch (midnight Jan 1, 1970 (UTC))
Returns:
``{TopicPartition: OffsetAndTimestamp}``: mapping from partition
to the timestamp and offset of the first message with timestamp
greater than or equal to the target timestamp.
Raises:
ValueError: If the target timestamp is negative
UnsupportedVersionError: If the broker does not support looking
up the offsets by timestamp.
KafkaTimeoutError: If fetch failed in request_timeout_ms
"""
if self.config['api_version'] <= (0, 10, 0):
raise UnsupportedVersionError(
"offsets_for_times API not supported for cluster version {}"
.format(self.config['api_version']))
for tp, ts in six.iteritems(timestamps):
timestamps[tp] = int(ts)
if ts < 0:
raise ValueError(
"The target time for partition {} is {}. The target time "
"cannot be negative.".format(tp, ts))
return self._fetcher.get_offsets_by_times(
timestamps, self.config['request_timeout_ms']) |
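A hedged usage sketch against kafka-python's KafkaConsumer; the broker address, topic name and partition below are placeholders:
import time
from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers="localhost:9092")  # placeholder broker
tp = TopicPartition("my-topic", 0)                            # placeholder topic/partition
one_hour_ago_ms = int((time.time() - 3600) * 1000)
offsets = consumer.offsets_for_times({tp: one_hour_ago_ms})
if offsets[tp] is not None:
    consumer.assign([tp])
    consumer.seek(tp, offsets[tp].offset)  # resume reading from roughly one hour back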
def decode(cls, value):
"""
take a utf-8 encoded byte-string from redis and
turn it back into a list
:param value: bytes
:return: list
"""
try:
return None if value is None else \
list(json.loads(value.decode(cls._encoding)))
except (TypeError, AttributeError):
return list(value) | take a utf-8 encoded byte-string from redis and
turn it back into a list
:param value: bytes
:return: list | Below is the instruction that describes the task:
### Input:
take a utf-8 encoded byte-string from redis and
turn it back into a list
:param value: bytes
:return: list
### Response:
def decode(cls, value):
"""
take a utf-8 encoded byte-string from redis and
turn it back into a list
:param value: bytes
:return: list
"""
try:
return None if value is None else \
list(json.loads(value.decode(cls._encoding)))
except (TypeError, AttributeError):
return list(value) |
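decode() reverses a JSON-encoded, utf-8 byte string back into a list; a round-trip sketch, assuming cls._encoding is "utf-8" and using ListField as a hypothetical host class name since the class itself is not shown:
import json

raw = json.dumps([1, "two", 3]).encode("utf-8")  # what redis would hand back
print(ListField.decode(raw))     # [1, 'two', 3]
print(ListField.decode(None))    # None
print(ListField.decode((4, 5)))  # fallback path: [4, 5]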
def get_extra_restriction(self, where_class, alias, remote_alias):
"""
Overrides ForeignObject's get_extra_restriction function that returns
an SQL statement which is appended to a JOIN's conditional filtering
part
:return: SQL conditional statement
:rtype: WhereNode
"""
historic_sql = '''{alias}.version_start_date <= %s
AND ({alias}.version_end_date > %s
OR {alias}.version_end_date is NULL )'''
current_sql = '''{alias}.version_end_date is NULL'''
# How 'bout creating an ExtraWhere here, without params
return where_class([VersionedExtraWhere(historic_sql=historic_sql,
current_sql=current_sql,
alias=alias,
remote_alias=remote_alias)]) | Overrides ForeignObject's get_extra_restriction function that returns
an SQL statement which is appended to a JOIN's conditional filtering
part
:return: SQL conditional statement
:rtype: WhereNode | Below is the instruction that describes the task:
### Input:
Overrides ForeignObject's get_extra_restriction function that returns
an SQL statement which is appended to a JOIN's conditional filtering
part
:return: SQL conditional statement
:rtype: WhereNode
### Response:
def get_extra_restriction(self, where_class, alias, remote_alias):
"""
Overrides ForeignObject's get_extra_restriction function that returns
an SQL statement which is appended to a JOIN's conditional filtering
part
:return: SQL conditional statement
:rtype: WhereNode
"""
historic_sql = '''{alias}.version_start_date <= %s
AND ({alias}.version_end_date > %s
OR {alias}.version_end_date is NULL )'''
current_sql = '''{alias}.version_end_date is NULL'''
# How 'bout creating an ExtraWhere here, without params
return where_class([VersionedExtraWhere(historic_sql=historic_sql,
current_sql=current_sql,
alias=alias,
remote_alias=remote_alias)]) |
def updateReplicationMetadataResponse(
self, pid, replicaMetadata, serialVersion, vendorSpecific=None
):
"""CNReplication.updateReplicationMetadata(session, pid, replicaMetadata,
serialVersion) → boolean https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNReplication.updateReplicationMetadata
Not implemented.
Args:
pid:
replicaMetadata:
serialVersion:
vendorSpecific:
Returns:
"""
mmp_dict = {
'replicaMetadata': ('replicaMetadata.xml', replicaMetadata.toxml('utf-8')),
'serialVersion': str(serialVersion),
}
return self.PUT(
['replicaMetadata', pid], fields=mmp_dict, headers=vendorSpecific
) | CNReplication.updateReplicationMetadata(session, pid, replicaMetadata,
serialVersion) → boolean https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNReplication.updateReplicationMetadata
Not implemented.
Args:
pid:
replicaMetadata:
serialVersion:
vendorSpecific:
Returns: | Below is the instruction that describes the task:
### Input:
CNReplication.updateReplicationMetadata(session, pid, replicaMetadata,
serialVersion) → boolean https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNReplication.updateReplicationMetadata
Not implemented.
Args:
pid:
replicaMetadata:
serialVersion:
vendorSpecific:
Returns:
### Response:
def updateReplicationMetadataResponse(
self, pid, replicaMetadata, serialVersion, vendorSpecific=None
):
"""CNReplication.updateReplicationMetadata(session, pid, replicaMetadata,
serialVersion) → boolean https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNReplication.updateReplicationMetadata
Not implemented.
Args:
pid:
replicaMetadata:
serialVersion:
vendorSpecific:
Returns:
"""
mmp_dict = {
'replicaMetadata': ('replicaMetadata.xml', replicaMetadata.toxml('utf-8')),
'serialVersion': str(serialVersion),
}
return self.PUT(
['replicaMetadata', pid], fields=mmp_dict, headers=vendorSpecific
) |
def make_bitransformer(
input_vocab_size=gin.REQUIRED,
output_vocab_size=gin.REQUIRED,
layout=None,
mesh_shape=None):
"""Gin-configurable bitransformer constructor.
In your config file you need to set the encoder and decoder layers like this:
encoder/make_layer_stack.layers = [
@transformer_layers.SelfAttention,
@transformer_layers.DenseReluDense,
]
decoder/make_layer_stack.layers = [
@transformer_layers.SelfAttention,
@transformer_layers.EncDecAttention,
@transformer_layers.DenseReluDense,
]
Args:
input_vocab_size: an integer
output_vocab_size: an integer
layout: optional - an input to mtf.convert_to_layout_rules
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
mesh_shape: optional - an input to mtf.convert_to_shape
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
Returns:
a Bitransformer
"""
with gin.config_scope("encoder"):
encoder = Unitransformer(
layer_stack=make_layer_stack(),
input_vocab_size=input_vocab_size,
output_vocab_size=None,
autoregressive=False,
name="encoder",
layout=layout,
mesh_shape=mesh_shape)
with gin.config_scope("decoder"):
decoder = Unitransformer(
layer_stack=make_layer_stack(),
input_vocab_size=output_vocab_size,
output_vocab_size=output_vocab_size,
autoregressive=True,
name="decoder",
layout=layout,
mesh_shape=mesh_shape)
return Bitransformer(encoder, decoder) | Gin-configurable bitransformer constructor.
In your config file you need to set the encoder and decoder layers like this:
encoder/make_layer_stack.layers = [
@transformer_layers.SelfAttention,
@transformer_layers.DenseReluDense,
]
decoder/make_layer_stack.layers = [
@transformer_layers.SelfAttention,
@transformer_layers.EncDecAttention,
@transformer_layers.DenseReluDense,
]
Args:
input_vocab_size: an integer
output_vocab_size: an integer
layout: optional - an input to mtf.convert_to_layout_rules
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
mesh_shape: optional - an input to mtf.convert_to_shape
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
Returns:
a Bitransformer | Below is the instruction that describes the task:
### Input:
Gin-configurable bitransformer constructor.
In your config file you need to set the encoder and decoder layers like this:
encoder/make_layer_stack.layers = [
@transformer_layers.SelfAttention,
@transformer_layers.DenseReluDense,
]
decoder/make_layer_stack.layers = [
@transformer_layers.SelfAttention,
@transformer_layers.EncDecAttention,
@transformer_layers.DenseReluDense,
]
Args:
input_vocab_size: an integer
output_vocab_size: an integer
layout: optional - an input to mtf.convert_to_layout_rules
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
mesh_shape: optional - an input to mtf.convert_to_shape
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
Returns:
a Bitransformer
### Response:
def make_bitransformer(
input_vocab_size=gin.REQUIRED,
output_vocab_size=gin.REQUIRED,
layout=None,
mesh_shape=None):
"""Gin-configurable bitransformer constructor.
In your config file you need to set the encoder and decoder layers like this:
encoder/make_layer_stack.layers = [
@transformer_layers.SelfAttention,
@transformer_layers.DenseReluDense,
]
decoder/make_layer_stack.layers = [
@transformer_layers.SelfAttention,
@transformer_layers.EncDecAttention,
@transformer_layers.DenseReluDense,
]
Args:
input_vocab_size: an integer
output_vocab_size: an integer
layout: optional - an input to mtf.convert_to_layout_rules
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
mesh_shape: optional - an input to mtf.convert_to_shape
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
Returns:
a Bitransformer
"""
with gin.config_scope("encoder"):
encoder = Unitransformer(
layer_stack=make_layer_stack(),
input_vocab_size=input_vocab_size,
output_vocab_size=None,
autoregressive=False,
name="encoder",
layout=layout,
mesh_shape=mesh_shape)
with gin.config_scope("decoder"):
decoder = Unitransformer(
layer_stack=make_layer_stack(),
input_vocab_size=output_vocab_size,
output_vocab_size=output_vocab_size,
autoregressive=True,
name="decoder",
layout=layout,
mesh_shape=mesh_shape)
return Bitransformer(encoder, decoder) |
def _process_irrational_functions(self, functions, predetermined_function_addrs, blockaddr_to_function):
"""
For unresolvable indirect jumps, angr marks those jump targets as individual functions. For example, usually
the following pattern is seen:
sub_0x400010:
push ebp
mov esp, ebp
...
cmp eax, 10
ja end
mov eax, jumptable[eax]
jmp eax
sub_0x400080:
# do something here
jmp end
end (0x400e00):
pop ebp
ret
In the example above, `process_irrational_functions` will remove function 0x400080, and merge it with function
0x400010.
:param angr.knowledge_plugins.FunctionManager functions: all functions that angr recovers, including those ones
that are misidentified as functions.
:param dict blockaddr_to_function: A mapping between block addresses and Function instances.
:return: A set of addresses of all removed functions
:rtype: set
"""
functions_to_remove = { }
functions_can_be_removed = set(functions.keys()) - set(predetermined_function_addrs)
for func_addr, function in functions.items():
if func_addr in functions_to_remove:
continue
# check all blocks and see if any block ends with an indirect jump and is not resolved
has_unresolved_jumps = False
# the functions to merge with must be located between the unresolved basic block address and the endpoint
# of the current function
max_unresolved_jump_addr = 0
for block_addr in function.block_addrs_set:
if block_addr in self.indirect_jumps and \
self.indirect_jumps[block_addr].jumpkind == 'Ijk_Boring' and \
not self.indirect_jumps[block_addr].resolved_targets:
# it's not resolved
# we should also make sure it's a jump, not a call
has_unresolved_jumps = True
max_unresolved_jump_addr = max(max_unresolved_jump_addr, block_addr)
if not has_unresolved_jumps:
continue
if function.startpoint is None:
continue
startpoint_addr = function.startpoint.addr
if not function.endpoints:
# Function should have at least one endpoint
continue
endpoint_addr = max([ a.addr for a in function.endpoints ])
the_endpoint = next(a for a in function.endpoints if a.addr == endpoint_addr)
endpoint_addr += the_endpoint.size
# sanity check: startpoint of the function should be greater than its endpoint
if startpoint_addr >= endpoint_addr:
continue
if max_unresolved_jump_addr <= startpoint_addr or max_unresolved_jump_addr >= endpoint_addr:
continue
# scan forward from the endpoint to include any function tail jumps
# Here is an example:
# loc_8049562:
# mov eax, ebp
# add esp, 3ch
# ...
# ret
# loc_804956c:
# mov ebp, 3
# jmp loc_8049562
# loc_8049573:
# mov ebp, 4
# jmp loc_8049562
#
last_addr = endpoint_addr
tmp_state = self.project.factory.blank_state(mode='fastpath')
while True:
try:
# using successors is slow, but acceptable since we won't be creating millions of blocks here...
tmp_state.ip = last_addr
b = self.project.factory.successors(tmp_state, jumpkind='Ijk_Boring')
if len(b.successors) != 1:
break
if b.successors[0].history.jumpkind not in ('Ijk_Boring', 'Ijk_InvalICache'):
break
if b.successors[0].ip.symbolic:
break
suc_addr = b.successors[0].ip._model_concrete
if max(startpoint_addr, the_endpoint.addr - 0x40) <= suc_addr < the_endpoint.addr + the_endpoint.size:
# increment the endpoint_addr
endpoint_addr = b.addr + b.artifacts['irsb_size']
else:
break
last_addr = b.addr + b.artifacts['irsb_size']
except (SimTranslationError, SimMemoryError, SimIRSBError, SimEngineError):
break
# find all functions that are between [ startpoint, endpoint ]
should_merge = True
functions_to_merge = set()
for f_addr in functions_can_be_removed:
f = functions[f_addr]
if f_addr == func_addr:
continue
if max_unresolved_jump_addr < f_addr < endpoint_addr and \
all([max_unresolved_jump_addr < b_addr < endpoint_addr for b_addr in f.block_addrs]):
if f_addr in functions_to_remove:
# this function has already been merged with other functions before... it cannot be merged with
# this function anymore
should_merge = False
break
if f_addr in predetermined_function_addrs:
# this function is a legit one. it shouldn't be removed/merged
should_merge = False
break
functions_to_merge.add(f_addr)
if not should_merge:
# we shouldn't merge...
continue
for f_addr in functions_to_merge:
functions_to_remove[f_addr] = func_addr
# merge all functions
for to_remove, merge_with in functions_to_remove.items():
func_merge_with = self._addr_to_function(merge_with, blockaddr_to_function, functions)
for block_addr in functions[to_remove].block_addrs:
blockaddr_to_function[block_addr] = func_merge_with
del functions[to_remove]
return set(functions_to_remove.keys()) | For unresolvable indirect jumps, angr marks those jump targets as individual functions. For example, usually
the following pattern is seen:
sub_0x400010:
push ebp
mov esp, ebp
...
cmp eax, 10
ja end
mov eax, jumptable[eax]
jmp eax
sub_0x400080:
# do something here
jmp end
end (0x400e00):
pop ebp
ret
In the example above, `process_irrational_functions` will remove function 0x400080, and merge it with function
0x400010.
:param angr.knowledge_plugins.FunctionManager functions: all functions that angr recovers, including those ones
that are misidentified as functions.
:param dict blockaddr_to_function: A mapping between block addresses and Function instances.
:return: A set of addresses of all removed functions
:rtype: set | Below is the instruction that describes the task:
### Input:
For unresolvable indirect jumps, angr marks those jump targets as individual functions. For example, usually
the following pattern is seen:
sub_0x400010:
push ebp
mov esp, ebp
...
cmp eax, 10
ja end
mov eax, jumptable[eax]
jmp eax
sub_0x400080:
# do something here
jmp end
end (0x400e00):
pop ebp
ret
In the example above, `process_irrational_functions` will remove function 0x400080, and merge it with function
0x400010.
:param angr.knowledge_plugins.FunctionManager functions: all functions that angr recovers, including those ones
that are misidentified as functions.
:param dict blockaddr_to_function: A mapping between block addresses and Function instances.
:return: A set of addresses of all removed functions
:rtype: set
### Response:
def _process_irrational_functions(self, functions, predetermined_function_addrs, blockaddr_to_function):
"""
For unresolvable indirect jumps, angr marks those jump targets as individual functions. For example, usually
the following pattern is seen:
sub_0x400010:
push ebp
mov esp, ebp
...
cmp eax, 10
ja end
mov eax, jumptable[eax]
jmp eax
sub_0x400080:
# do something here
jmp end
end (0x400e00):
pop ebp
ret
In the example above, `process_irrational_functions` will remove function 0x400080, and merge it with function
0x400010.
:param angr.knowledge_plugins.FunctionManager functions: all functions that angr recovers, including those ones
that are misidentified as functions.
:param dict blockaddr_to_function: A mapping between block addresses and Function instances.
:return: A set of addresses of all removed functions
:rtype: set
"""
functions_to_remove = { }
functions_can_be_removed = set(functions.keys()) - set(predetermined_function_addrs)
for func_addr, function in functions.items():
if func_addr in functions_to_remove:
continue
# check all blocks and see if any block ends with an indirect jump and is not resolved
has_unresolved_jumps = False
# the functions to merge with must be located between the unresolved basic block address and the endpoint
# of the current function
max_unresolved_jump_addr = 0
for block_addr in function.block_addrs_set:
if block_addr in self.indirect_jumps and \
self.indirect_jumps[block_addr].jumpkind == 'Ijk_Boring' and \
not self.indirect_jumps[block_addr].resolved_targets:
# it's not resolved
# we should also make sure it's a jump, not a call
has_unresolved_jumps = True
max_unresolved_jump_addr = max(max_unresolved_jump_addr, block_addr)
if not has_unresolved_jumps:
continue
if function.startpoint is None:
continue
startpoint_addr = function.startpoint.addr
if not function.endpoints:
# Function should have at least one endpoint
continue
endpoint_addr = max([ a.addr for a in function.endpoints ])
the_endpoint = next(a for a in function.endpoints if a.addr == endpoint_addr)
endpoint_addr += the_endpoint.size
# sanity check: startpoint of the function should be greater than its endpoint
if startpoint_addr >= endpoint_addr:
continue
if max_unresolved_jump_addr <= startpoint_addr or max_unresolved_jump_addr >= endpoint_addr:
continue
# scan forward from the endpoint to include any function tail jumps
# Here is an example:
# loc_8049562:
# mov eax, ebp
# add esp, 3ch
# ...
# ret
# loc_804956c:
# mov ebp, 3
# jmp loc_8049562
# loc_8049573:
# mov ebp, 4
# jmp loc_8049562
#
last_addr = endpoint_addr
tmp_state = self.project.factory.blank_state(mode='fastpath')
while True:
try:
# using successors is slow, but acceptable since we won't be creating millions of blocks here...
tmp_state.ip = last_addr
b = self.project.factory.successors(tmp_state, jumpkind='Ijk_Boring')
if len(b.successors) != 1:
break
if b.successors[0].history.jumpkind not in ('Ijk_Boring', 'Ijk_InvalICache'):
break
if b.successors[0].ip.symbolic:
break
suc_addr = b.successors[0].ip._model_concrete
if max(startpoint_addr, the_endpoint.addr - 0x40) <= suc_addr < the_endpoint.addr + the_endpoint.size:
# increment the endpoint_addr
endpoint_addr = b.addr + b.artifacts['irsb_size']
else:
break
last_addr = b.addr + b.artifacts['irsb_size']
except (SimTranslationError, SimMemoryError, SimIRSBError, SimEngineError):
break
# find all functions that are between [ startpoint, endpoint ]
should_merge = True
functions_to_merge = set()
for f_addr in functions_can_be_removed:
f = functions[f_addr]
if f_addr == func_addr:
continue
if max_unresolved_jump_addr < f_addr < endpoint_addr and \
all([max_unresolved_jump_addr < b_addr < endpoint_addr for b_addr in f.block_addrs]):
if f_addr in functions_to_remove:
# this function has already been merged with other functions before... it cannot be merged with
# this function anymore
should_merge = False
break
if f_addr in predetermined_function_addrs:
# this function is a legit one. it shouldn't be removed/merged
should_merge = False
break
functions_to_merge.add(f_addr)
if not should_merge:
# we shouldn't merge...
continue
for f_addr in functions_to_merge:
functions_to_remove[f_addr] = func_addr
# merge all functions
for to_remove, merge_with in functions_to_remove.items():
func_merge_with = self._addr_to_function(merge_with, blockaddr_to_function, functions)
for block_addr in functions[to_remove].block_addrs:
blockaddr_to_function[block_addr] = func_merge_with
del functions[to_remove]
return set(functions_to_remove.keys()) |
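The function above only runs inside a full angr analysis, so here is a purely illustrative sketch of what its final merge loop does to the block-to-function map, using plain dicts and made-up addresses as stand-ins for angr's FunctionManager and Function objects.
functions_to_remove = {0x400080: 0x400010}   # remove 0x400080 and fold it into 0x400010
functions = {0x400010: [0x400010], 0x400080: [0x400080, 0x4000a0]}   # func addr -> block addrs
blockaddr_to_function = {0x400010: 'f_400010', 0x400080: 'f_400080', 0x4000a0: 'f_400080'}
for to_remove, merge_with in functions_to_remove.items():
    for block_addr in functions[to_remove]:
        # stand-in for self._addr_to_function(merge_with, ...)
        blockaddr_to_function[block_addr] = 'f_%x' % merge_with
    del functions[to_remove]
assert set(functions) == {0x400010}
assert set(blockaddr_to_function.values()) == {'f_400010'}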
def _register_user_models(user_models, admin=None, schema=None):
"""Register any user-defined models with the API Service.
:param list user_models: A list of user-defined models to include in the
API service
"""
if any([issubclass(cls, AutomapModel) for cls in user_models]):
AutomapModel.prepare( # pylint:disable=maybe-no-member
db.engine, reflect=True, schema=schema)
for user_model in user_models:
register_model(user_model, admin) | Register any user-defined models with the API Service.
:param list user_models: A list of user-defined models to include in the
API service | Below is the the instruction that describes the task:
### Input:
Register any user-defined models with the API Service.
:param list user_models: A list of user-defined models to include in the
API service
### Response:
def _register_user_models(user_models, admin=None, schema=None):
"""Register any user-defined models with the API Service.
:param list user_models: A list of user-defined models to include in the
API service
"""
if any([issubclass(cls, AutomapModel) for cls in user_models]):
AutomapModel.prepare( # pylint:disable=maybe-no-member
db.engine, reflect=True, schema=schema)
for user_model in user_models:
register_model(user_model, admin) |
def sum_fields(layer, output_field_key, input_fields):
"""Sum the value of input_fields and put it as output_field.
:param layer: The vector layer.
:type layer: QgsVectorLayer
:param output_field_key: The output field definition key.
:type output_field_key: basestring
:param input_fields: List of input fields' name.
:type input_fields: list
"""
field_definition = definition(output_field_key)
output_field_name = field_definition['field_name']
# If the fields only has one element
if len(input_fields) == 1:
# Name is different, copy it
if input_fields[0] != output_field_name:
to_rename = {input_fields[0]: output_field_name}
# We copy only, it will be deleted later.
# We can't rename the field, we need to copy it as the same
# field might be used many times in the FMT tool.
copy_fields(layer, to_rename)
else:
# Name is same, do nothing
return
else:
# Creating expression
# Put field name in a double quote. See #4248
input_fields = ['"%s"' % f for f in input_fields]
string_expression = ' + '.join(input_fields)
sum_expression = QgsExpression(string_expression)
context = QgsExpressionContext()
context.setFields(layer.fields())
sum_expression.prepare(context)
# Get the output field index
output_idx = layer.fields().lookupField(output_field_name)
# Output index is not found
layer.startEditing()
if output_idx == -1:
output_field = create_field_from_definition(field_definition)
layer.addAttribute(output_field)
output_idx = layer.fields().lookupField(output_field_name)
# Iterate to all features
for feature in layer.getFeatures():
context.setFeature(feature)
result = sum_expression.evaluate(context)
feature[output_idx] = result
layer.updateFeature(feature)
layer.commitChanges() | Sum the value of input_fields and put it as output_field.
:param layer: The vector layer.
:type layer: QgsVectorLayer
:param output_field_key: The output field definition key.
:type output_field_key: basestring
:param input_fields: List of input fields' name.
:type input_fields: list | Below is the the instruction that describes the task:
### Input:
Sum the value of input_fields and put it as output_field.
:param layer: The vector layer.
:type layer: QgsVectorLayer
:param output_field_key: The output field definition key.
:type output_field_key: basestring
:param input_fields: List of input fields' name.
:type input_fields: list
### Response:
def sum_fields(layer, output_field_key, input_fields):
"""Sum the value of input_fields and put it as output_field.
:param layer: The vector layer.
:type layer: QgsVectorLayer
:param output_field_key: The output field definition key.
:type output_field_key: basestring
:param input_fields: List of input fields' name.
:type input_fields: list
"""
field_definition = definition(output_field_key)
output_field_name = field_definition['field_name']
# If the fields only has one element
if len(input_fields) == 1:
# Name is different, copy it
if input_fields[0] != output_field_name:
to_rename = {input_fields[0]: output_field_name}
# We copy only, it will be deleted later.
# We can't rename the field, we need to copy it as the same
# field might be used many times in the FMT tool.
copy_fields(layer, to_rename)
else:
# Name is same, do nothing
return
else:
# Creating expression
# Put field name in a double quote. See #4248
input_fields = ['"%s"' % f for f in input_fields]
string_expression = ' + '.join(input_fields)
sum_expression = QgsExpression(string_expression)
context = QgsExpressionContext()
context.setFields(layer.fields())
sum_expression.prepare(context)
# Get the output field index
output_idx = layer.fields().lookupField(output_field_name)
# Output index is not found
layer.startEditing()
if output_idx == -1:
output_field = create_field_from_definition(field_definition)
layer.addAttribute(output_field)
output_idx = layer.fields().lookupField(output_field_name)
# Iterate to all features
for feature in layer.getFeatures():
context.setFeature(feature)
result = sum_expression.evaluate(context)
feature[output_idx] = result
layer.updateFeature(feature)
layer.commitChanges() |
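The core of sum_fields is the expression it hands to QGIS: each input field name is wrapped in double quotes (see #4248) and the names are joined with '+'. A QGIS-free sketch of just that string construction, with example field names:
input_fields = ['minimum_needs__rice', 'minimum_needs__water']
quoted = ['"%s"' % f for f in input_fields]
string_expression = ' + '.join(quoted)
print(string_expression)   # "minimum_needs__rice" + "minimum_needs__water"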
def acl_required(permission, context):
"""Returns a decorator that checks if a user has the requested permission
from the passed acl context.
This function constructs a decorator that can be used to check a aiohttp's
view for authorization before calling it. It uses the get_permission()
function to check the request against the passed permission and context. If
the user does not have the correct permission to run this function, it
raises HTTPForbidden.
Args:
permission: The specific permission requested.
context: Either a sequence of ACL tuples, or a callable that returns a
sequence of ACL tuples. For more information on ACL tuples, see
get_permission()
Returns:
A decorator which will check the request passed has the permission for
the given context. The decorator will raise HTTPForbidden if the user
does not have the correct permissions to access the view.
"""
def decorator(func):
@wraps(func)
async def wrapper(*args):
request = args[-1]
            # evaluate a callable context under a new name so the closure variable is not shadowed
            acl_context = context() if callable(context) else context
            if await get_permitted(request, permission, acl_context):
return await func(*args)
raise web.HTTPForbidden()
return wrapper
return decorator | Returns a decorator that checks if a user has the requested permission
from the passed acl context.
This function constructs a decorator that can be used to check a aiohttp's
view for authorization before calling it. It uses the get_permission()
function to check the request against the passed permission and context. If
the user does not have the correct permission to run this function, it
raises HTTPForbidden.
Args:
permission: The specific permission requested.
context: Either a sequence of ACL tuples, or a callable that returns a
sequence of ACL tuples. For more information on ACL tuples, see
get_permission()
Returns:
A decorator which will check the request passed has the permission for
the given context. The decorator will raise HTTPForbidden if the user
does not have the correct permissions to access the view. | Below is the the instruction that describes the task:
### Input:
Returns a decorator that checks if a user has the requested permission
from the passed acl context.
This function constructs a decorator that can be used to check a aiohttp's
view for authorization before calling it. It uses the get_permission()
function to check the request against the passed permission and context. If
the user does not have the correct permission to run this function, it
raises HTTPForbidden.
Args:
permission: The specific permission requested.
context: Either a sequence of ACL tuples, or a callable that returns a
sequence of ACL tuples. For more information on ACL tuples, see
get_permission()
Returns:
A decorator which will check the request passed has the permission for
the given context. The decorator will raise HTTPForbidden if the user
does not have the correct permissions to access the view.
### Response:
def acl_required(permission, context):
"""Returns a decorator that checks if a user has the requested permission
from the passed acl context.
This function constructs a decorator that can be used to check a aiohttp's
view for authorization before calling it. It uses the get_permission()
function to check the request against the passed permission and context. If
the user does not have the correct permission to run this function, it
raises HTTPForbidden.
Args:
permission: The specific permission requested.
context: Either a sequence of ACL tuples, or a callable that returns a
sequence of ACL tuples. For more information on ACL tuples, see
get_permission()
Returns:
A decorator which will check the request passed has the permission for
the given context. The decorator will raise HTTPForbidden if the user
does not have the correct permissions to access the view.
"""
def decorator(func):
@wraps(func)
async def wrapper(*args):
request = args[-1]
            # evaluate a callable context under a new name so the closure variable is not shadowed
            acl_context = context() if callable(context) else context
            if await get_permitted(request, permission, acl_context):
return await func(*args)
raise web.HTTPForbidden()
return wrapper
return decorator |
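A hedged usage sketch for the decorator above: the permission name and the ACL tuple layout are assumptions for illustration, not the library's documented constants, and get_permitted must be resolvable at call time.
from aiohttp import web
example_context = [('allow', 'group:admin', 'edit')]   # assumed tuple layout, see get_permission()
@acl_required('edit', example_context)
async def edit_article(request):
    return web.json_response({'status': 'ok'})
A callable can also be passed as the context, e.g. acl_required('edit', load_acl_from_db), in which case it is evaluated on each request.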
def user_config_dir():
r"""Return the per-user config dir (full path).
- Linux, *BSD, SunOS: ~/.config/glances
- macOS: ~/Library/Application Support/glances
- Windows: %APPDATA%\glances
"""
if WINDOWS:
path = os.environ.get('APPDATA')
elif MACOS:
path = os.path.expanduser('~/Library/Application Support')
else:
path = os.environ.get('XDG_CONFIG_HOME') or os.path.expanduser('~/.config')
if path is None:
path = ''
else:
path = os.path.join(path, 'glances')
return path | r"""Return the per-user config dir (full path).
- Linux, *BSD, SunOS: ~/.config/glances
- macOS: ~/Library/Application Support/glances
- Windows: %APPDATA%\glances | Below is the the instruction that describes the task:
### Input:
r"""Return the per-user config dir (full path).
- Linux, *BSD, SunOS: ~/.config/glances
- macOS: ~/Library/Application Support/glances
- Windows: %APPDATA%\glances
### Response:
def user_config_dir():
r"""Return the per-user config dir (full path).
- Linux, *BSD, SunOS: ~/.config/glances
- macOS: ~/Library/Application Support/glances
- Windows: %APPDATA%\glances
"""
if WINDOWS:
path = os.environ.get('APPDATA')
elif MACOS:
path = os.path.expanduser('~/Library/Application Support')
else:
path = os.environ.get('XDG_CONFIG_HOME') or os.path.expanduser('~/.config')
if path is None:
path = ''
else:
path = os.path.join(path, 'glances')
return path |
def ziparchive_opener(path, pattern='', verbose=False):
"""Opener that opens files from zip archive..
:param str path: Path.
:param str pattern: Regular expression pattern.
:return: Filehandle(s).
"""
with zipfile.ZipFile(io.BytesIO(urlopen(path).read()), 'r') if is_url(path) else zipfile.ZipFile(path, 'r') as ziparchive:
for zipinfo in ziparchive.infolist():
if not zipinfo.filename.endswith('/'):
source = os.path.join(path, zipinfo.filename)
if pattern and not re.match(pattern, zipinfo.filename):
logger.verbose('Skipping file: {}, did not match regex pattern "{}"'.format(os.path.abspath(zipinfo.filename), pattern))
continue
logger.verbose('Processing file: {}'.format(source))
filehandle = ziparchive.open(zipinfo)
yield filehandle | Opener that opens files from zip archive..
:param str path: Path.
:param str pattern: Regular expression pattern.
:return: Filehandle(s). | Below is the the instruction that describes the task:
### Input:
Opener that opens files from zip archive..
:param str path: Path.
:param str pattern: Regular expression pattern.
:return: Filehandle(s).
### Response:
def ziparchive_opener(path, pattern='', verbose=False):
"""Opener that opens files from zip archive..
:param str path: Path.
:param str pattern: Regular expression pattern.
:return: Filehandle(s).
"""
with zipfile.ZipFile(io.BytesIO(urlopen(path).read()), 'r') if is_url(path) else zipfile.ZipFile(path, 'r') as ziparchive:
for zipinfo in ziparchive.infolist():
if not zipinfo.filename.endswith('/'):
source = os.path.join(path, zipinfo.filename)
if pattern and not re.match(pattern, zipinfo.filename):
logger.verbose('Skipping file: {}, did not match regex pattern "{}"'.format(os.path.abspath(zipinfo.filename), pattern))
continue
logger.verbose('Processing file: {}'.format(source))
filehandle = ziparchive.open(zipinfo)
yield filehandle |
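A small usage sketch for the opener above; the archive path and pattern are examples. The same call works with an http(s) URL because of the is_url/urlopen branch.
for handle in ziparchive_opener('/tmp/reports.zip', pattern=r'.*\.csv$'):
    print(handle.readline())
    handle.close()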
def create_socks_endpoint(self, reactor, socks_config):
"""
Creates a new TorSocksEndpoint instance given a valid
configuration line for ``SocksPort``; if this configuration
isn't already in the underlying tor, we add it. Note that this
method may call :meth:`txtorcon.TorConfig.save()` on this instance.
Note that calling this with `socks_config=None` is equivalent
to calling `.socks_endpoint` (which is not async).
XXX socks_config should be .. i dunno, but there's fucking
options and craziness, e.g. default Tor Browser Bundle is:
['9150 IPv6Traffic PreferIPv6 KeepAliveIsolateSOCKSAuth',
'9155']
XXX maybe we should say "socks_port" as the 3rd arg, insist
it's an int, and then allow/support all the other options
(e.g. via kwargs)
XXX we could avoid the "maybe call .save()" thing; worth it?
(actually, no we can't or the Tor won't have it config'd)
"""
yield self.post_bootstrap
if socks_config is None:
if len(self.SocksPort) == 0:
raise RuntimeError(
"socks_port is None and Tor has no SocksPorts configured"
)
socks_config = self.SocksPort[0]
else:
if not any([socks_config in port for port in self.SocksPort]):
# need to configure Tor
self.SocksPort.append(socks_config)
try:
yield self.save()
except TorProtocolError as e:
extra = ''
if socks_config.startswith('unix:'):
# XXX so why don't we check this for the
# caller, earlier on?
extra = '\nNote Tor has specific ownership/permissions ' +\
'requirements for unix sockets and parent dir.'
raise RuntimeError(
"While configuring SOCKSPort to '{}', error from"
" Tor: {}{}".format(
socks_config, e, extra
)
)
defer.returnValue(
_endpoint_from_socksport_line(reactor, socks_config)
) | Creates a new TorSocksEndpoint instance given a valid
configuration line for ``SocksPort``; if this configuration
isn't already in the underlying tor, we add it. Note that this
method may call :meth:`txtorcon.TorConfig.save()` on this instance.
Note that calling this with `socks_config=None` is equivalent
to calling `.socks_endpoint` (which is not async).
XXX socks_config should be .. i dunno, but there's fucking
options and craziness, e.g. default Tor Browser Bundle is:
['9150 IPv6Traffic PreferIPv6 KeepAliveIsolateSOCKSAuth',
'9155']
XXX maybe we should say "socks_port" as the 3rd arg, insist
it's an int, and then allow/support all the other options
(e.g. via kwargs)
XXX we could avoid the "maybe call .save()" thing; worth it?
(actually, no we can't or the Tor won't have it config'd) | Below is the the instruction that describes the task:
### Input:
Creates a new TorSocksEndpoint instance given a valid
configuration line for ``SocksPort``; if this configuration
isn't already in the underlying tor, we add it. Note that this
method may call :meth:`txtorcon.TorConfig.save()` on this instance.
Note that calling this with `socks_config=None` is equivalent
to calling `.socks_endpoint` (which is not async).
XXX socks_config should be .. i dunno, but there's fucking
options and craziness, e.g. default Tor Browser Bundle is:
['9150 IPv6Traffic PreferIPv6 KeepAliveIsolateSOCKSAuth',
'9155']
XXX maybe we should say "socks_port" as the 3rd arg, insist
it's an int, and then allow/support all the other options
(e.g. via kwargs)
XXX we could avoid the "maybe call .save()" thing; worth it?
(actually, no we can't or the Tor won't have it config'd)
### Response:
def create_socks_endpoint(self, reactor, socks_config):
"""
Creates a new TorSocksEndpoint instance given a valid
configuration line for ``SocksPort``; if this configuration
isn't already in the underlying tor, we add it. Note that this
method may call :meth:`txtorcon.TorConfig.save()` on this instance.
Note that calling this with `socks_config=None` is equivalent
to calling `.socks_endpoint` (which is not async).
XXX socks_config should be .. i dunno, but there's fucking
options and craziness, e.g. default Tor Browser Bundle is:
['9150 IPv6Traffic PreferIPv6 KeepAliveIsolateSOCKSAuth',
'9155']
XXX maybe we should say "socks_port" as the 3rd arg, insist
it's an int, and then allow/support all the other options
(e.g. via kwargs)
XXX we could avoid the "maybe call .save()" thing; worth it?
(actually, no we can't or the Tor won't have it config'd)
"""
yield self.post_bootstrap
if socks_config is None:
if len(self.SocksPort) == 0:
raise RuntimeError(
"socks_port is None and Tor has no SocksPorts configured"
)
socks_config = self.SocksPort[0]
else:
if not any([socks_config in port for port in self.SocksPort]):
# need to configure Tor
self.SocksPort.append(socks_config)
try:
yield self.save()
except TorProtocolError as e:
extra = ''
if socks_config.startswith('unix:'):
# XXX so why don't we check this for the
# caller, earlier on?
extra = '\nNote Tor has specific ownership/permissions ' +\
'requirements for unix sockets and parent dir.'
raise RuntimeError(
"While configuring SOCKSPort to '{}', error from"
" Tor: {}{}".format(
socks_config, e, extra
)
)
defer.returnValue(
_endpoint_from_socksport_line(reactor, socks_config)
) |
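A hedged sketch of calling the method above from Twisted code, assuming config is a bootstrapped txtorcon.TorConfig; the port value is an example. If '9050' is not already among config.SocksPort it will be appended and saved before the endpoint is returned.
from twisted.internet import defer
@defer.inlineCallbacks
def get_socks_endpoint(reactor, config):
    endpoint = yield config.create_socks_endpoint(reactor, '9050')
    defer.returnValue(endpoint)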
def __set_svd(self):
"""private method to set SVD components.
Note: this should not be called directly
"""
if self.isdiagonal:
x = np.diag(self.x.flatten())
else:
# just a pointer to x
x = self.x
try:
u, s, v = la.svd(x, full_matrices=True)
v = v.transpose()
except Exception as e:
print("standard SVD failed: {0}".format(str(e)))
try:
v, s, u = la.svd(x.transpose(), full_matrices=True)
u = u.transpose()
except Exception as e:
np.savetxt("failed_svd.dat",x,fmt="%15.6E")
raise Exception("Matrix.__set_svd(): " +
"unable to compute SVD of self.x, " +
"saved matrix to 'failed_svd.dat' -- {0}".\
format(str(e)))
col_names = ["left_sing_vec_" + str(i + 1) for i in range(u.shape[1])]
self.__u = Matrix(x=u, row_names=self.row_names,
col_names=col_names, autoalign=False)
sing_names = ["sing_val_" + str(i + 1) for i in range(s.shape[0])]
self.__s = Matrix(x=np.atleast_2d(s).transpose(), row_names=sing_names,
col_names=sing_names, isdiagonal=True,
autoalign=False)
col_names = ["right_sing_vec_" + str(i + 1) for i in range(v.shape[0])]
self.__v = Matrix(v, row_names=self.col_names, col_names=col_names,
autoalign=False) | private method to set SVD components.
Note: this should not be called directly | Below is the the instruction that describes the task:
### Input:
private method to set SVD components.
Note: this should not be called directly
### Response:
def __set_svd(self):
"""private method to set SVD components.
Note: this should not be called directly
"""
if self.isdiagonal:
x = np.diag(self.x.flatten())
else:
# just a pointer to x
x = self.x
try:
u, s, v = la.svd(x, full_matrices=True)
v = v.transpose()
except Exception as e:
print("standard SVD failed: {0}".format(str(e)))
try:
v, s, u = la.svd(x.transpose(), full_matrices=True)
u = u.transpose()
except Exception as e:
np.savetxt("failed_svd.dat",x,fmt="%15.6E")
raise Exception("Matrix.__set_svd(): " +
"unable to compute SVD of self.x, " +
"saved matrix to 'failed_svd.dat' -- {0}".\
format(str(e)))
col_names = ["left_sing_vec_" + str(i + 1) for i in range(u.shape[1])]
self.__u = Matrix(x=u, row_names=self.row_names,
col_names=col_names, autoalign=False)
sing_names = ["sing_val_" + str(i + 1) for i in range(s.shape[0])]
self.__s = Matrix(x=np.atleast_2d(s).transpose(), row_names=sing_names,
col_names=sing_names, isdiagonal=True,
autoalign=False)
col_names = ["right_sing_vec_" + str(i + 1) for i in range(v.shape[0])]
self.__v = Matrix(v, row_names=self.col_names, col_names=col_names,
autoalign=False) |
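A quick check of the SVD convention the method relies on (numpy stands in for scipy.linalg here, same return convention): v is stored transposed, and with full_matrices=True only the first len(s) left singular vectors carry the data.
import numpy as np
x = np.arange(12, dtype=float).reshape(4, 3)
u, s, vt = np.linalg.svd(x, full_matrices=True)
v = vt.transpose()                      # the transposed form __set_svd stores
x_rebuilt = u[:, :len(s)] @ np.diag(s) @ v.T[:len(s), :]
assert np.allclose(x, x_rebuilt)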
def get_jid(jid):
'''
Return the information returned from a specified jid
'''
log.debug('sqlite3 returner <get_jid> called jid: %s', jid)
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT id, full_ret FROM salt_returns WHERE jid = :jid'''
cur.execute(sql,
{'jid': jid})
data = cur.fetchone()
log.debug('query result: %s', data)
ret = {}
if data and len(data) > 1:
ret = {six.text_type(data[0]): {'return': salt.utils.json.loads(data[1])}}
log.debug('ret: %s', ret)
_close_conn(conn)
return ret | Return the information returned from a specified jid | Below is the the instruction that describes the task:
### Input:
Return the information returned from a specified jid
### Response:
def get_jid(jid):
'''
Return the information returned from a specified jid
'''
log.debug('sqlite3 returner <get_jid> called jid: %s', jid)
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT id, full_ret FROM salt_returns WHERE jid = :jid'''
cur.execute(sql,
{'jid': jid})
data = cur.fetchone()
log.debug('query result: %s', data)
ret = {}
if data and len(data) > 1:
ret = {six.text_type(data[0]): {'return': salt.utils.json.loads(data[1])}}
log.debug('ret: %s', ret)
_close_conn(conn)
return ret |
def _resolve_array_type(self):
"""Return one of the ARRAY_TYPES members of DIMENSION_TYPE.
This method distinguishes between CA and MR dimensions. The return
value is only meaningful if the dimension is known to be of array
type (i.e. either CA or MR, base-type 'enum.variable').
"""
next_raw_dimension = self._next_raw_dimension
if next_raw_dimension is None:
return DT.CA
is_mr_subvar = (
next_raw_dimension._base_type == "categorical"
and next_raw_dimension._has_selected_category
and next_raw_dimension._alias == self._alias
)
return DT.MR if is_mr_subvar else DT.CA | Return one of the ARRAY_TYPES members of DIMENSION_TYPE.
This method distinguishes between CA and MR dimensions. The return
value is only meaningful if the dimension is known to be of array
type (i.e. either CA or MR, base-type 'enum.variable'). | Below is the the instruction that describes the task:
### Input:
Return one of the ARRAY_TYPES members of DIMENSION_TYPE.
This method distinguishes between CA and MR dimensions. The return
value is only meaningful if the dimension is known to be of array
type (i.e. either CA or MR, base-type 'enum.variable').
### Response:
def _resolve_array_type(self):
"""Return one of the ARRAY_TYPES members of DIMENSION_TYPE.
This method distinguishes between CA and MR dimensions. The return
value is only meaningful if the dimension is known to be of array
type (i.e. either CA or MR, base-type 'enum.variable').
"""
next_raw_dimension = self._next_raw_dimension
if next_raw_dimension is None:
return DT.CA
is_mr_subvar = (
next_raw_dimension._base_type == "categorical"
and next_raw_dimension._has_selected_category
and next_raw_dimension._alias == self._alias
)
return DT.MR if is_mr_subvar else DT.CA |
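The decision rule above, restated with throwaway stand-in objects (these are not the real crunch-cube classes): the dimension is MR only when the following raw dimension is categorical, carries a selected category, and shares the alias; otherwise it is CA.
class FakeRawDim(object):
    def __init__(self, base_type, has_selected, alias):
        self._base_type = base_type
        self._has_selected_category = has_selected
        self._alias = alias
def resolve(alias, next_raw):
    if next_raw is None:
        return 'CA'
    is_mr_subvar = (next_raw._base_type == 'categorical'
                    and next_raw._has_selected_category
                    and next_raw._alias == alias)
    return 'MR' if is_mr_subvar else 'CA'
print(resolve('pets', FakeRawDim('categorical', True, 'pets')))    # MR
print(resolve('pets', FakeRawDim('categorical', False, 'pets')))   # CA
print(resolve('pets', None))                                       # CA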
def add_params(endpoint, params):
"""
Combine query endpoint and params.
Example::
>>> add_params("https://www.google.com/search", {"q": "iphone"})
https://www.google.com/search?q=iphone
"""
p = PreparedRequest()
p.prepare(url=endpoint, params=params)
if PY2: # pragma: no cover
return unicode(p.url)
else: # pragma: no cover
return p.url | Combine query endpoint and params.
Example::
>>> add_params("https://www.google.com/search", {"q": "iphone"})
https://www.google.com/search?q=iphone | Below is the the instruction that describes the task:
### Input:
Combine query endpoint and params.
Example::
>>> add_params("https://www.google.com/search", {"q": "iphone"})
https://www.google.com/search?q=iphone
### Response:
def add_params(endpoint, params):
"""
Combine query endpoint and params.
Example::
>>> add_params("https://www.google.com/search", {"q": "iphone"})
https://www.google.com/search?q=iphone
"""
p = PreparedRequest()
p.prepare(url=endpoint, params=params)
if PY2: # pragma: no cover
return unicode(p.url)
else: # pragma: no cover
return p.url |
def first(self):
"""Attempt to retrieve only the first resource matching this request.
:return: Result instance, or `None` if there are no matching resources.
"""
self.params['limit'] = 1
result = self.all()
return result.items[0] if result.total > 0 else None | Attempt to retrieve only the first resource matching this request.
:return: Result instance, or `None` if there are no matching resources. | Below is the the instruction that describes the task:
### Input:
Attempt to retrieve only the first resource matching this request.
:return: Result instance, or `None` if there are no matching resources.
### Response:
def first(self):
"""Attempt to retrieve only the first resource matching this request.
:return: Result instance, or `None` if there are no matching resources.
"""
self.params['limit'] = 1
result = self.all()
return result.items[0] if result.total > 0 else None |
def get_pixels_on_line(self, x1, y1, x2, y2, getvalues=True):
"""Uses Bresenham's line algorithm to enumerate the pixels along
a line.
(see http://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm)
If `getvalues`==False then it will return tuples of (x, y) coordinates
instead of pixel values.
"""
# NOTE: seems to be necessary or we get a non-terminating result
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
dx = abs(x2 - x1)
dy = abs(y2 - y1)
if x1 < x2:
sx = 1
else:
sx = -1
if y1 < y2:
sy = 1
else:
sy = -1
err = dx - dy
res = []
x, y = x1, y1
while True:
if getvalues:
try:
val = self.get_data_xy(x, y)
except Exception:
val = np.NaN
res.append(val)
else:
res.append((x, y))
if (x == x2) and (y == y2):
break
e2 = 2 * err
if e2 > -dy:
err = err - dy
x += sx
if e2 < dx:
err = err + dx
y += sy
return res | Uses Bresenham's line algorithm to enumerate the pixels along
a line.
(see http://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm)
If `getvalues`==False then it will return tuples of (x, y) coordinates
instead of pixel values. | Below is the the instruction that describes the task:
### Input:
Uses Bresenham's line algorithm to enumerate the pixels along
a line.
(see http://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm)
If `getvalues`==False then it will return tuples of (x, y) coordinates
instead of pixel values.
### Response:
def get_pixels_on_line(self, x1, y1, x2, y2, getvalues=True):
"""Uses Bresenham's line algorithm to enumerate the pixels along
a line.
(see http://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm)
If `getvalues`==False then it will return tuples of (x, y) coordinates
instead of pixel values.
"""
# NOTE: seems to be necessary or we get a non-terminating result
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
dx = abs(x2 - x1)
dy = abs(y2 - y1)
if x1 < x2:
sx = 1
else:
sx = -1
if y1 < y2:
sy = 1
else:
sy = -1
err = dx - dy
res = []
x, y = x1, y1
while True:
if getvalues:
try:
val = self.get_data_xy(x, y)
except Exception:
val = np.NaN
res.append(val)
else:
res.append((x, y))
if (x == x2) and (y == y2):
break
e2 = 2 * err
if e2 > -dy:
err = err - dy
x += sx
if e2 < dx:
err = err + dx
y += sy
return res |
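A standalone trace of the same Bresenham loop in coordinate mode (the getvalues=False path), with no image object involved; the endpoints are arbitrary examples.
def bresenham_points(x1, y1, x2, y2):
    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    dx, dy = abs(x2 - x1), abs(y2 - y1)
    sx = 1 if x1 < x2 else -1
    sy = 1 if y1 < y2 else -1
    err = dx - dy
    x, y, res = x1, y1, []
    while True:
        res.append((x, y))
        if (x, y) == (x2, y2):
            break
        e2 = 2 * err
        if e2 > -dy:
            err -= dy
            x += sx
        if e2 < dx:
            err += dx
            y += sy
    return res
assert bresenham_points(0, 0, 5, 3) == [(0, 0), (1, 1), (2, 1), (3, 2), (4, 2), (5, 3)]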
def asn(self, ip, announce_date=None):
"""
Give an IP, maybe a date, get the ASN.
This is the fastest command.
:param ip: IP address to search for
:param announce_date: Date of the announcement
:rtype: String, ASN.
"""
assignations, announce_date, _ = self.run(ip, announce_date)
return next((assign for assign in assignations if assign is not None), None), announce_date | Give an IP, maybe a date, get the ASN.
This is the fastest command.
:param ip: IP address to search for
:param announce_date: Date of the announcement
:rtype: String, ASN. | Below is the the instruction that describes the task:
### Input:
Give an IP, maybe a date, get the ASN.
This is the fastest command.
:param ip: IP address to search for
:param announce_date: Date of the announcement
:rtype: String, ASN.
### Response:
def asn(self, ip, announce_date=None):
"""
Give an IP, maybe a date, get the ASN.
This is the fastest command.
:param ip: IP address to search for
:param announce_date: Date of the announcement
:rtype: String, ASN.
"""
assignations, announce_date, _ = self.run(ip, announce_date)
return next((assign for assign in assignations if assign is not None), None), announce_date |
def alignmentPanelHTML(titlesAlignments, sortOn='maxScore',
outputDir=None, idList=False, equalizeXAxes=False,
xRange='subject', logLinearXAxis=False,
logBase=DEFAULT_LOG_LINEAR_X_AXIS_BASE,
rankScores=False, showFeatures=True, showOrfs=True):
"""
Produces an HTML index file in C{outputDir} and a collection of alignment
graphs and FASTA files to summarize the information in C{titlesAlignments}.
@param titlesAlignments: A L{dark.titles.TitlesAlignments} instance.
@param sortOn: The attribute to sort subplots on. Either "maxScore",
"medianScore", "readCount", "length", or "title".
@param outputDir: Specifies a C{str} directory to write the HTML to. If
the directory does not exist it will be created.
@param idList: A dictionary. Keys are colors and values are lists of read
ids that should be colored using that color.
@param equalizeXAxes: If C{True}, adjust the X axis on each alignment plot
to be the same.
@param xRange: Set to either 'subject' or 'reads' to indicate the range of
the X axis.
@param logLinearXAxis: If C{True}, convert read offsets so that empty
regions in the plots we're preparing will only be as wide as their
logged actual values.
@param logBase: The logarithm base to use if logLinearXAxis is C{True}.
@param: rankScores: If C{True}, change the scores for the reads for each
title to be their rank (worst to best).
@param showFeatures: If C{True}, look online for features of the subject
sequences.
@param showOrfs: If C{True}, open reading frames will be displayed.
@raise TypeError: If C{outputDir} is C{None}.
@raise ValueError: If C{outputDir} is None or exists but is not a
directory or if C{xRange} is not "subject" or "reads".
"""
if xRange not in ('subject', 'reads'):
raise ValueError('xRange must be either "subject" or "reads".')
if equalizeXAxes:
raise NotImplementedError('This feature is not yet implemented.')
titles = titlesAlignments.sortTitles(sortOn)
if os.access(outputDir, os.F_OK):
# outputDir exists. Check it's a directory.
if not S_ISDIR(os.stat(outputDir).st_mode):
raise ValueError("%r is not a directory." % outputDir)
else:
if outputDir is None:
raise ValueError("The outputDir needs to be specified.")
else:
os.mkdir(outputDir)
htmlWriter = AlignmentPanelHTMLWriter(outputDir, titlesAlignments)
for i, title in enumerate(titles):
# titleAlignments = titlesAlignments[title]
# If we are writing data to a file too, create a separate file with
# a plot (this will be linked from the summary HTML).
imageBasename = '%d.png' % i
imageFile = '%s/%s' % (outputDir, imageBasename)
graphInfo = alignmentGraph(
titlesAlignments, title, addQueryLines=True,
showFeatures=showFeatures, rankScores=rankScores,
logLinearXAxis=logLinearXAxis, logBase=logBase,
colorQueryBases=False, showFigure=False, imageFile=imageFile,
quiet=True, idList=idList, xRange=xRange, showOrfs=showOrfs)
# Close the image plot to make sure memory is flushed.
plt.close()
htmlWriter.addImage(imageBasename, title, graphInfo)
htmlWriter.close() | Produces an HTML index file in C{outputDir} and a collection of alignment
graphs and FASTA files to summarize the information in C{titlesAlignments}.
@param titlesAlignments: A L{dark.titles.TitlesAlignments} instance.
@param sortOn: The attribute to sort subplots on. Either "maxScore",
"medianScore", "readCount", "length", or "title".
@param outputDir: Specifies a C{str} directory to write the HTML to. If
the directory does not exist it will be created.
@param idList: A dictionary. Keys are colors and values are lists of read
ids that should be colored using that color.
@param equalizeXAxes: If C{True}, adjust the X axis on each alignment plot
to be the same.
@param xRange: Set to either 'subject' or 'reads' to indicate the range of
the X axis.
@param logLinearXAxis: If C{True}, convert read offsets so that empty
regions in the plots we're preparing will only be as wide as their
logged actual values.
@param logBase: The logarithm base to use if logLinearXAxis is C{True}.
@param: rankScores: If C{True}, change the scores for the reads for each
title to be their rank (worst to best).
@param showFeatures: If C{True}, look online for features of the subject
sequences.
@param showOrfs: If C{True}, open reading frames will be displayed.
@raise TypeError: If C{outputDir} is C{None}.
@raise ValueError: If C{outputDir} is None or exists but is not a
directory or if C{xRange} is not "subject" or "reads". | Below is the the instruction that describes the task:
### Input:
Produces an HTML index file in C{outputDir} and a collection of alignment
graphs and FASTA files to summarize the information in C{titlesAlignments}.
@param titlesAlignments: A L{dark.titles.TitlesAlignments} instance.
@param sortOn: The attribute to sort subplots on. Either "maxScore",
"medianScore", "readCount", "length", or "title".
@param outputDir: Specifies a C{str} directory to write the HTML to. If
the directory does not exist it will be created.
@param idList: A dictionary. Keys are colors and values are lists of read
ids that should be colored using that color.
@param equalizeXAxes: If C{True}, adjust the X axis on each alignment plot
to be the same.
@param xRange: Set to either 'subject' or 'reads' to indicate the range of
the X axis.
@param logLinearXAxis: If C{True}, convert read offsets so that empty
regions in the plots we're preparing will only be as wide as their
logged actual values.
@param logBase: The logarithm base to use if logLinearXAxis is C{True}.
@param: rankScores: If C{True}, change the scores for the reads for each
title to be their rank (worst to best).
@param showFeatures: If C{True}, look online for features of the subject
sequences.
@param showOrfs: If C{True}, open reading frames will be displayed.
@raise TypeError: If C{outputDir} is C{None}.
@raise ValueError: If C{outputDir} is None or exists but is not a
directory or if C{xRange} is not "subject" or "reads".
### Response:
def alignmentPanelHTML(titlesAlignments, sortOn='maxScore',
outputDir=None, idList=False, equalizeXAxes=False,
xRange='subject', logLinearXAxis=False,
logBase=DEFAULT_LOG_LINEAR_X_AXIS_BASE,
rankScores=False, showFeatures=True, showOrfs=True):
"""
Produces an HTML index file in C{outputDir} and a collection of alignment
graphs and FASTA files to summarize the information in C{titlesAlignments}.
@param titlesAlignments: A L{dark.titles.TitlesAlignments} instance.
@param sortOn: The attribute to sort subplots on. Either "maxScore",
"medianScore", "readCount", "length", or "title".
@param outputDir: Specifies a C{str} directory to write the HTML to. If
the directory does not exist it will be created.
@param idList: A dictionary. Keys are colors and values are lists of read
ids that should be colored using that color.
@param equalizeXAxes: If C{True}, adjust the X axis on each alignment plot
to be the same.
@param xRange: Set to either 'subject' or 'reads' to indicate the range of
the X axis.
@param logLinearXAxis: If C{True}, convert read offsets so that empty
regions in the plots we're preparing will only be as wide as their
logged actual values.
@param logBase: The logarithm base to use if logLinearXAxis is C{True}.
@param: rankScores: If C{True}, change the scores for the reads for each
title to be their rank (worst to best).
@param showFeatures: If C{True}, look online for features of the subject
sequences.
@param showOrfs: If C{True}, open reading frames will be displayed.
@raise TypeError: If C{outputDir} is C{None}.
@raise ValueError: If C{outputDir} is None or exists but is not a
directory or if C{xRange} is not "subject" or "reads".
"""
if xRange not in ('subject', 'reads'):
raise ValueError('xRange must be either "subject" or "reads".')
if equalizeXAxes:
raise NotImplementedError('This feature is not yet implemented.')
titles = titlesAlignments.sortTitles(sortOn)
if os.access(outputDir, os.F_OK):
# outputDir exists. Check it's a directory.
if not S_ISDIR(os.stat(outputDir).st_mode):
raise ValueError("%r is not a directory." % outputDir)
else:
if outputDir is None:
raise ValueError("The outputDir needs to be specified.")
else:
os.mkdir(outputDir)
htmlWriter = AlignmentPanelHTMLWriter(outputDir, titlesAlignments)
for i, title in enumerate(titles):
# titleAlignments = titlesAlignments[title]
# If we are writing data to a file too, create a separate file with
# a plot (this will be linked from the summary HTML).
imageBasename = '%d.png' % i
imageFile = '%s/%s' % (outputDir, imageBasename)
graphInfo = alignmentGraph(
titlesAlignments, title, addQueryLines=True,
showFeatures=showFeatures, rankScores=rankScores,
logLinearXAxis=logLinearXAxis, logBase=logBase,
colorQueryBases=False, showFigure=False, imageFile=imageFile,
quiet=True, idList=idList, xRange=xRange, showOrfs=showOrfs)
# Close the image plot to make sure memory is flushed.
plt.close()
htmlWriter.addImage(imageBasename, title, graphInfo)
htmlWriter.close() |
def crossValidation(self,seed=0,n_folds=10,fullVector=True,verbose=None,D=None,**keywords):
"""
Split the dataset in n folds, predict each fold after training the model on all the others
Args:
seed: seed
n_folds: number of folds to train the model on
fullVector: Boolean indicator, if true it stops if no convergence is observed for one of the folds, otherwise goes through and returns a pheno matrix with missing values
verbose: if true, prints the fold that is being used for predictions
**keywords: params to pass to the function optimize
Returns:
Matrix of phenotype predictions [N,P]
"""
verbose = dlimix.getVerbose(verbose)
# split samples into training and test
sp.random.seed(seed)
r = sp.random.permutation(self.Y.shape[0])
nfolds = 10
Icv = sp.floor(((sp.ones((self.Y.shape[0]))*nfolds)*r)/self.Y.shape[0])
RV = {}
if self.P==1: RV['var'] = sp.zeros((nfolds,self.n_randEffs))
else: RV['var'] = sp.zeros((nfolds,self.P,self.n_randEffs))
Ystar = sp.zeros_like(self.Y)
for fold_j in range(n_folds):
if verbose:
print((".. predict fold %d"%fold_j))
Itrain = Icv!=fold_j
Itest = Icv==fold_j
Ytrain = self.Y[Itrain,:]
Ytest = self.Y[Itest,:]
vc = VarianceDecomposition(Ytrain)
vc.setTestSampleSize(Itest.sum())
for term_i in range(self.n_fixedEffs):
F = self.vd.getFixed(term_i)
Ftest = F[Itest,:]
Ftrain = F[Itrain,:]
if self.P>1: A = self.vd.getDesign(term_i)
else: A = None
vc.addFixedEffect(F=Ftrain,Ftest=Ftest,A=A)
for term_i in range(self.n_randEffs):
if self.P>1:
tct = self.trait_covar_type[term_i]
rank = self.rank[term_i]
ftc = self.fixed_tc[term_i]
jitt = self.jitter[term_i]
if tct=='lowrank_diag1' or tct=='freeform1':
d = D[fold_j,:,term_i]
else:
d = None
else:
tct = None
rank = None
ftc = None
jitt = None
d = None
if term_i==self.noisPos:
vc.addRandomEffect(is_noise=True,trait_covar_type=tct,rank=rank,jitter=jitt,fixed_trait_covar=ftc,d=d)
else:
R = self.vd.getTerm(term_i).getK()
Rtrain = R[Itrain,:][:,Itrain]
Rcross = R[Itrain,:][:,Itest]
vc.addRandomEffect(K=Rtrain,Kcross=Rcross,trait_covar_type=tct,rank=rank,jitter=jitt,fixed_trait_covar=ftc,d=d)
conv = vc.optimize(verbose=False,**keywords)
if self.P==1:
RV['var'][fold_j,:] = vc.getVarianceComps()[0,:]
else:
RV['var'][fold_j,:,:] = vc.getVarianceComps()
if fullVector:
assert conv, 'VarianceDecompositon:: not converged for fold %d. Stopped here' % fold_j
if conv:
Ystar[Itest,:] = vc.predictPhenos()
else:
warnings.warn('not converged for fold %d' % fold_j)
Ystar[Itest,:] = sp.nan
return Ystar,RV | Split the dataset in n folds, predict each fold after training the model on all the others
Args:
seed: seed
n_folds: number of folds to train the model on
fullVector: Boolean indicator, if true it stops if no convergence is observed for one of the folds, otherwise goes through and returns a pheno matrix with missing values
verbose: if true, prints the fold that is being used for predictions
**keywords: params to pass to the function optimize
Returns:
Matrix of phenotype predictions [N,P] | Below is the the instruction that describes the task:
### Input:
Split the dataset in n folds, predict each fold after training the model on all the others
Args:
seed: seed
n_folds: number of folds to train the model on
fullVector: Boolean indicator, if true it stops if no convergence is observed for one of the folds, otherwise goes through and returns a pheno matrix with missing values
verbose: if true, prints the fold that is being used for predictions
**keywords: params to pass to the function optimize
Returns:
Matrix of phenotype predictions [N,P]
### Response:
def crossValidation(self,seed=0,n_folds=10,fullVector=True,verbose=None,D=None,**keywords):
"""
Split the dataset in n folds, predict each fold after training the model on all the others
Args:
seed: seed
n_folds: number of folds to train the model on
fullVector: Boolean indicator, if true it stops if no convergence is observed for one of the folds, otherwise goes through and returns a pheno matrix with missing values
verbose: if true, prints the fold that is being used for predictions
**keywords: params to pass to the function optimize
Returns:
Matrix of phenotype predictions [N,P]
"""
verbose = dlimix.getVerbose(verbose)
# split samples into training and test
sp.random.seed(seed)
r = sp.random.permutation(self.Y.shape[0])
nfolds = 10
Icv = sp.floor(((sp.ones((self.Y.shape[0]))*nfolds)*r)/self.Y.shape[0])
RV = {}
if self.P==1: RV['var'] = sp.zeros((nfolds,self.n_randEffs))
else: RV['var'] = sp.zeros((nfolds,self.P,self.n_randEffs))
Ystar = sp.zeros_like(self.Y)
for fold_j in range(n_folds):
if verbose:
print((".. predict fold %d"%fold_j))
Itrain = Icv!=fold_j
Itest = Icv==fold_j
Ytrain = self.Y[Itrain,:]
Ytest = self.Y[Itest,:]
vc = VarianceDecomposition(Ytrain)
vc.setTestSampleSize(Itest.sum())
for term_i in range(self.n_fixedEffs):
F = self.vd.getFixed(term_i)
Ftest = F[Itest,:]
Ftrain = F[Itrain,:]
if self.P>1: A = self.vd.getDesign(term_i)
else: A = None
vc.addFixedEffect(F=Ftrain,Ftest=Ftest,A=A)
for term_i in range(self.n_randEffs):
if self.P>1:
tct = self.trait_covar_type[term_i]
rank = self.rank[term_i]
ftc = self.fixed_tc[term_i]
jitt = self.jitter[term_i]
if tct=='lowrank_diag1' or tct=='freeform1':
d = D[fold_j,:,term_i]
else:
d = None
else:
tct = None
rank = None
ftc = None
jitt = None
d = None
if term_i==self.noisPos:
vc.addRandomEffect(is_noise=True,trait_covar_type=tct,rank=rank,jitter=jitt,fixed_trait_covar=ftc,d=d)
else:
R = self.vd.getTerm(term_i).getK()
Rtrain = R[Itrain,:][:,Itrain]
Rcross = R[Itrain,:][:,Itest]
vc.addRandomEffect(K=Rtrain,Kcross=Rcross,trait_covar_type=tct,rank=rank,jitter=jitt,fixed_trait_covar=ftc,d=d)
conv = vc.optimize(verbose=False,**keywords)
if self.P==1:
RV['var'][fold_j,:] = vc.getVarianceComps()[0,:]
else:
RV['var'][fold_j,:,:] = vc.getVarianceComps()
if fullVector:
assert conv, 'VarianceDecompositon:: not converged for fold %d. Stopped here' % fold_j
if conv:
Ystar[Itest,:] = vc.predictPhenos()
else:
warnings.warn('not converged for fold %d' % fold_j)
Ystar[Itest,:] = sp.nan
return Ystar,RV |
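A sketch of how the fold labels are derived above (numpy written out in place of the sp alias); note that the label computation uses the hard-coded local nfolds = 10 rather than the n_folds argument, so each sample's label is floor(10 * rank / N).
import numpy as np
np.random.seed(0)
N = 8
r = np.random.permutation(N)               # random rank for every sample
Icv = np.floor((np.ones(N) * 10 * r) / N)  # fold label per sample
Itrain, Itest = Icv != 0, Icv == 0         # boolean masks for fold 0
print(Icv, Itest.sum())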
def encode_request(name, expected, updated):
""" Encode request into client_message"""
client_message = ClientMessage(payload_size=calculate_size(name, expected, updated))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_str(name)
client_message.append_bool(expected is None)
if expected is not None:
client_message.append_data(expected)
client_message.append_bool(updated is None)
if updated is not None:
client_message.append_data(updated)
client_message.update_frame_length()
return client_message | Encode request into client_message | Below is the the instruction that describes the task:
### Input:
Encode request into client_message
### Response:
def encode_request(name, expected, updated):
""" Encode request into client_message"""
client_message = ClientMessage(payload_size=calculate_size(name, expected, updated))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_str(name)
client_message.append_bool(expected is None)
if expected is not None:
client_message.append_data(expected)
client_message.append_bool(updated is None)
if updated is not None:
client_message.append_data(updated)
client_message.update_frame_length()
return client_message |
def _check_contraint(self, edge1, edge2):
"""Check if two edges satisfy vine constraint.
Args:
:param edge1: edge object representing edge1
:param edge2: edge object representing edge2
:type edge1: Edge object
:type edge2: Edge object
Returns:
Boolean True if the two edges satisfy vine constraints
"""
full_node = set([edge1.L, edge1.R, edge2.L, edge2.R])
full_node.update(edge1.D)
full_node.update(edge2.D)
return len(full_node) == (self.level + 1) | Check if two edges satisfy vine constraint.
Args:
:param edge1: edge object representing edge1
:param edge2: edge object representing edge2
:type edge1: Edge object
:type edge2: Edge object
Returns:
Boolean True if the two edges satisfy vine constraints | Below is the the instruction that describes the task:
### Input:
Check if two edges satisfy vine constraint.
Args:
:param edge1: edge object representing edge1
:param edge2: edge object representing edge2
:type edge1: Edge object
:type edge2: Edge object
Returns:
Boolean True if the two edges satisfy vine constraints
### Response:
def _check_contraint(self, edge1, edge2):
"""Check if two edges satisfy vine constraint.
Args:
:param edge1: edge object representing edge1
:param edge2: edge object representing edge2
:type edge1: Edge object
:type edge2: Edge object
Returns:
Boolean True if the two edges satisfy vine constraints
"""
full_node = set([edge1.L, edge1.R, edge2.L, edge2.R])
full_node.update(edge1.D)
full_node.update(edge2.D)
return len(full_node) == (self.level + 1) |
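The proximity condition above, restated with lightweight stand-ins (not the real Edge class): two edges may be joined at tree level k only if the union of their L, R and D node sets has exactly k + 1 distinct members.
class FakeEdge(object):
    def __init__(self, L, R, D=()):
        self.L, self.R, self.D = L, R, set(D)
def satisfies_constraint(edge1, edge2, level):
    full_node = {edge1.L, edge1.R, edge2.L, edge2.R}
    full_node.update(edge1.D)
    full_node.update(edge2.D)
    return len(full_node) == level + 1
print(satisfies_constraint(FakeEdge(0, 1), FakeEdge(1, 2), level=2))   # True: {0, 1, 2}
print(satisfies_constraint(FakeEdge(0, 1), FakeEdge(2, 3), level=2))   # False: {0, 1, 2, 3}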
def _xfs_prune_output(out, uuid):
'''
Parse prune output.
'''
data = {}
cnt = []
cutpoint = False
for line in [l.strip() for l in out.split("\n") if l]:
if line.startswith("-"):
if cutpoint:
break
else:
cutpoint = True
continue
if cutpoint:
cnt.append(line)
for kset in [e for e in cnt[1:] if ':' in e]:
key, val = [t.strip() for t in kset.split(":", 1)]
data[key.lower().replace(" ", "_")] = val
return data.get('uuid') == uuid and data or {} | Parse prune output. | Below is the the instruction that describes the task:
### Input:
Parse prune output.
### Response:
def _xfs_prune_output(out, uuid):
'''
Parse prune output.
'''
data = {}
cnt = []
cutpoint = False
for line in [l.strip() for l in out.split("\n") if l]:
if line.startswith("-"):
if cutpoint:
break
else:
cutpoint = True
continue
if cutpoint:
cnt.append(line)
for kset in [e for e in cnt[1:] if ':' in e]:
key, val = [t.strip() for t in kset.split(":", 1)]
data[key.lower().replace(" ", "_")] = val
return data.get('uuid') == uuid and data or {} |
def symb_to_block(symb, coupling = 'full'):
"""
Maps a symbolic factorization to a block-diagonal structure with
coupling constraints.
:param symb: :py:class:`symbolic`
:param coupling: optional
:return dims: list of block dimensions
:return sparse_to_block: dictionary
:return constraints: list of coupling constraints
"""
n = len(symb.snode) # order of block
Ncliques = len(symb.snpar) # number of cliques
# compute clique orders
dims = [symb.sncolptr[j+1]-symb.sncolptr[j] for j in range(Ncliques)]
# compute offsets in block-diagonal structure
offsets = [0]
for i in range(Ncliques): offsets.append(offsets[-1] + dims[i]**2)
constraints = [] # list of coupling constraints
sparse_to_block = {} # conversion dictionary
for k in range(Ncliques):
# map nonzeros in {Jk,Nk} part of clique k to block-diagonal structure
nodes = symb.snode[symb.snptr[k]:symb.snptr[k+1]]
rows = symb.snrowidx[symb.sncolptr[k]:symb.sncolptr[k+1]]
nk = len(nodes) # number of nodes in supernode
wk = len(rows) # number of nodes in clique
for j in range(nk):
for i in range(j,wk):
if i == j:
sparse_to_block[nodes[j]*n + rows[i]] = (offsets[k] + j*wk + i,)
else:
sparse_to_block[nodes[j]*n + rows[i]] =(offsets[k] + j*wk + i, offsets[k] + i*wk + j)
# add coupling constraints to list of constraints
if symb.snpar[k] == k: continue # skip if supernode k is a root supernode
p = symb.snpar[k]
np = len(symb.snode[symb.snptr[p]:symb.snptr[p+1]])
wp = symb.sncolptr[p+1] - symb.sncolptr[p]
ri = symb.relidx[symb.relptr[k]:symb.relptr[k+1]]
if type(coupling) is spmatrix:
tmp = coupling[rows[nk:],rows[nk:]]
for i,j in zip(tmp.I,tmp.J):
if j == i:
constraints.append((offsets[k] + (j+nk)*wk + i+nk,
offsets[p] + ri[j]*wp + ri[i]))
else:
constraints.append((offsets[k] + (j+nk)*wk + i+nk,
offsets[p] + ri[j]*wp + ri[i],
offsets[k] + (i+nk)*wk + j+nk,
offsets[p] + ri[i]*wp + ri[j]))
elif coupling == 'full':
for j in range(len(ri)):
for i in range(j,len(ri)):
if j == i:
constraints.append((offsets[k] + (j+nk)*wk + i+nk,
offsets[p] + ri[j]*wp + ri[i]))
else:
constraints.append((offsets[k] + (j+nk)*wk + i+nk,
offsets[p] + ri[j]*wp + ri[i],
offsets[k] + (i+nk)*wk + j+nk,
offsets[p] + ri[i]*wp + ri[j]))
return dims, sparse_to_block, constraints | Maps a symbolic factorization to a block-diagonal structure with
coupling constraints.
:param symb: :py:class:`symbolic`
:param coupling: optional
:return dims: list of block dimensions
:return sparse_to_block: dictionary
:return constraints: list of coupling constraints | Below is the the instruction that describes the task:
### Input:
Maps a symbolic factorization to a block-diagonal structure with
coupling constraints.
:param symb: :py:class:`symbolic`
:param coupling: optional
:return dims: list of block dimensions
:return sparse_to_block: dictionary
:return constraints: list of coupling constraints
### Response:
def symb_to_block(symb, coupling = 'full'):
"""
Maps a symbolic factorization to a block-diagonal structure with
coupling constraints.
:param symb: :py:class:`symbolic`
:param coupling: optional
:return dims: list of block dimensions
:return sparse_to_block: dictionary
:return constraints: list of coupling constraints
"""
n = len(symb.snode) # order of block
Ncliques = len(symb.snpar) # number of cliques
# compute clique orders
dims = [symb.sncolptr[j+1]-symb.sncolptr[j] for j in range(Ncliques)]
# compute offsets in block-diagonal structure
offsets = [0]
for i in range(Ncliques): offsets.append(offsets[-1] + dims[i]**2)
constraints = [] # list of coupling constraints
sparse_to_block = {} # conversion dictionary
for k in range(Ncliques):
# map nonzeros in {Jk,Nk} part of clique k to block-diagonal structure
nodes = symb.snode[symb.snptr[k]:symb.snptr[k+1]]
rows = symb.snrowidx[symb.sncolptr[k]:symb.sncolptr[k+1]]
nk = len(nodes) # number of nodes in supernode
wk = len(rows) # number of nodes in clique
for j in range(nk):
for i in range(j,wk):
if i == j:
sparse_to_block[nodes[j]*n + rows[i]] = (offsets[k] + j*wk + i,)
else:
sparse_to_block[nodes[j]*n + rows[i]] =(offsets[k] + j*wk + i, offsets[k] + i*wk + j)
# add coupling constraints to list of constraints
if symb.snpar[k] == k: continue # skip if supernode k is a root supernode
p = symb.snpar[k]
np = len(symb.snode[symb.snptr[p]:symb.snptr[p+1]])
wp = symb.sncolptr[p+1] - symb.sncolptr[p]
ri = symb.relidx[symb.relptr[k]:symb.relptr[k+1]]
if type(coupling) is spmatrix:
tmp = coupling[rows[nk:],rows[nk:]]
for i,j in zip(tmp.I,tmp.J):
if j == i:
constraints.append((offsets[k] + (j+nk)*wk + i+nk,
offsets[p] + ri[j]*wp + ri[i]))
else:
constraints.append((offsets[k] + (j+nk)*wk + i+nk,
offsets[p] + ri[j]*wp + ri[i],
offsets[k] + (i+nk)*wk + j+nk,
offsets[p] + ri[i]*wp + ri[j]))
elif coupling == 'full':
for j in range(len(ri)):
for i in range(j,len(ri)):
if j == i:
constraints.append((offsets[k] + (j+nk)*wk + i+nk,
offsets[p] + ri[j]*wp + ri[i]))
else:
constraints.append((offsets[k] + (j+nk)*wk + i+nk,
offsets[p] + ri[j]*wp + ri[i],
offsets[k] + (i+nk)*wk + j+nk,
offsets[p] + ri[i]*wp + ri[j]))
return dims, sparse_to_block, constraints |
def has_add_permission(self, request):
""" Can add this object """
return request.user.is_authenticated and request.user.is_active and request.user.is_staff | Can add this object | Below is the the instruction that describes the task:
### Input:
Can add this object
### Response:
def has_add_permission(self, request):
""" Can add this object """
return request.user.is_authenticated and request.user.is_active and request.user.is_staff |
def drop(self, relation):
"""Drop the named relation and cascade it appropriately to all
dependent relations.
Because dbt proactively does many `drop relation if exist ... cascade`
that are noops, nonexistent relation drops cause a debug log and no
other actions.
:param str schema: The schema of the relation to drop.
:param str identifier: The identifier of the relation to drop.
"""
dropped = _make_key(relation)
logger.debug('Dropping relation: {!s}'.format(dropped))
with self.lock:
self._drop_cascade_relation(dropped) | Drop the named relation and cascade it appropriately to all
dependent relations.
Because dbt proactively does many `drop relation if exist ... cascade`
that are noops, nonexistent relation drops cause a debug log and no
other actions.
:param str schema: The schema of the relation to drop.
:param str identifier: The identifier of the relation to drop. | Below is the the instruction that describes the task:
### Input:
Drop the named relation and cascade it appropriately to all
dependent relations.
Because dbt proactively does many `drop relation if exist ... cascade`
that are noops, nonexistent relation drops cause a debug log and no
other actions.
:param str schema: The schema of the relation to drop.
:param str identifier: The identifier of the relation to drop.
### Response:
def drop(self, relation):
"""Drop the named relation and cascade it appropriately to all
dependent relations.
Because dbt proactively does many `drop relation if exist ... cascade`
that are noops, nonexistent relation drops cause a debug log and no
other actions.
:param str schema: The schema of the relation to drop.
:param str identifier: The identifier of the relation to drop.
"""
dropped = _make_key(relation)
logger.debug('Dropping relation: {!s}'.format(dropped))
with self.lock:
self._drop_cascade_relation(dropped) |
def _as_decode(self, msg):
"""AS: Arming status report."""
return {'armed_statuses': [x for x in msg[4:12]],
'arm_up_states': [x for x in msg[12:20]],
'alarm_states': [x for x in msg[20:28]]} | AS: Arming status report. | Below is the the instruction that describes the task:
### Input:
AS: Arming status report.
### Response:
def _as_decode(self, msg):
"""AS: Arming status report."""
return {'armed_statuses': [x for x in msg[4:12]],
'arm_up_states': [x for x in msg[12:20]],
'alarm_states': [x for x in msg[20:28]]} |
def grab_checksums_file(entry):
"""Grab the checksum file for a given entry."""
http_url = convert_ftp_url(entry['ftp_path'])
full_url = '{}/md5checksums.txt'.format(http_url)
req = requests.get(full_url)
return req.text | Grab the checksum file for a given entry. | Below is the the instruction that describes the task:
### Input:
Grab the checksum file for a given entry.
### Response:
def grab_checksums_file(entry):
"""Grab the checksum file for a given entry."""
http_url = convert_ftp_url(entry['ftp_path'])
full_url = '{}/md5checksums.txt'.format(http_url)
req = requests.get(full_url)
return req.text |
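
The helper above returns the raw md5checksums.txt body. A follow-on sketch for turning it into a {path: md5} map; the two-column `<md5> <path>` row layout is an assumption about the checksum file format, not something guaranteed by the function itself:

def parse_checksums(checksums_text):
    """Parse md5checksums.txt content into {path: md5} (sketch)."""
    checksums = {}
    for line in checksums_text.splitlines():
        parts = line.split()
        if len(parts) != 2:
            continue  # skip blank or malformed rows
        md5sum, path = parts
        checksums[path.lstrip('./')] = md5sum
    return checksums

# usage (hypothetical entry dict):
# text = grab_checksums_file({'ftp_path': 'ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCF_000001405.39'})
# md5_by_file = parse_checksums(text)
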
def verify_token_string(self,
token_string,
action=None,
timeout=None,
current_time=None):
"""Generate a hash of the given token contents that can be verified.
:param token_string:
A string containing the hashed token (generated by
`generate_token_string`).
:param action:
A string containing the action that is being verified.
:param timeout:
An int or float representing the number of seconds that the token
is valid for. If None then tokens are valid forever.
:current_time:
An int representing the number of seconds since the epoch. Will be
used by to check for token expiry if `timeout` is set. If `None`
then the current time will be used.
:raises:
XSRFTokenMalformed if the given token_string cannot be parsed.
XSRFTokenExpiredException if the given token string is expired.
XSRFTokenInvalid if the given token string does not match the
contents of the `XSRFToken`.
"""
try:
decoded_token_string = base64.urlsafe_b64decode(token_string)
except TypeError:
raise XSRFTokenMalformed()
split_token = decoded_token_string.split(self._DELIMITER)
if len(split_token) != 2:
raise XSRFTokenMalformed()
try:
token_time = int(split_token[1])
except ValueError:
raise XSRFTokenMalformed()
if timeout is not None:
if current_time is None:
current_time = time.time()
# If an attacker modifies the plain text time then it will not match
# the hashed time so this check is sufficient.
if (token_time + timeout) < current_time:
raise XSRFTokenExpiredException()
expected_token = XSRFToken(self.user_id, self.secret, token_time)
expected_token_string = expected_token.generate_token_string(action)
if len(expected_token_string) != len(token_string):
raise XSRFTokenInvalid()
# Compare the two strings in constant time to prevent timing attacks.
different = 0
for a, b in zip(token_string, expected_token_string):
different |= ord(a) ^ ord(b)
if different:
raise XSRFTokenInvalid() | Generate a hash of the given token contents that can be verified.
:param token_string:
A string containing the hashed token (generated by
`generate_token_string`).
:param action:
A string containing the action that is being verified.
:param timeout:
An int or float representing the number of seconds that the token
is valid for. If None then tokens are valid forever.
:current_time:
An int representing the number of seconds since the epoch. Will be
used by to check for token expiry if `timeout` is set. If `None`
then the current time will be used.
:raises:
XSRFTokenMalformed if the given token_string cannot be parsed.
XSRFTokenExpiredException if the given token string is expired.
XSRFTokenInvalid if the given token string does not match the
contents of the `XSRFToken`. | Below is the the instruction that describes the task:
### Input:
Generate a hash of the given token contents that can be verified.
:param token_string:
A string containing the hashed token (generated by
`generate_token_string`).
:param action:
A string containing the action that is being verified.
:param timeout:
An int or float representing the number of seconds that the token
is valid for. If None then tokens are valid forever.
:current_time:
An int representing the number of seconds since the epoch. Will be
used by to check for token expiry if `timeout` is set. If `None`
then the current time will be used.
:raises:
XSRFTokenMalformed if the given token_string cannot be parsed.
XSRFTokenExpiredException if the given token string is expired.
XSRFTokenInvalid if the given token string does not match the
contents of the `XSRFToken`.
### Response:
def verify_token_string(self,
token_string,
action=None,
timeout=None,
current_time=None):
"""Generate a hash of the given token contents that can be verified.
:param token_string:
A string containing the hashed token (generated by
`generate_token_string`).
:param action:
A string containing the action that is being verified.
:param timeout:
An int or float representing the number of seconds that the token
is valid for. If None then tokens are valid forever.
:current_time:
An int representing the number of seconds since the epoch. Will be
used by to check for token expiry if `timeout` is set. If `None`
then the current time will be used.
:raises:
XSRFTokenMalformed if the given token_string cannot be parsed.
XSRFTokenExpiredException if the given token string is expired.
XSRFTokenInvalid if the given token string does not match the
contents of the `XSRFToken`.
"""
try:
decoded_token_string = base64.urlsafe_b64decode(token_string)
except TypeError:
raise XSRFTokenMalformed()
split_token = decoded_token_string.split(self._DELIMITER)
if len(split_token) != 2:
raise XSRFTokenMalformed()
try:
token_time = int(split_token[1])
except ValueError:
raise XSRFTokenMalformed()
if timeout is not None:
if current_time is None:
current_time = time.time()
# If an attacker modifies the plain text time then it will not match
# the hashed time so this check is sufficient.
if (token_time + timeout) < current_time:
raise XSRFTokenExpiredException()
expected_token = XSRFToken(self.user_id, self.secret, token_time)
expected_token_string = expected_token.generate_token_string(action)
if len(expected_token_string) != len(token_string):
raise XSRFTokenInvalid()
# Compare the two strings in constant time to prevent timing attacks.
different = 0
for a, b in zip(token_string, expected_token_string):
different |= ord(a) ^ ord(b)
if different:
raise XSRFTokenInvalid() |
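
The length check plus XOR-accumulate loop above is a hand-rolled constant-time comparison. A standalone sketch of the same guard using the standard library, which is the usual way to avoid the timing side channel (illustrative only, not a drop-in for the class above):

import hmac

def tokens_match(token_string, expected_token_string):
    """Constant-time equality check; pass both values as str or both as bytes."""
    return hmac.compare_digest(token_string, expected_token_string)

# hmac.compare_digest never short-circuits on the first differing character,
# so an attacker cannot learn a matching prefix from response timing.
assert tokens_match('abc123', 'abc123')
assert not tokens_match('abc123', 'abc124')
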
def validate_reference(self, reference: ReferenceDefinitionType) -> Optional[Path]:
""" Converts reference to :class:`Path <pathlib.Path>`
:raise ValueError: If ``reference`` can't be converted to :class:`Path <pathlib.Path>`.
"""
if reference is not None:
if isinstance(reference, bytes):
reference = reference.decode("utf-8")
try:
return Path(reference)
except TypeError:
raise ValueError(f"Can't convert reference path {reference} to a pathlib.Path")
return None | Converts reference to :class:`Path <pathlib.Path>`
:raise ValueError: If ``reference`` can't be converted to :class:`Path <pathlib.Path>`. | Below is the the instruction that describes the task:
### Input:
Converts reference to :class:`Path <pathlib.Path>`
:raise ValueError: If ``reference`` can't be converted to :class:`Path <pathlib.Path>`.
### Response:
def validate_reference(self, reference: ReferenceDefinitionType) -> Optional[Path]:
""" Converts reference to :class:`Path <pathlib.Path>`
:raise ValueError: If ``reference`` can't be converted to :class:`Path <pathlib.Path>`.
"""
if reference is not None:
if isinstance(reference, bytes):
reference = reference.decode("utf-8")
try:
return Path(reference)
except TypeError:
raise ValueError(f"Can't convert reference path {reference} to a pathlib.Path")
return None |
def listpid(toggle='basic'): # Add method to exclude elements from list
'''list pids'''
proc=psutil.process_iter()# evalute if its better to keep one instance of this or generate here?
if toggle=='basic':
host=gethostname()
host2=os.getenv('HOME').split(sep='/' )[-1]
for row in proc:
#~ DPRINT([row.ppid(),row.name(),host],'username,row.name,host')
if row.username() in host or row.username() in host2: #new psutil using grabing timeyyy and not alfa for username so host 2 is getting the timeyyy on UBUNTU
yield row.name(), row.ppid()
elif toggle=='all':
for row in proc:
yield row.name(), row.ppid()
elif toggle =='windows-basic':
for row in proc:
try:
pname = psutil.Process(row.pid).name()
pname = pname[:-4]#removiing .exe from end
yield pname, row.pid
except:
pass | list pids | Below is the the instruction that describes the task:
### Input:
list pids
### Response:
def listpid(toggle='basic'): # Add method to exclude elements from list
'''list pids'''
proc=psutil.process_iter()# evalute if its better to keep one instance of this or generate here?
if toggle=='basic':
host=gethostname()
host2=os.getenv('HOME').split(sep='/' )[-1]
for row in proc:
#~ DPRINT([row.ppid(),row.name(),host],'username,row.name,host')
if row.username() in host or row.username() in host2: #new psutil using grabing timeyyy and not alfa for username so host 2 is getting the timeyyy on UBUNTU
yield row.name(), row.ppid()
elif toggle=='all':
for row in proc:
yield row.name(), row.ppid()
elif toggle =='windows-basic':
for row in proc:
try:
pname = psutil.Process(row.pid).name()
pname = pname[:-4]#removiing .exe from end
yield pname, row.pid
except:
pass |
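
A more compact way to get a similar name/pid listing with current psutil, filtered to the logged-in user; attribute prefetching via `attrs` assumes psutil 5.3 or newer, and the username match is a rough heuristic like the one above:

import getpass
import psutil

def list_my_processes():
    """Yield (name, pid) for processes owned by the current user (sketch)."""
    me = getpass.getuser()
    for proc in psutil.process_iter(attrs=['pid', 'name', 'username']):
        info = proc.info
        if info.get('username') and me in info['username']:
            yield info['name'], info['pid']

# for name, pid in list_my_processes():
#     print(name, pid)
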
def extract_segment_types(urml_document_element, namespace):
"""Return a map from segment node IDs to their segment type
('nucleus', 'satellite' or 'isolated').
"""
segment_types = \
{namespace+':'+seg.attrib['id']: seg.tag
for seg in urml_document_element.iter('nucleus', 'satellite')}
for seg in urml_document_element.iter('segment'):
seg_id = namespace+':'+seg.attrib['id']
if seg_id not in segment_types:
segment_types[seg_id] = 'isolated'
return segment_types | Return a map from segment node IDs to their segment type
('nucleus', 'satellite' or 'isolated'). | Below is the the instruction that describes the task:
### Input:
Return a map from segment node IDs to their segment type
('nucleus', 'satellite' or 'isolated').
### Response:
def extract_segment_types(urml_document_element, namespace):
"""Return a map from segment node IDs to their segment type
('nucleus', 'satellite' or 'isolated').
"""
segment_types = \
{namespace+':'+seg.attrib['id']: seg.tag
for seg in urml_document_element.iter('nucleus', 'satellite')}
for seg in urml_document_element.iter('segment'):
seg_id = namespace+':'+seg.attrib['id']
if seg_id not in segment_types:
segment_types[seg_id] = 'isolated'
return segment_types |
def set_display(self, brightness=100, brightness_mode="auto"):
"""
allows to modify display state (change brightness)
:param int brightness: display brightness [0, 100] (default: 100)
:param str brightness_mode: the brightness mode of the display
[auto, manual] (default: auto)
"""
assert(brightness_mode in ("auto", "manual"))
assert(brightness in range(101))
log.debug("setting display information...")
cmd, url = DEVICE_URLS["set_display"]
json_data = {
"brightness_mode": brightness_mode,
"brightness": brightness
}
return self._exec(cmd, url, json_data=json_data) | allows to modify display state (change brightness)
:param int brightness: display brightness [0, 100] (default: 100)
:param str brightness_mode: the brightness mode of the display
[auto, manual] (default: auto) | Below is the the instruction that describes the task:
### Input:
allows to modify display state (change brightness)
:param int brightness: display brightness [0, 100] (default: 100)
:param str brightness_mode: the brightness mode of the display
[auto, manual] (default: auto)
### Response:
def set_display(self, brightness=100, brightness_mode="auto"):
"""
allows to modify display state (change brightness)
:param int brightness: display brightness [0, 100] (default: 100)
:param str brightness_mode: the brightness mode of the display
[auto, manual] (default: auto)
"""
assert(brightness_mode in ("auto", "manual"))
assert(brightness in range(101))
log.debug("setting display information...")
cmd, url = DEVICE_URLS["set_display"]
json_data = {
"brightness_mode": brightness_mode,
"brightness": brightness
}
return self._exec(cmd, url, json_data=json_data) |
def get_dsl_by_hash(self, node_hash: str) -> Optional[BaseEntity]:
"""Look up a node by the hash and returns the corresponding PyBEL node tuple."""
node = self.get_node_by_hash(node_hash)
if node is not None:
return node.as_bel() | Look up a node by the hash and returns the corresponding PyBEL node tuple. | Below is the the instruction that describes the task:
### Input:
Look up a node by the hash and returns the corresponding PyBEL node tuple.
### Response:
def get_dsl_by_hash(self, node_hash: str) -> Optional[BaseEntity]:
"""Look up a node by the hash and returns the corresponding PyBEL node tuple."""
node = self.get_node_by_hash(node_hash)
if node is not None:
return node.as_bel() |
def _render_hs_label(self, hs):
"""Return the label of the given Hilbert space as a string"""
if isinstance(hs.__class__, Singleton):
return self._render_str(hs.label)
else:
return self._tensor_sym.join(
[self._render_str(ls.label) for ls in hs.local_factors]) | Return the label of the given Hilbert space as a string | Below is the the instruction that describes the task:
### Input:
Return the label of the given Hilbert space as a string
### Response:
def _render_hs_label(self, hs):
"""Return the label of the given Hilbert space as a string"""
if isinstance(hs.__class__, Singleton):
return self._render_str(hs.label)
else:
return self._tensor_sym.join(
[self._render_str(ls.label) for ls in hs.local_factors]) |
def keyUp(key, pause=None, _pause=True):
"""Performs a keyboard key release (without the press down beforehand).
Args:
key (str): The key to be released up. The valid names are listed in
KEYBOARD_KEYS.
Returns:
None
"""
if len(key) > 1:
key = key.lower()
_failSafeCheck()
platformModule._keyUp(key)
_autoPause(pause, _pause) | Performs a keyboard key release (without the press down beforehand).
Args:
key (str): The key to be released up. The valid names are listed in
KEYBOARD_KEYS.
Returns:
None | Below is the the instruction that describes the task:
### Input:
Performs a keyboard key release (without the press down beforehand).
Args:
key (str): The key to be released up. The valid names are listed in
KEYBOARD_KEYS.
Returns:
None
### Response:
def keyUp(key, pause=None, _pause=True):
"""Performs a keyboard key release (without the press down beforehand).
Args:
key (str): The key to be released up. The valid names are listed in
KEYBOARD_KEYS.
Returns:
None
"""
if len(key) > 1:
key = key.lower()
_failSafeCheck()
platformModule._keyUp(key)
_autoPause(pause, _pause) |
def write(histogram):
"""Convert a histogram to a protobuf message.
Note: Currently, all binnings are converted to
static form. When you load the histogram again,
you will lose any related behaviour.
Note: A histogram collection is also planned.
Parameters
----------
histogram : HistogramBase | list | dict
Any histogram
Returns
-------
message : google.protobuf.message.Message
A protocol buffer message
"""
histogram_dict = histogram.to_dict()
message = Histogram()
for field in SIMPLE_CONVERSION_FIELDS:
setattr(message, field, histogram_dict[field])
# Main numerical data - TODO: Optimize!
message.frequencies.extend(histogram.frequencies.flatten())
message.errors2.extend(histogram.errors2.flatten())
# Binnings
for binning in histogram._binnings:
binning_message = message.binnings.add()
for edges in binning.bins:
limits = binning_message.bins.add()
limits.lower = edges[0]
limits.upper = edges[1]
# All meta data
meta_message = message.meta
# user_defined = {}
# for key, value in histogram.meta_data.items():
# if key not in PREDEFINED:
# user_defined[str(key)] = str(value)
for key in SIMPLE_META_KEYS:
if key in histogram.meta_data:
setattr(meta_message, key, str(histogram.meta_data[key]))
if "axis_names" in histogram.meta_data:
meta_message.axis_names.extend(histogram.meta_data["axis_names"])
message.physt_version = CURRENT_VERSION
message.physt_compatible = COMPATIBLE_VERSION
return message | Convert a histogram to a protobuf message.
Note: Currently, all binnings are converted to
static form. When you load the histogram again,
you will lose any related behaviour.
Note: A histogram collection is also planned.
Parameters
----------
histogram : HistogramBase | list | dict
Any histogram
Returns
-------
message : google.protobuf.message.Message
A protocol buffer message | Below is the the instruction that describes the task:
### Input:
Convert a histogram to a protobuf message.
Note: Currently, all binnings are converted to
static form. When you load the histogram again,
you will lose any related behaviour.
Note: A histogram collection is also planned.
Parameters
----------
histogram : HistogramBase | list | dict
Any histogram
Returns
-------
message : google.protobuf.message.Message
A protocol buffer message
### Response:
def write(histogram):
"""Convert a histogram to a protobuf message.
Note: Currently, all binnings are converted to
static form. When you load the histogram again,
you will lose any related behaviour.
Note: A histogram collection is also planned.
Parameters
----------
histogram : HistogramBase | list | dict
Any histogram
Returns
-------
message : google.protobuf.message.Message
A protocol buffer message
"""
histogram_dict = histogram.to_dict()
message = Histogram()
for field in SIMPLE_CONVERSION_FIELDS:
setattr(message, field, histogram_dict[field])
# Main numerical data - TODO: Optimize!
message.frequencies.extend(histogram.frequencies.flatten())
message.errors2.extend(histogram.errors2.flatten())
# Binnings
for binning in histogram._binnings:
binning_message = message.binnings.add()
for edges in binning.bins:
limits = binning_message.bins.add()
limits.lower = edges[0]
limits.upper = edges[1]
# All meta data
meta_message = message.meta
# user_defined = {}
# for key, value in histogram.meta_data.items():
# if key not in PREDEFINED:
# user_defined[str(key)] = str(value)
for key in SIMPLE_META_KEYS:
if key in histogram.meta_data:
setattr(meta_message, key, str(histogram.meta_data[key]))
if "axis_names" in histogram.meta_data:
meta_message.axis_names.extend(histogram.meta_data["axis_names"])
message.physt_version = CURRENT_VERSION
message.physt_compatible = COMPATIBLE_VERSION
return message |
def print_usage(self, file=None):
"""
Outputs usage information to the file if specified, or to the
io_manager's stdout if available, or to sys.stdout.
"""
optparse.OptionParser.print_usage(self, file)
file.flush() | Outputs usage information to the file if specified, or to the
io_manager's stdout if available, or to sys.stdout. | Below is the the instruction that describes the task:
### Input:
Outputs usage information to the file if specified, or to the
io_manager's stdout if available, or to sys.stdout.
### Response:
def print_usage(self, file=None):
"""
Outputs usage information to the file if specified, or to the
io_manager's stdout if available, or to sys.stdout.
"""
optparse.OptionParser.print_usage(self, file)
file.flush() |
def _set_categories_on_workflow(global_workflow_id, categories_to_set):
"""
Note: Categories are set on the workflow series level,
i.e. the same set applies to all versions.
"""
assert(isinstance(categories_to_set, list))
existing_categories = dxpy.api.global_workflow_list_categories(global_workflow_id)['categories']
categories_to_add = set(categories_to_set).difference(set(existing_categories))
categories_to_remove = set(existing_categories).difference(set(categories_to_set))
if categories_to_add:
dxpy.api.global_workflow_add_categories(global_workflow_id,
input_params={'categories': list(categories_to_add)})
if categories_to_remove:
dxpy.api.global_workflow_remove_categories(global_workflow_id,
input_params={'categories': list(categories_to_remove)}) | Note: Categories are set on the workflow series level,
i.e. the same set applies to all versions. | Below is the the instruction that describes the task:
### Input:
Note: Categories are set on the workflow series level,
i.e. the same set applies to all versions.
### Response:
def _set_categories_on_workflow(global_workflow_id, categories_to_set):
"""
Note: Categories are set on the workflow series level,
i.e. the same set applies to all versions.
"""
assert(isinstance(categories_to_set, list))
existing_categories = dxpy.api.global_workflow_list_categories(global_workflow_id)['categories']
categories_to_add = set(categories_to_set).difference(set(existing_categories))
categories_to_remove = set(existing_categories).difference(set(categories_to_set))
if categories_to_add:
dxpy.api.global_workflow_add_categories(global_workflow_id,
input_params={'categories': list(categories_to_add)})
if categories_to_remove:
dxpy.api.global_workflow_remove_categories(global_workflow_id,
input_params={'categories': list(categories_to_remove)}) |
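
The add/remove logic above is a plain set difference against the categories currently published. A generic sketch of that reconciliation step with injected callbacks standing in for the two API calls (hypothetical helper, no DNAnexus dependency):

def reconcile_categories(existing, desired, add, remove):
    """Apply the minimal add/remove sets needed to go from `existing` to `desired`."""
    to_add = set(desired) - set(existing)
    to_remove = set(existing) - set(desired)
    if to_add:
        add(sorted(to_add))
    if to_remove:
        remove(sorted(to_remove))
    return to_add, to_remove

# Example with print callbacks in place of the API:
reconcile_categories(['Import', 'Export'], ['Export', 'QC'],
                     add=lambda cats: print('adding', cats),
                     remove=lambda cats: print('removing', cats))
# -> adding ['QC'] / removing ['Import']
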
def has_arrlist(type_str):
"""
A predicate that matches a type string with an array dimension list.
"""
try:
abi_type = grammar.parse(type_str)
except exceptions.ParseError:
return False
return abi_type.arrlist is not None | A predicate that matches a type string with an array dimension list. | Below is the the instruction that describes the task:
### Input:
A predicate that matches a type string with an array dimension list.
### Response:
def has_arrlist(type_str):
"""
A predicate that matches a type string with an array dimension list.
"""
try:
abi_type = grammar.parse(type_str)
except exceptions.ParseError:
return False
return abi_type.arrlist is not None |
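
A quick usage sketch for the predicate above; it assumes the surrounding module's `grammar` and `exceptions` imports from eth-abi are in scope, and the expected booleans simply reflect whether the type string carries array dimensions:

examples = {
    'uint256': False,       # elementary type, no array dimensions
    'uint256[]': True,      # dynamic array
    'uint256[2][3]': True,  # nested fixed-size arrays
    'uint256[': False,      # unparseable strings are reported as False
}
for type_str, expected in examples.items():
    assert has_arrlist(type_str) == expected, type_str
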
def add_voice_call_api(mock):
'''Add org.ofono.VoiceCallManager API to a mock'''
# also add an emergency number which is not a real one, in case one runs a
# test case against a production ofono :-)
mock.AddProperty('org.ofono.VoiceCallManager', 'EmergencyNumbers', ['911', '13373'])
mock.calls = [] # object paths
mock.AddMethods('org.ofono.VoiceCallManager', [
('GetProperties', '', 'a{sv}', 'ret = self.GetAll("org.ofono.VoiceCallManager")'),
('Transfer', '', '', ''),
('SwapCalls', '', '', ''),
('ReleaseAndAnswer', '', '', ''),
('ReleaseAndSwap', '', '', ''),
('HoldAndAnswer', '', '', ''),
('SendTones', 's', '', ''),
('PrivateChat', 'o', 'ao', NOT_IMPLEMENTED),
('CreateMultiparty', '', 'o', NOT_IMPLEMENTED),
('HangupMultiparty', '', '', NOT_IMPLEMENTED),
('GetCalls', '', 'a(oa{sv})', 'ret = [(c, objects[c].GetAll("org.ofono.VoiceCall")) for c in self.calls]')
]) | Add org.ofono.VoiceCallManager API to a mock | Below is the the instruction that describes the task:
### Input:
Add org.ofono.VoiceCallManager API to a mock
### Response:
def add_voice_call_api(mock):
'''Add org.ofono.VoiceCallManager API to a mock'''
# also add an emergency number which is not a real one, in case one runs a
# test case against a production ofono :-)
mock.AddProperty('org.ofono.VoiceCallManager', 'EmergencyNumbers', ['911', '13373'])
mock.calls = [] # object paths
mock.AddMethods('org.ofono.VoiceCallManager', [
('GetProperties', '', 'a{sv}', 'ret = self.GetAll("org.ofono.VoiceCallManager")'),
('Transfer', '', '', ''),
('SwapCalls', '', '', ''),
('ReleaseAndAnswer', '', '', ''),
('ReleaseAndSwap', '', '', ''),
('HoldAndAnswer', '', '', ''),
('SendTones', 's', '', ''),
('PrivateChat', 'o', 'ao', NOT_IMPLEMENTED),
('CreateMultiparty', '', 'o', NOT_IMPLEMENTED),
('HangupMultiparty', '', '', NOT_IMPLEMENTED),
('GetCalls', '', 'a(oa{sv})', 'ret = [(c, objects[c].GetAll("org.ofono.VoiceCall")) for c in self.calls]')
]) |
def cache_factor(self, col_list, refresh=False):
"""
Existing todos here are: these should be hidden helper carrays
As in: not normal columns that you would normally see as a user
The factor (label index) carray is as long as the original carray
(and the rest of the table therefore)
But the (unique) values carray is not as long (as long as the number
of unique values)
:param col_list:
:param refresh:
:return:
"""
if not self.rootdir:
raise TypeError('Only out-of-core ctables can have '
'factorization caching at the moment')
if not isinstance(col_list, list):
col_list = [col_list]
if refresh:
kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x]
for kill_dir in kill_list:
rm_file_or_dir(os.path.join(self.rootdir, kill_dir))
for col in col_list:
# create cache if needed
if refresh or not self.cache_valid(col):
# todo: also add locking mechanism here
# create directories
col_rootdir = self[col].rootdir
col_factor_rootdir = col_rootdir + '.factor'
col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
col_values_rootdir = col_rootdir + '.values'
col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
# create factor
carray_factor = \
bcolz.carray([], dtype='int64', expectedlen=self.size,
rootdir=col_factor_rootdir_tmp, mode='w')
_, values = \
ctable_ext.factorize(self[col], labels=carray_factor)
carray_factor.flush()
rm_file_or_dir(col_factor_rootdir, ignore_errors=True)
shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
# create values
carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype),
rootdir=col_values_rootdir_tmp, mode='w')
carray_values.flush()
rm_file_or_dir(col_values_rootdir, ignore_errors=True)
shutil.move(col_values_rootdir_tmp, col_values_rootdir) | Existing todos here are: these should be hidden helper carrays
As in: not normal columns that you would normally see as a user
The factor (label index) carray is as long as the original carray
(and the rest of the table therefore)
But the (unique) values carray is not as long (as long as the number
of unique values)
:param col_list:
:param refresh:
:return: | Below is the the instruction that describes the task:
### Input:
Existing todos here are: these should be hidden helper carrays
As in: not normal columns that you would normally see as a user
The factor (label index) carray is as long as the original carray
(and the rest of the table therefore)
But the (unique) values carray is not as long (as long as the number
of unique values)
:param col_list:
:param refresh:
:return:
### Response:
def cache_factor(self, col_list, refresh=False):
"""
Existing todos here are: these should be hidden helper carrays
As in: not normal columns that you would normally see as a user
The factor (label index) carray is as long as the original carray
(and the rest of the table therefore)
But the (unique) values carray is not as long (as long as the number
of unique values)
:param col_list:
:param refresh:
:return:
"""
if not self.rootdir:
raise TypeError('Only out-of-core ctables can have '
'factorization caching at the moment')
if not isinstance(col_list, list):
col_list = [col_list]
if refresh:
kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x]
for kill_dir in kill_list:
rm_file_or_dir(os.path.join(self.rootdir, kill_dir))
for col in col_list:
# create cache if needed
if refresh or not self.cache_valid(col):
# todo: also add locking mechanism here
# create directories
col_rootdir = self[col].rootdir
col_factor_rootdir = col_rootdir + '.factor'
col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
col_values_rootdir = col_rootdir + '.values'
col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
# create factor
carray_factor = \
bcolz.carray([], dtype='int64', expectedlen=self.size,
rootdir=col_factor_rootdir_tmp, mode='w')
_, values = \
ctable_ext.factorize(self[col], labels=carray_factor)
carray_factor.flush()
rm_file_or_dir(col_factor_rootdir, ignore_errors=True)
shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
# create values
carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype),
rootdir=col_values_rootdir_tmp, mode='w')
carray_values.flush()
rm_file_or_dir(col_values_rootdir, ignore_errors=True)
shutil.move(col_values_rootdir_tmp, col_values_rootdir) |
def make_library(self, diffuse_yaml, catalog_yaml, binning_yaml):
""" Build up the library of all the components
Parameters
----------
diffuse_yaml : str
Name of the yaml file with the library of diffuse component definitions
catalog_yaml : str
            Name of the yaml file with the library of catalog split definitions
binning_yaml : str
Name of the yaml file with the binning definitions
"""
ret_dict = {}
#catalog_dict = yaml.safe_load(open(catalog_yaml))
components_dict = Component.build_from_yamlfile(binning_yaml)
diffuse_ret_dict = make_diffuse_comp_info_dict(GalpropMapManager=self._gmm,
DiffuseModelManager=self._dmm,
library=diffuse_yaml,
components=components_dict)
catalog_ret_dict = make_catalog_comp_dict(library=catalog_yaml,
CatalogSourceManager=self._csm)
ret_dict.update(diffuse_ret_dict['comp_info_dict'])
ret_dict.update(catalog_ret_dict['comp_info_dict'])
self._library.update(ret_dict)
return ret_dict | Build up the library of all the components
Parameters
----------
diffuse_yaml : str
Name of the yaml file with the library of diffuse component definitions
catalog_yaml : str
            Name of the yaml file with the library of catalog split definitions
binning_yaml : str
Name of the yaml file with the binning definitions | Below is the the instruction that describes the task:
### Input:
Build up the library of all the components
Parameters
----------
diffuse_yaml : str
Name of the yaml file with the library of diffuse component definitions
catalog_yaml : str
            Name of the yaml file with the library of catalog split definitions
binning_yaml : str
Name of the yaml file with the binning definitions
### Response:
def make_library(self, diffuse_yaml, catalog_yaml, binning_yaml):
""" Build up the library of all the components
Parameters
----------
diffuse_yaml : str
Name of the yaml file with the library of diffuse component definitions
catalog_yaml : str
            Name of the yaml file with the library of catalog split definitions
binning_yaml : str
Name of the yaml file with the binning definitions
"""
ret_dict = {}
#catalog_dict = yaml.safe_load(open(catalog_yaml))
components_dict = Component.build_from_yamlfile(binning_yaml)
diffuse_ret_dict = make_diffuse_comp_info_dict(GalpropMapManager=self._gmm,
DiffuseModelManager=self._dmm,
library=diffuse_yaml,
components=components_dict)
catalog_ret_dict = make_catalog_comp_dict(library=catalog_yaml,
CatalogSourceManager=self._csm)
ret_dict.update(diffuse_ret_dict['comp_info_dict'])
ret_dict.update(catalog_ret_dict['comp_info_dict'])
self._library.update(ret_dict)
return ret_dict |
def remove_all_containers(self, stop_timeout=10, list_only=False):
"""
        First stops (if necessary) and then removes all containers present on the Docker instance.
:param stop_timeout: Timeout to stopping each container.
:type stop_timeout: int
:param list_only: When set to ``True`` only lists containers, but does not actually stop or remove them.
:type list_only: bool
:return: A tuple of two lists: Stopped container ids, and removed container ids.
:rtype: (list[unicode | str], list[unicode | str])
"""
containers = [(container['Id'], container['Status'])
for container in self.containers(all=True)]
running_containers = [c_id
for c_id, status in containers
if not (status.startswith('Exited') or status == 'Dead')]
if list_only:
return running_containers, [c[0] for c in containers]
stopped_containers = []
for c_id in running_containers:
try:
self.stop(c_id, timeout=stop_timeout)
except Timeout:
log.warning("Container %s did not stop in time - sent SIGKILL.", c_id)
try:
self.wait(c_id, timeout=stop_timeout)
except Timeout:
pass
except:
exc_info = sys.exc_info()
raise PartialResultsError(exc_info, (stopped_containers, []))
else:
stopped_containers.append(c_id)
removed_containers = []
for c_id, __ in containers:
try:
self.remove_container(c_id)
except:
exc_info = sys.exc_info()
raise PartialResultsError(exc_info, (stopped_containers, removed_containers))
else:
removed_containers.append(c_id)
        return stopped_containers, removed_containers | First stops (if necessary) and then removes all containers present on the Docker instance.
:param stop_timeout: Timeout to stopping each container.
:type stop_timeout: int
:param list_only: When set to ``True`` only lists containers, but does not actually stop or remove them.
:type list_only: bool
:return: A tuple of two lists: Stopped container ids, and removed container ids.
:rtype: (list[unicode | str], list[unicode | str]) | Below is the the instruction that describes the task:
### Input:
        First stops (if necessary) and then removes all containers present on the Docker instance.
:param stop_timeout: Timeout to stopping each container.
:type stop_timeout: int
:param list_only: When set to ``True`` only lists containers, but does not actually stop or remove them.
:type list_only: bool
:return: A tuple of two lists: Stopped container ids, and removed container ids.
:rtype: (list[unicode | str], list[unicode | str])
### Response:
def remove_all_containers(self, stop_timeout=10, list_only=False):
"""
        First stops (if necessary) and then removes all containers present on the Docker instance.
:param stop_timeout: Timeout to stopping each container.
:type stop_timeout: int
:param list_only: When set to ``True`` only lists containers, but does not actually stop or remove them.
:type list_only: bool
:return: A tuple of two lists: Stopped container ids, and removed container ids.
:rtype: (list[unicode | str], list[unicode | str])
"""
containers = [(container['Id'], container['Status'])
for container in self.containers(all=True)]
running_containers = [c_id
for c_id, status in containers
if not (status.startswith('Exited') or status == 'Dead')]
if list_only:
return running_containers, [c[0] for c in containers]
stopped_containers = []
for c_id in running_containers:
try:
self.stop(c_id, timeout=stop_timeout)
except Timeout:
log.warning("Container %s did not stop in time - sent SIGKILL.", c_id)
try:
self.wait(c_id, timeout=stop_timeout)
except Timeout:
pass
except:
exc_info = sys.exc_info()
raise PartialResultsError(exc_info, (stopped_containers, []))
else:
stopped_containers.append(c_id)
removed_containers = []
for c_id, __ in containers:
try:
self.remove_container(c_id)
except:
exc_info = sys.exc_info()
raise PartialResultsError(exc_info, (stopped_containers, removed_containers))
else:
removed_containers.append(c_id)
return stopped_containers, removed_containers |
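
A rough equivalent of the stop-then-remove sweep using the higher-level docker SDK client; this is a sketch, not dockermap's API, and it skips the partial-results bookkeeping and SIGKILL fallback handled above:

import docker

def sweep_containers(stop_timeout=10):
    """Stop every running container, then remove all containers (sketch)."""
    client = docker.from_env()
    stopped, removed = [], []
    for container in client.containers.list(all=True):
        if container.status == 'running':
            container.stop(timeout=stop_timeout)
            stopped.append(container.id)
        container.remove()
        removed.append(container.id)
    return stopped, removed
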
def apply_augments(self, auglist, p_elem, pset):
"""Handle substatements of augments from `auglist`.
The augments are applied in the context of `p_elem`. `pset`
is a patch set containing patches that may be applicable to
descendants.
"""
for a in auglist:
par = a.parent
if a.search_one("when") is None:
wel = p_elem
else:
if p_elem.interleave:
kw = "interleave"
else:
kw = "group"
wel = SchemaNode(kw, p_elem, interleave=p_elem.interleave)
wel.occur = p_elem.occur
if par.keyword == "uses":
self.handle_substmts(a, wel, pset)
continue
if par.keyword == "submodule":
mnam = par.i_including_modulename
else:
mnam = par.arg
if self.prefix_stack[-1] == self.module_prefixes[mnam]:
self.handle_substmts(a, wel, pset)
else:
self.prefix_stack.append(self.module_prefixes[mnam])
self.handle_substmts(a, wel, pset)
self.prefix_stack.pop() | Handle substatements of augments from `auglist`.
The augments are applied in the context of `p_elem`. `pset`
is a patch set containing patches that may be applicable to
descendants. | Below is the the instruction that describes the task:
### Input:
Handle substatements of augments from `auglist`.
The augments are applied in the context of `p_elem`. `pset`
is a patch set containing patches that may be applicable to
descendants.
### Response:
def apply_augments(self, auglist, p_elem, pset):
"""Handle substatements of augments from `auglist`.
The augments are applied in the context of `p_elem`. `pset`
is a patch set containing patches that may be applicable to
descendants.
"""
for a in auglist:
par = a.parent
if a.search_one("when") is None:
wel = p_elem
else:
if p_elem.interleave:
kw = "interleave"
else:
kw = "group"
wel = SchemaNode(kw, p_elem, interleave=p_elem.interleave)
wel.occur = p_elem.occur
if par.keyword == "uses":
self.handle_substmts(a, wel, pset)
continue
if par.keyword == "submodule":
mnam = par.i_including_modulename
else:
mnam = par.arg
if self.prefix_stack[-1] == self.module_prefixes[mnam]:
self.handle_substmts(a, wel, pset)
else:
self.prefix_stack.append(self.module_prefixes[mnam])
self.handle_substmts(a, wel, pset)
self.prefix_stack.pop() |
def split_line(line, min_line_length=30, max_line_length=100):
"""
This is designed to work with prettified output from Beautiful Soup which indents with a single space.
:param line: The line to split
:param min_line_length: The minimum desired line length
:param max_line_length: The maximum desired line length
:return: A list of lines
"""
if len(line) <= max_line_length:
# No need to split!
return [line]
# First work out the indentation on the beginning of the line
indent = 0
while line[indent] == ' ' and indent < len(line):
indent += 1
# Try to split the line
# Start looking for a space at character max_line_length working backwards
i = max_line_length
split_point = None
while i > min_line_length:
if line[i] == ' ':
split_point = i
break
i -= 1
if split_point is None:
# We didn't find a split point - search beyond the end of the line
i = max_line_length + 1
while i < len(line):
if line[i] == ' ':
split_point = i
break
i += 1
if split_point is None:
# There is nowhere to split the line!
return [line]
else:
# Split it!
line1 = line[:split_point]
line2 = ' ' * indent + line[split_point + 1:]
return [line1] + split_line(line2, min_line_length, max_line_length) | This is designed to work with prettified output from Beautiful Soup which indents with a single space.
:param line: The line to split
:param min_line_length: The minimum desired line length
:param max_line_length: The maximum desired line length
:return: A list of lines | Below is the the instruction that describes the task:
### Input:
This is designed to work with prettified output from Beautiful Soup which indents with a single space.
:param line: The line to split
:param min_line_length: The minimum desired line length
:param max_line_length: The maximum desired line length
:return: A list of lines
### Response:
def split_line(line, min_line_length=30, max_line_length=100):
"""
This is designed to work with prettified output from Beautiful Soup which indents with a single space.
:param line: The line to split
:param min_line_length: The minimum desired line length
:param max_line_length: The maximum desired line length
:return: A list of lines
"""
if len(line) <= max_line_length:
# No need to split!
return [line]
# First work out the indentation on the beginning of the line
indent = 0
while line[indent] == ' ' and indent < len(line):
indent += 1
# Try to split the line
# Start looking for a space at character max_line_length working backwards
i = max_line_length
split_point = None
while i > min_line_length:
if line[i] == ' ':
split_point = i
break
i -= 1
if split_point is None:
# We didn't find a split point - search beyond the end of the line
i = max_line_length + 1
while i < len(line):
if line[i] == ' ':
split_point = i
break
i += 1
if split_point is None:
# There is nowhere to split the line!
return [line]
else:
# Split it!
line1 = line[:split_point]
line2 = ' ' * indent + line[split_point + 1:]
return [line1] + split_line(line2, min_line_length, max_line_length) |
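
A short driver for the splitter above, using a small maximum width to force wrapping; the exact break points depend on where spaces fall, so treat the printed output as illustrative:

text = ' <p>' + 'lorem ipsum dolor sit amet ' * 6 + '</p>'
for chunk in split_line(text, min_line_length=10, max_line_length=40):
    print(repr(chunk))
# Each continuation line keeps the original single-space indent,
# matching the Beautiful Soup prettified output that the function targets.
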
def checkForChanges(f, sde, isTable):
"""
returns False if there are no changes
"""
# try simple feature count first
fCount = int(arcpy.GetCount_management(f).getOutput(0))
sdeCount = int(arcpy.GetCount_management(sde).getOutput(0))
if fCount != sdeCount:
return True
fields = [fld.name for fld in arcpy.ListFields(f)]
# filter out shape fields
if not isTable:
fields = filter_fields(fields)
d = arcpy.Describe(f)
shapeType = d.shapeType
if shapeType == 'Polygon':
shapeToken = 'SHAPE@AREA'
elif shapeType == 'Polyline':
shapeToken = 'SHAPE@LENGTH'
elif shapeType == 'Point':
shapeToken = 'SHAPE@XY'
else:
shapeToken = 'SHAPE@JSON'
fields.append(shapeToken)
def parseShape(shapeValue):
if shapeValue is None:
return 0
elif shapeType in ['Polygon', 'Polyline']:
return shapeValue
elif shapeType == 'Point':
if shapeValue[0] is not None and shapeValue[1] is not None:
return shapeValue[0] + shapeValue[1]
else:
return 0
else:
return shapeValue
outputSR = arcpy.Describe(f).spatialReference
else:
outputSR = None
changed = False
with arcpy.da.SearchCursor(f, fields, sql_clause=(None, 'ORDER BY OBJECTID')) as fCursor, \
arcpy.da.SearchCursor(sde, fields, sql_clause=(None, 'ORDER BY OBJECTID'),
spatial_reference=outputSR) as sdeCursor:
for fRow, sdeRow in izip(fCursor, sdeCursor):
if fRow != sdeRow:
# check shapes first
if fRow[-1] != sdeRow[-1] and not isTable:
if shapeType not in ['Polygon', 'Polyline', 'Point']:
changed = True
break
fShape = parseShape(fRow[-1])
sdeShape = parseShape(sdeRow[-1])
try:
assert_almost_equal(fShape, sdeShape, -1)
# trim off shapes
fRow = list(fRow[:-1])
sdeRow = list(sdeRow[:-1])
except AssertionError:
changed = True
break
# trim microseconds since they can be off by one between file and sde databases
for i in range(len(fRow)):
if type(fRow[i]) is datetime:
fRow = list(fRow)
sdeRow = list(sdeRow)
fRow[i] = fRow[i].replace(microsecond=0)
try:
sdeRow[i] = sdeRow[i].replace(microsecond=0)
except:
pass
# compare all values except OBJECTID
if fRow[1:] != sdeRow[1:]:
changed = True
break
return changed | returns False if there are no changes | Below is the the instruction that describes the task:
### Input:
returns False if there are no changes
### Response:
def checkForChanges(f, sde, isTable):
"""
returns False if there are no changes
"""
# try simple feature count first
fCount = int(arcpy.GetCount_management(f).getOutput(0))
sdeCount = int(arcpy.GetCount_management(sde).getOutput(0))
if fCount != sdeCount:
return True
fields = [fld.name for fld in arcpy.ListFields(f)]
# filter out shape fields
if not isTable:
fields = filter_fields(fields)
d = arcpy.Describe(f)
shapeType = d.shapeType
if shapeType == 'Polygon':
shapeToken = 'SHAPE@AREA'
elif shapeType == 'Polyline':
shapeToken = 'SHAPE@LENGTH'
elif shapeType == 'Point':
shapeToken = 'SHAPE@XY'
else:
shapeToken = 'SHAPE@JSON'
fields.append(shapeToken)
def parseShape(shapeValue):
if shapeValue is None:
return 0
elif shapeType in ['Polygon', 'Polyline']:
return shapeValue
elif shapeType == 'Point':
if shapeValue[0] is not None and shapeValue[1] is not None:
return shapeValue[0] + shapeValue[1]
else:
return 0
else:
return shapeValue
outputSR = arcpy.Describe(f).spatialReference
else:
outputSR = None
changed = False
with arcpy.da.SearchCursor(f, fields, sql_clause=(None, 'ORDER BY OBJECTID')) as fCursor, \
arcpy.da.SearchCursor(sde, fields, sql_clause=(None, 'ORDER BY OBJECTID'),
spatial_reference=outputSR) as sdeCursor:
for fRow, sdeRow in izip(fCursor, sdeCursor):
if fRow != sdeRow:
# check shapes first
if fRow[-1] != sdeRow[-1] and not isTable:
if shapeType not in ['Polygon', 'Polyline', 'Point']:
changed = True
break
fShape = parseShape(fRow[-1])
sdeShape = parseShape(sdeRow[-1])
try:
assert_almost_equal(fShape, sdeShape, -1)
# trim off shapes
fRow = list(fRow[:-1])
sdeRow = list(sdeRow[:-1])
except AssertionError:
changed = True
break
# trim microseconds since they can be off by one between file and sde databases
for i in range(len(fRow)):
if type(fRow[i]) is datetime:
fRow = list(fRow)
sdeRow = list(sdeRow)
fRow[i] = fRow[i].replace(microsecond=0)
try:
sdeRow[i] = sdeRow[i].replace(microsecond=0)
except:
pass
# compare all values except OBJECTID
if fRow[1:] != sdeRow[1:]:
changed = True
break
return changed |
def sync_remote_to_local(force="no"):
"""
Replace your remote db with your local
Example:
sync_remote_to_local:force=yes
"""
assert "local_wp_dir" in env, "Missing local_wp_dir in env"
if force != "yes":
message = "This will replace your local database with your "\
"remote, are you sure [y/n]"
answer = prompt(message, "y")
if answer != "y":
logger.info("Sync stopped")
return
init_tasks() # Bootstrap fabrik
remote_file = "sync_%s.sql" % int(time.time()*1000)
remote_path = "/tmp/%s" % remote_file
with env.cd(paths.get_current_path()):
env.run("wp db export %s" % remote_path)
local_wp_dir = env.local_wp_dir
local_path = "/tmp/%s" % remote_file
# Download sync file
get(remote_path, local_path)
with lcd(local_wp_dir):
elocal("wp db import %s" % local_path)
# Cleanup
env.run("rm %s" % remote_path)
elocal("rm %s" % local_path) | Replace your remote db with your local
Example:
sync_remote_to_local:force=yes | Below is the the instruction that describes the task:
### Input:
Replace your remote db with your local
Example:
sync_remote_to_local:force=yes
### Response:
def sync_remote_to_local(force="no"):
"""
Replace your remote db with your local
Example:
sync_remote_to_local:force=yes
"""
assert "local_wp_dir" in env, "Missing local_wp_dir in env"
if force != "yes":
message = "This will replace your local database with your "\
"remote, are you sure [y/n]"
answer = prompt(message, "y")
if answer != "y":
logger.info("Sync stopped")
return
init_tasks() # Bootstrap fabrik
remote_file = "sync_%s.sql" % int(time.time()*1000)
remote_path = "/tmp/%s" % remote_file
with env.cd(paths.get_current_path()):
env.run("wp db export %s" % remote_path)
local_wp_dir = env.local_wp_dir
local_path = "/tmp/%s" % remote_file
# Download sync file
get(remote_path, local_path)
with lcd(local_wp_dir):
elocal("wp db import %s" % local_path)
# Cleanup
env.run("rm %s" % remote_path)
elocal("rm %s" % local_path) |
def Run(self, unused_arg):
"""Run the kill."""
# Send a message back to the service to say that we are about to shutdown.
reply = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK)
# Queue up the response message, jump the queue.
self.SendReply(reply, message_type=rdf_flows.GrrMessage.Type.STATUS)
# Give the http thread some time to send the reply.
self.grr_worker.Sleep(10)
# Die ourselves.
logging.info("Dying on request.")
os._exit(242) | Run the kill. | Below is the the instruction that describes the task:
### Input:
Run the kill.
### Response:
def Run(self, unused_arg):
"""Run the kill."""
# Send a message back to the service to say that we are about to shutdown.
reply = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK)
# Queue up the response message, jump the queue.
self.SendReply(reply, message_type=rdf_flows.GrrMessage.Type.STATUS)
# Give the http thread some time to send the reply.
self.grr_worker.Sleep(10)
# Die ourselves.
logging.info("Dying on request.")
os._exit(242) |
def set_description(self, description, lang='en'):
"""
Set the description for a WD item in a certain language
:param description: The description of the item in a certain language
:type description: str
:param lang: The language a description should be set for.
:type lang: str
:return: None
"""
if self.fast_run and not self.require_write:
self.require_write = self.fast_run_container.check_language_data(qid=self.wd_item_id,
lang_data=[description], lang=lang,
lang_data_type='description')
if self.require_write:
self.init_data_load()
else:
return
if 'descriptions' not in self.wd_json_representation:
self.wd_json_representation['descriptions'] = {}
self.wd_json_representation['descriptions'][lang] = {
'language': lang,
'value': description
} | Set the description for a WD item in a certain language
:param description: The description of the item in a certain language
:type description: str
:param lang: The language a description should be set for.
:type lang: str
:return: None | Below is the the instruction that describes the task:
### Input:
Set the description for a WD item in a certain language
:param description: The description of the item in a certain language
:type description: str
:param lang: The language a description should be set for.
:type lang: str
:return: None
### Response:
def set_description(self, description, lang='en'):
"""
Set the description for a WD item in a certain language
:param description: The description of the item in a certain language
:type description: str
:param lang: The language a description should be set for.
:type lang: str
:return: None
"""
if self.fast_run and not self.require_write:
self.require_write = self.fast_run_container.check_language_data(qid=self.wd_item_id,
lang_data=[description], lang=lang,
lang_data_type='description')
if self.require_write:
self.init_data_load()
else:
return
if 'descriptions' not in self.wd_json_representation:
self.wd_json_representation['descriptions'] = {}
self.wd_json_representation['descriptions'][lang] = {
'language': lang,
'value': description
} |
def get_gradebook_ids_by_gradebook_column(self, gradebook_column_id):
"""Gets the list of ``Gradebook`` ``Ids`` mapped to a ``GradebookColumn``.
arg: gradebook_column_id (osid.id.Id): ``Id`` of a
``GradebookColumn``
return: (osid.id.IdList) - list of gradebook ``Ids``
raise: NotFound - ``gradebook_column_id`` is not found
raise: NullArgument - ``gradebook_column_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_bin_ids_by_resource
mgr = self._get_provider_manager('GRADING', local=True)
lookup_session = mgr.get_gradebook_column_lookup_session(proxy=self._proxy)
lookup_session.use_federated_gradebook_view()
gradebook_column = lookup_session.get_gradebook_column(gradebook_column_id)
id_list = []
for idstr in gradebook_column._my_map['assignedGradebookIds']:
id_list.append(Id(idstr))
return IdList(id_list) | Gets the list of ``Gradebook`` ``Ids`` mapped to a ``GradebookColumn``.
arg: gradebook_column_id (osid.id.Id): ``Id`` of a
``GradebookColumn``
return: (osid.id.IdList) - list of gradebook ``Ids``
raise: NotFound - ``gradebook_column_id`` is not found
raise: NullArgument - ``gradebook_column_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets the list of ``Gradebook`` ``Ids`` mapped to a ``GradebookColumn``.
arg: gradebook_column_id (osid.id.Id): ``Id`` of a
``GradebookColumn``
return: (osid.id.IdList) - list of gradebook ``Ids``
raise: NotFound - ``gradebook_column_id`` is not found
raise: NullArgument - ``gradebook_column_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_gradebook_ids_by_gradebook_column(self, gradebook_column_id):
"""Gets the list of ``Gradebook`` ``Ids`` mapped to a ``GradebookColumn``.
arg: gradebook_column_id (osid.id.Id): ``Id`` of a
``GradebookColumn``
return: (osid.id.IdList) - list of gradebook ``Ids``
raise: NotFound - ``gradebook_column_id`` is not found
raise: NullArgument - ``gradebook_column_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_bin_ids_by_resource
mgr = self._get_provider_manager('GRADING', local=True)
lookup_session = mgr.get_gradebook_column_lookup_session(proxy=self._proxy)
lookup_session.use_federated_gradebook_view()
gradebook_column = lookup_session.get_gradebook_column(gradebook_column_id)
id_list = []
for idstr in gradebook_column._my_map['assignedGradebookIds']:
id_list.append(Id(idstr))
return IdList(id_list) |
def polish(commit_indexes=None, urls=None):
'''
Apply certain behaviors to commits or URLs that need polishing before they are ready for screenshots
For example, if you have 10 commits in a row where static file links were broken, you could re-write the html
in memory as it is interpreted.
Keyword arguments:
commit_indexes -- A list of indexes to apply the wrapped function to
    urls -- A list of URLs to apply the wrapped function to
'''
def decorator(f):
if commit_indexes:
f.polish_commit_indexes = commit_indexes
if urls:
f.polish_urls = urls
@wraps(f)
def wrappee(*args, **kwargs):
return f(*args, **kwargs)
return wrappee
return decorator | Apply certain behaviors to commits or URLs that need polishing before they are ready for screenshots
For example, if you have 10 commits in a row where static file links were broken, you could re-write the html
in memory as it is interpreted.
Keyword arguments:
commit_indexes -- A list of indexes to apply the wrapped function to
    urls -- A list of URLs to apply the wrapped function to | Below is the instruction that describes the task:
### Input:
Apply certain behaviors to commits or URLs that need polishing before they are ready for screenshots
For example, if you have 10 commits in a row where static file links were broken, you could re-write the html
in memory as it is interpreted.
Keyword arguments:
commit_indexes -- A list of indexes to apply the wrapped function to
    urls -- A list of URLs to apply the wrapped function to
### Response:
def polish(commit_indexes=None, urls=None):
'''
Apply certain behaviors to commits or URLs that need polishing before they are ready for screenshots
For example, if you have 10 commits in a row where static file links were broken, you could re-write the html
in memory as it is interpreted.
Keyword arguments:
commit_indexes -- A list of indexes to apply the wrapped function to
    urls -- A list of URLs to apply the wrapped function to
'''
def decorator(f):
if commit_indexes:
f.polish_commit_indexes = commit_indexes
if urls:
f.polish_urls = urls
@wraps(f)
def wrappee(*args, **kwargs):
return f(*args, **kwargs)
return wrappee
return decorator |
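
A usage sketch showing that the decorator simply annotates the wrapped function with attributes a later screenshot pass can inspect; it assumes the `polish` decorator above (and its `functools.wraps` import) is in scope, and the URL and rewrite rule are made-up examples:

@polish(commit_indexes=[3, 4], urls=['http://localhost:8000/reports/'])
def fix_static_links(html):
    """Rewrite broken static-file links in the rendered page (sketch)."""
    return html.replace('src="/static/', 'src="http://localhost:8000/static/')

print(fix_static_links.polish_commit_indexes)  # [3, 4]
print(fix_static_links.polish_urls)            # ['http://localhost:8000/reports/']
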
def _partition_runs_by_day(self):
"""Split the runs by day, so we can display them grouped that way."""
run_infos = self._get_all_run_infos()
for x in run_infos:
ts = float(x['timestamp'])
x['time_of_day_text'] = datetime.fromtimestamp(ts).strftime('%H:%M:%S')
def date_text(dt):
delta_days = (date.today() - dt).days
if delta_days == 0:
return 'Today'
elif delta_days == 1:
return 'Yesterday'
elif delta_days < 7:
return dt.strftime('%A') # Weekday name.
else:
d = dt.day % 10
suffix = 'st' if d == 1 else 'nd' if d == 2 else 'rd' if d == 3 else 'th'
return dt.strftime('%B %d') + suffix # E.g., October 30th.
keyfunc = lambda x: datetime.fromtimestamp(float(x['timestamp']))
sorted_run_infos = sorted(run_infos, key=keyfunc, reverse=True)
return [{'date_text': date_text(dt), 'run_infos': [x for x in infos]}
for dt, infos in itertools.groupby(sorted_run_infos, lambda x: keyfunc(x).date())] | Split the runs by day, so we can display them grouped that way. | Below is the the instruction that describes the task:
### Input:
Split the runs by day, so we can display them grouped that way.
### Response:
def _partition_runs_by_day(self):
"""Split the runs by day, so we can display them grouped that way."""
run_infos = self._get_all_run_infos()
for x in run_infos:
ts = float(x['timestamp'])
x['time_of_day_text'] = datetime.fromtimestamp(ts).strftime('%H:%M:%S')
def date_text(dt):
delta_days = (date.today() - dt).days
if delta_days == 0:
return 'Today'
elif delta_days == 1:
return 'Yesterday'
elif delta_days < 7:
return dt.strftime('%A') # Weekday name.
else:
d = dt.day % 10
suffix = 'st' if d == 1 else 'nd' if d == 2 else 'rd' if d == 3 else 'th'
return dt.strftime('%B %d') + suffix # E.g., October 30th.
keyfunc = lambda x: datetime.fromtimestamp(float(x['timestamp']))
sorted_run_infos = sorted(run_infos, key=keyfunc, reverse=True)
return [{'date_text': date_text(dt), 'run_infos': [x for x in infos]}
for dt, infos in itertools.groupby(sorted_run_infos, lambda x: keyfunc(x).date())] |
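
The grouping above leans on the little `date_text` helper, whose `day % 10` rule labels the 11th-13th as "11st", "12nd" and "13rd". A standalone sketch of ordinal-suffix handling that covers those cases (plain Python, no Pants internals):

from datetime import date

def ordinal(day):
    """Return 'st'/'nd'/'rd'/'th' for a day of month, treating 11-13 as 'th'."""
    if 11 <= day % 100 <= 13:
        return 'th'
    return {1: 'st', 2: 'nd', 3: 'rd'}.get(day % 10, 'th')

def date_label(dt):
    return dt.strftime('%B %d').replace(' 0', ' ') + ordinal(dt.day)

print(date_label(date(2024, 10, 11)))  # October 11th
print(date_label(date(2024, 10, 3)))   # October 3rd
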
def get_short_status(self, hosts, services):
"""Get the short status of this host
        :return: "O", "W", "C", "U", or "n/a" based on service state_id or business_rule state
:rtype: str
"""
mapping = {
0: "O",
1: "W",
2: "C",
3: "U",
4: "N",
}
if self.got_business_rule:
return mapping.get(self.business_rule.get_state(hosts, services), "n/a")
return mapping.get(self.state_id, "n/a") | Get the short status of this host
        :return: "O", "W", "C", "U", or "n/a" based on service state_id or business_rule state
:rtype: str | Below is the the instruction that describes the task:
### Input:
Get the short status of this host
        :return: "O", "W", "C", "U", or "n/a" based on service state_id or business_rule state
:rtype: str
### Response:
def get_short_status(self, hosts, services):
"""Get the short status of this host
:return: "O", "W", "C", "U', or "n/a" based on service state_id or business_rule state
:rtype: str
"""
mapping = {
0: "O",
1: "W",
2: "C",
3: "U",
4: "N",
}
if self.got_business_rule:
return mapping.get(self.business_rule.get_state(hosts, services), "n/a")
return mapping.get(self.state_id, "n/a") |
def get(self, transform=None):
"""
Return the JSON defined at the S3 location in the constructor.
The get method will reload the S3 object after the TTL has
expired.
Fetch the JSON object from cache or S3 if necessary
"""
if not self.has_expired() and self._cached_copy is not None:
return self._cached_copy, False
return self._refresh_cache(transform), True | Return the JSON defined at the S3 location in the constructor.
The get method will reload the S3 object after the TTL has
expired.
Fetch the JSON object from cache or S3 if necessary | Below is the the instruction that describes the task:
### Input:
Return the JSON defined at the S3 location in the constructor.
The get method will reload the S3 object after the TTL has
expired.
Fetch the JSON object from cache or S3 if necessary
### Response:
def get(self, transform=None):
"""
Return the JSON defined at the S3 location in the constructor.
The get method will reload the S3 object after the TTL has
expired.
Fetch the JSON object from cache or S3 if necessary
"""
if not self.has_expired() and self._cached_copy is not None:
return self._cached_copy, False
return self._refresh_cache(transform), True |
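A generic sketch of the expire-and-refresh pattern the method relies on, with a hypothetical loader callable standing in for the S3 fetch:
import time

class TTLCache:
    def __init__(self, loader, ttl_seconds=60):
        self._loader = loader          # callable that fetches the fresh value
        self._ttl = ttl_seconds
        self._cached_copy = None
        self._loaded_at = None

    def has_expired(self):
        return self._loaded_at is None or (time.time() - self._loaded_at) > self._ttl

    def get(self):
        # Return (value, was_refreshed), mirroring the method above.
        if not self.has_expired() and self._cached_copy is not None:
            return self._cached_copy, False
        self._cached_copy = self._loader()
        self._loaded_at = time.time()
        return self._cached_copy, True

cache = TTLCache(loader=lambda: {"config": "value"}, ttl_seconds=30)
value, refreshed = cache.get()   # first call loads: refreshed is True
value, refreshed = cache.get()   # within the TTL: refreshed is False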
def set_iscsi_boot_info(self, mac, target_name, lun, ip_address,
port='3260', auth_method=None, username=None,
password=None):
"""Set iscsi details of the system in uefi boot mode.
The initiator system is set with the target details like
IQN, LUN, IP, Port etc.
:param mac: The MAC of the NIC to be set with iSCSI information
:param target_name: Target Name for iscsi.
:param lun: logical unit number.
:param ip_address: IP address of the target.
:param port: port of the target.
:param auth_method : either None or CHAP.
:param username: CHAP Username for authentication.
:param password: CHAP secret.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the bios boot mode.
"""
    LOG.warning("'set_iscsi_boot_info' is deprecated. The 'MAC' parameter "
                "passed in is ignored. Use 'set_iscsi_info' instead.")
return self._call_method('set_iscsi_info', target_name, lun,
ip_address, port, auth_method, username,
password) | Set iscsi details of the system in uefi boot mode.
The initiator system is set with the target details like
IQN, LUN, IP, Port etc.
:param mac: The MAC of the NIC to be set with iSCSI information
:param target_name: Target Name for iscsi.
:param lun: logical unit number.
:param ip_address: IP address of the target.
:param port: port of the target.
:param auth_method : either None or CHAP.
:param username: CHAP Username for authentication.
:param password: CHAP secret.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the bios boot mode. | Below is the the instruction that describes the task:
### Input:
Set iscsi details of the system in uefi boot mode.
The initiator system is set with the target details like
IQN, LUN, IP, Port etc.
:param mac: The MAC of the NIC to be set with iSCSI information
:param target_name: Target Name for iscsi.
:param lun: logical unit number.
:param ip_address: IP address of the target.
:param port: port of the target.
:param auth_method : either None or CHAP.
:param username: CHAP Username for authentication.
:param password: CHAP secret.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the bios boot mode.
### Response:
def set_iscsi_boot_info(self, mac, target_name, lun, ip_address,
port='3260', auth_method=None, username=None,
password=None):
"""Set iscsi details of the system in uefi boot mode.
The initiator system is set with the target details like
IQN, LUN, IP, Port etc.
:param mac: The MAC of the NIC to be set with iSCSI information
:param target_name: Target Name for iscsi.
:param lun: logical unit number.
:param ip_address: IP address of the target.
:param port: port of the target.
:param auth_method : either None or CHAP.
:param username: CHAP Username for authentication.
:param password: CHAP secret.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the bios boot mode.
"""
    LOG.warning("'set_iscsi_boot_info' is deprecated. The 'MAC' parameter "
                "passed in is ignored. Use 'set_iscsi_info' instead.")
return self._call_method('set_iscsi_info', target_name, lun,
ip_address, port, auth_method, username,
password) |
def totals(iter, keyfunc, sumfunc):
"""groups items by field described in keyfunc and counts totals using value
from sumfunc
"""
data = sorted(iter, key=keyfunc)
res = {}
for k, group in groupby(data, keyfunc):
res[k] = sum([sumfunc(entry) for entry in group])
return res | groups items by field described in keyfunc and counts totals using value
from sumfunc | Below is the the instruction that describes the task:
### Input:
groups items by field described in keyfunc and counts totals using value
from sumfunc
### Response:
def totals(iter, keyfunc, sumfunc):
"""groups items by field described in keyfunc and counts totals using value
from sumfunc
"""
data = sorted(iter, key=keyfunc)
res = {}
for k, group in groupby(data, keyfunc):
res[k] = sum([sumfunc(entry) for entry in group])
return res |
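To make the semantics concrete, here is the same aggregation written with a plain dict over hypothetical expense records; it produces exactly what totals() above returns:
expenses = [
    {'category': 'food', 'amount': 12.5},
    {'category': 'travel', 'amount': 80.0},
    {'category': 'food', 'amount': 7.5},
]
result = {}
for entry in expenses:
    key = entry['category']                             # what keyfunc would extract
    result[key] = result.get(key, 0) + entry['amount']  # what sumfunc would contribute
print(result)  # {'food': 20.0, 'travel': 80.0}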
def is_restricted(self):
"""
    Returns True or False according to the number of objects in the queryset.
    If the queryset contains too many objects, the widget is restricted and a select box with choices won't be used.
"""
return (
not hasattr(self.choices, 'queryset') or
self.choices.queryset.count() > settings.FOREIGN_KEY_MAX_SELECBOX_ENTRIES
    ) | Returns True or False according to the number of objects in the queryset.
If the queryset contains too many objects, the widget is restricted and a select box with choices won't be used. | Below is the the instruction that describes the task:
### Input:
Returns True or False according to the number of objects in the queryset.
If the queryset contains too many objects, the widget is restricted and a select box with choices won't be used.
### Response:
def is_restricted(self):
"""
    Returns True or False according to the number of objects in the queryset.
    If the queryset contains too many objects, the widget is restricted and a select box with choices won't be used.
"""
return (
not hasattr(self.choices, 'queryset') or
self.choices.queryset.count() > settings.FOREIGN_KEY_MAX_SELECBOX_ENTRIES
) |
def azimuth(self, point):
"""
Compute the azimuth (in decimal degrees) between this point
and the given point.
:param point:
Destination point.
:type point:
Instance of :class:`Point`
:returns:
The azimuth, value in a range ``[0, 360)``.
:rtype:
float
"""
return geodetic.azimuth(self.longitude, self.latitude,
point.longitude, point.latitude) | Compute the azimuth (in decimal degrees) between this point
and the given point.
:param point:
Destination point.
:type point:
Instance of :class:`Point`
:returns:
The azimuth, value in a range ``[0, 360)``.
:rtype:
float | Below is the the instruction that describes the task:
### Input:
Compute the azimuth (in decimal degrees) between this point
and the given point.
:param point:
Destination point.
:type point:
Instance of :class:`Point`
:returns:
The azimuth, value in a range ``[0, 360)``.
:rtype:
float
### Response:
def azimuth(self, point):
"""
Compute the azimuth (in decimal degrees) between this point
and the given point.
:param point:
Destination point.
:type point:
Instance of :class:`Point`
:returns:
The azimuth, value in a range ``[0, 360)``.
:rtype:
float
"""
return geodetic.azimuth(self.longitude, self.latitude,
point.longitude, point.latitude) |
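For reference, the standard great-circle forward-azimuth formula produces the same kind of [0, 360) bearing; this sketch uses only the math module and is not necessarily identical to geodetic.azimuth for ellipsoidal models:
import math

def initial_bearing(lon1, lat1, lon2, lat2):
    # Great-circle forward azimuth in decimal degrees, normalized to [0, 360).
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    dlon = math.radians(lon2 - lon1)
    x = math.sin(dlon) * math.cos(phi2)
    y = math.cos(phi1) * math.sin(phi2) - math.sin(phi1) * math.cos(phi2) * math.cos(dlon)
    return math.degrees(math.atan2(x, y)) % 360.0

print(initial_bearing(0.0, 0.0, 10.0, 0.0))  # ~90.0 (due east)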
def write_version(path, ref=None):
"""Update version file using git desribe"""
with lcd(dirname(path)):
version = make_version(ref)
if (env.get('full') or not os.path.exists(path)
or version != open(path).read().strip()):
with open(path, 'w') as out:
out.write(version + '\n')
        return version | Update version file using git describe | Below is the the instruction that describes the task:
### Input:
Update version file using git describe
### Response:
def write_version(path, ref=None):
"""Update version file using git desribe"""
with lcd(dirname(path)):
version = make_version(ref)
if (env.get('full') or not os.path.exists(path)
or version != open(path).read().strip()):
with open(path, 'w') as out:
out.write(version + '\n')
return version |
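make_version and lcd come from the surrounding Fabric-based build script and are not shown here; a rough standard-library equivalent of the underlying version lookup might look like this (the git describe flags are an assumption):
import subprocess

def git_describe(ref=None):
    cmd = ['git', 'describe', '--tags']
    if ref:
        cmd.append(ref)
    return subprocess.check_output(cmd).decode().strip()

# version = git_describe()              # e.g. 'v1.2.3-4-gabc1234'
# with open('VERSION', 'w') as out:
#     out.write(version + '\n')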
def real_space(self):
"""The space corresponding to this space's `real_dtype`.
Raises
------
ValueError
If `dtype` is not a numeric data type.
"""
if not is_numeric_dtype(self.dtype):
raise ValueError(
'`real_space` not defined for non-numeric `dtype`')
return self.astype(self.real_dtype) | The space corresponding to this space's `real_dtype`.
Raises
------
ValueError
If `dtype` is not a numeric data type. | Below is the the instruction that describes the task:
### Input:
The space corresponding to this space's `real_dtype`.
Raises
------
ValueError
If `dtype` is not a numeric data type.
### Response:
def real_space(self):
"""The space corresponding to this space's `real_dtype`.
Raises
------
ValueError
If `dtype` is not a numeric data type.
"""
if not is_numeric_dtype(self.dtype):
raise ValueError(
'`real_space` not defined for non-numeric `dtype`')
return self.astype(self.real_dtype) |
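The real/complex dtype pairing the property depends on can be illustrated with NumPy alone; this is a generic illustration, not the library's internal implementation:
import numpy as np

complex_dtype = np.dtype('complex128')
real_dtype = np.empty(0, dtype=complex_dtype).real.dtype  # the matching real dtype
print(real_dtype)  # float64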
def _ast_to_code(self, node, **kwargs):
"""Convert an abstract syntax tree to python source code."""
if isinstance(node, OptreeNode):
return self._ast_optree_node_to_code(node, **kwargs)
elif isinstance(node, Identifier):
return self._ast_identifier_to_code(node, **kwargs)
elif isinstance(node, Terminal):
return self._ast_terminal_to_code(node, **kwargs)
elif isinstance(node, OptionGroup):
return self._ast_option_group_to_code(node, **kwargs)
elif isinstance(node, RepetitionGroup):
return self._ast_repetition_group_to_code(node, **kwargs)
elif isinstance(node, SpecialHandling):
return self._ast_special_handling_to_code(node, **kwargs)
elif isinstance(node, Number):
return self._ast_number_to_code(node, **kwargs)
else:
raise Exception("Unhandled ast node: {0}".format(node)) | Convert an abstract syntax tree to python source code. | Below is the the instruction that describes the task:
### Input:
Convert an abstract syntax tree to python source code.
### Response:
def _ast_to_code(self, node, **kwargs):
"""Convert an abstract syntax tree to python source code."""
if isinstance(node, OptreeNode):
return self._ast_optree_node_to_code(node, **kwargs)
elif isinstance(node, Identifier):
return self._ast_identifier_to_code(node, **kwargs)
elif isinstance(node, Terminal):
return self._ast_terminal_to_code(node, **kwargs)
elif isinstance(node, OptionGroup):
return self._ast_option_group_to_code(node, **kwargs)
elif isinstance(node, RepetitionGroup):
return self._ast_repetition_group_to_code(node, **kwargs)
elif isinstance(node, SpecialHandling):
return self._ast_special_handling_to_code(node, **kwargs)
elif isinstance(node, Number):
return self._ast_number_to_code(node, **kwargs)
else:
raise Exception("Unhandled ast node: {0}".format(node)) |
def get_role_members(self, role):
"""get permissions of a user"""
targetRoleDb = AuthGroup.objects(creator=self.client, role=role)
members = AuthMembership.objects(groups__in=targetRoleDb).only('user')
    return json.loads(members.to_json()) | get the members of a role | Below is the the instruction that describes the task:
### Input:
get the members of a role
### Response:
def get_role_members(self, role):
"""get permissions of a user"""
targetRoleDb = AuthGroup.objects(creator=self.client, role=role)
members = AuthMembership.objects(groups__in=targetRoleDb).only('user')
return json.loads(members.to_json()) |
def load(cls, path):
"""
load DictTree from json files.
"""
try:
with open(path, "rb") as f:
return cls(__data__=json.loads(f.read().decode("utf-8")))
except:
pass
with open(path, "rb") as f:
return cls(__data__=pickle.load(f)) | load DictTree from json files. | Below is the the instruction that describes the task:
### Input:
load DictTree from json files.
### Response:
def load(cls, path):
"""
load DictTree from json files.
"""
try:
with open(path, "rb") as f:
return cls(__data__=json.loads(f.read().decode("utf-8")))
except:
pass
with open(path, "rb") as f:
return cls(__data__=pickle.load(f)) |
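The try-JSON-then-fall-back-to-pickle idea can be written with a narrower except clause; the bare except above also swallows unrelated errors such as a missing file. A standalone sketch:
import json
import pickle

def load_tree(path):
    try:
        with open(path, "rb") as f:
            return json.loads(f.read().decode("utf-8"))
    except (UnicodeDecodeError, json.JSONDecodeError):
        # Not valid UTF-8 JSON, so assume the file was pickled instead.
        with open(path, "rb") as f:
            return pickle.load(f)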
def handle_signature(self, sig, signode):
"""Parses out pieces from construct signatures
Parses out prefix and argument list from construct definition. This is
assuming that the .NET languages this will support will be in a common
format, such as::
Namespace.Class.method(argument, argument, ...)
The namespace and class will be determined by the nesting of rST
directives.
Returns
Altered :py:data:`signode` with attributes corrected for rST
nesting/etc
"""
try:
sig = self.parse_signature(sig.strip())
except ValueError:
self.env.warn(self.env.docname,
'Parsing signature failed: "{}"'.format(sig),
self.lineno)
raise
prefix = self.env.ref_context.get('dn:prefix', None)
if prefix is not None:
sig.prefix = prefix
signode['object'] = sig.member
signode['prefix'] = sig.prefix
signode['fullname'] = sig.full_name()
# Prefix modifiers
if self.display_prefix:
signode += addnodes.desc_annotation(self.display_prefix,
self.display_prefix)
for prefix in ['public', 'protected', 'static']:
if prefix in self.options:
signode += addnodes.desc_annotation(prefix + ' ',
prefix + ' ')
# Show prefix only on shorter declarations
if sig.prefix is not None and not self.has_arguments:
signode += addnodes.desc_addname(sig.prefix + '.', sig.prefix + '.')
signode += addnodes.desc_name(sig.member, sig.member)
if self.has_arguments:
if not sig.arguments:
signode += addnodes.desc_parameterlist()
else:
# TODO replace this
_pseudo_parse_arglist(signode, ', '.join(sig.arguments))
if isinstance(self, DotNetObjectNested):
return sig.full_name(), sig.full_name()
return sig.full_name(), sig.prefix | Parses out pieces from construct signatures
Parses out prefix and argument list from construct definition. This is
assuming that the .NET languages this will support will be in a common
format, such as::
Namespace.Class.method(argument, argument, ...)
The namespace and class will be determined by the nesting of rST
directives.
Returns
Altered :py:data:`signode` with attributes corrected for rST
nesting/etc | Below is the the instruction that describes the task:
### Input:
Parses out pieces from construct signatures
Parses out prefix and argument list from construct definition. This is
assuming that the .NET languages this will support will be in a common
format, such as::
Namespace.Class.method(argument, argument, ...)
The namespace and class will be determined by the nesting of rST
directives.
Returns
Altered :py:data:`signode` with attributes corrected for rST
nesting/etc
### Response:
def handle_signature(self, sig, signode):
"""Parses out pieces from construct signatures
Parses out prefix and argument list from construct definition. This is
assuming that the .NET languages this will support will be in a common
format, such as::
Namespace.Class.method(argument, argument, ...)
The namespace and class will be determined by the nesting of rST
directives.
Returns
Altered :py:data:`signode` with attributes corrected for rST
nesting/etc
"""
try:
sig = self.parse_signature(sig.strip())
except ValueError:
self.env.warn(self.env.docname,
'Parsing signature failed: "{}"'.format(sig),
self.lineno)
raise
prefix = self.env.ref_context.get('dn:prefix', None)
if prefix is not None:
sig.prefix = prefix
signode['object'] = sig.member
signode['prefix'] = sig.prefix
signode['fullname'] = sig.full_name()
# Prefix modifiers
if self.display_prefix:
signode += addnodes.desc_annotation(self.display_prefix,
self.display_prefix)
for prefix in ['public', 'protected', 'static']:
if prefix in self.options:
signode += addnodes.desc_annotation(prefix + ' ',
prefix + ' ')
# Show prefix only on shorter declarations
if sig.prefix is not None and not self.has_arguments:
signode += addnodes.desc_addname(sig.prefix + '.', sig.prefix + '.')
signode += addnodes.desc_name(sig.member, sig.member)
if self.has_arguments:
if not sig.arguments:
signode += addnodes.desc_parameterlist()
else:
# TODO replace this
_pseudo_parse_arglist(signode, ', '.join(sig.arguments))
if isinstance(self, DotNetObjectNested):
return sig.full_name(), sig.full_name()
return sig.full_name(), sig.prefix |
def maintain_leases(self):
"""Maintain all of the leases being managed.
This method modifies the ack deadline for all of the managed
ack IDs, then waits for most of that time (but with jitter), and
repeats.
"""
while self._manager.is_active and not self._stop_event.is_set():
# Determine the appropriate duration for the lease. This is
# based off of how long previous messages have taken to ack, with
# a sensible default and within the ranges allowed by Pub/Sub.
p99 = self._manager.ack_histogram.percentile(99)
_LOGGER.debug("The current p99 value is %d seconds.", p99)
# Make a copy of the leased messages. This is needed because it's
# possible for another thread to modify the dictionary while
# we're iterating over it.
leased_messages = copy.copy(self._leased_messages)
# Drop any leases that are well beyond max lease time. This
# ensures that in the event of a badly behaving actor, we can
# drop messages and allow Pub/Sub to resend them.
cutoff = time.time() - self._manager.flow_control.max_lease_duration
to_drop = [
requests.DropRequest(ack_id, item.size)
for ack_id, item in six.iteritems(leased_messages)
if item.added_time < cutoff
]
if to_drop:
_LOGGER.warning(
"Dropping %s items because they were leased too long.", len(to_drop)
)
self._manager.dispatcher.drop(to_drop)
# Remove dropped items from our copy of the leased messages (they
# have already been removed from the real one by
# self._manager.drop(), which calls self.remove()).
for item in to_drop:
leased_messages.pop(item.ack_id)
# Create a streaming pull request.
# We do not actually call `modify_ack_deadline` over and over
# because it is more efficient to make a single request.
ack_ids = leased_messages.keys()
if ack_ids:
_LOGGER.debug("Renewing lease for %d ack IDs.", len(ack_ids))
# NOTE: This may not work as expected if ``consumer.active``
# has changed since we checked it. An implementation
# without any sort of race condition would require a
# way for ``send_request`` to fail when the consumer
# is inactive.
self._manager.dispatcher.modify_ack_deadline(
[requests.ModAckRequest(ack_id, p99) for ack_id in ack_ids]
)
# Now wait an appropriate period of time and do this again.
#
# We determine the appropriate period of time based on a random
# period between 0 seconds and 90% of the lease. This use of
# jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases
# where there are many clients.
snooze = random.uniform(0.0, p99 * 0.9)
_LOGGER.debug("Snoozing lease management for %f seconds.", snooze)
self._stop_event.wait(timeout=snooze)
_LOGGER.info("%s exiting.", _LEASE_WORKER_NAME) | Maintain all of the leases being managed.
This method modifies the ack deadline for all of the managed
ack IDs, then waits for most of that time (but with jitter), and
repeats. | Below is the the instruction that describes the task:
### Input:
Maintain all of the leases being managed.
This method modifies the ack deadline for all of the managed
ack IDs, then waits for most of that time (but with jitter), and
repeats.
### Response:
def maintain_leases(self):
"""Maintain all of the leases being managed.
This method modifies the ack deadline for all of the managed
ack IDs, then waits for most of that time (but with jitter), and
repeats.
"""
while self._manager.is_active and not self._stop_event.is_set():
# Determine the appropriate duration for the lease. This is
# based off of how long previous messages have taken to ack, with
# a sensible default and within the ranges allowed by Pub/Sub.
p99 = self._manager.ack_histogram.percentile(99)
_LOGGER.debug("The current p99 value is %d seconds.", p99)
# Make a copy of the leased messages. This is needed because it's
# possible for another thread to modify the dictionary while
# we're iterating over it.
leased_messages = copy.copy(self._leased_messages)
# Drop any leases that are well beyond max lease time. This
# ensures that in the event of a badly behaving actor, we can
# drop messages and allow Pub/Sub to resend them.
cutoff = time.time() - self._manager.flow_control.max_lease_duration
to_drop = [
requests.DropRequest(ack_id, item.size)
for ack_id, item in six.iteritems(leased_messages)
if item.added_time < cutoff
]
if to_drop:
_LOGGER.warning(
"Dropping %s items because they were leased too long.", len(to_drop)
)
self._manager.dispatcher.drop(to_drop)
# Remove dropped items from our copy of the leased messages (they
# have already been removed from the real one by
# self._manager.drop(), which calls self.remove()).
for item in to_drop:
leased_messages.pop(item.ack_id)
# Create a streaming pull request.
# We do not actually call `modify_ack_deadline` over and over
# because it is more efficient to make a single request.
ack_ids = leased_messages.keys()
if ack_ids:
_LOGGER.debug("Renewing lease for %d ack IDs.", len(ack_ids))
# NOTE: This may not work as expected if ``consumer.active``
# has changed since we checked it. An implementation
# without any sort of race condition would require a
# way for ``send_request`` to fail when the consumer
# is inactive.
self._manager.dispatcher.modify_ack_deadline(
[requests.ModAckRequest(ack_id, p99) for ack_id in ack_ids]
)
# Now wait an appropriate period of time and do this again.
#
# We determine the appropriate period of time based on a random
# period between 0 seconds and 90% of the lease. This use of
# jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases
# where there are many clients.
snooze = random.uniform(0.0, p99 * 0.9)
_LOGGER.debug("Snoozing lease management for %f seconds.", snooze)
self._stop_event.wait(timeout=snooze)
_LOGGER.info("%s exiting.", _LEASE_WORKER_NAME) |
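The core renew-then-sleep-with-jitter loop can be sketched without any Pub/Sub machinery; renew is a hypothetical callback standing in for the modify_ack_deadline dispatch above:
import random
import threading

stop_event = threading.Event()

def renew_leases(renew, deadline_seconds=10.0):
    while not stop_event.is_set():
        renew(deadline_seconds)                               # e.g. extend all outstanding ack deadlines
        snooze = random.uniform(0.0, deadline_seconds * 0.9)  # jitter spreads renewals across clients
        stop_event.wait(timeout=snooze)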
def from_dict(d):
"""
Re-create the noise model from a dictionary representation.
:param Dict[str,Any] d: The dictionary representation.
:return: The restored noise model.
:rtype: NoiseModel
"""
return NoiseModel(
gates=[KrausModel.from_dict(t) for t in d["gates"]],
assignment_probs={int(qid): np.array(a) for qid, a in d["assignment_probs"].items()},
) | Re-create the noise model from a dictionary representation.
:param Dict[str,Any] d: The dictionary representation.
:return: The restored noise model.
:rtype: NoiseModel | Below is the the instruction that describes the task:
### Input:
Re-create the noise model from a dictionary representation.
:param Dict[str,Any] d: The dictionary representation.
:return: The restored noise model.
:rtype: NoiseModel
### Response:
def from_dict(d):
"""
Re-create the noise model from a dictionary representation.
:param Dict[str,Any] d: The dictionary representation.
:return: The restored noise model.
:rtype: NoiseModel
"""
return NoiseModel(
gates=[KrausModel.from_dict(t) for t in d["gates"]],
assignment_probs={int(qid): np.array(a) for qid, a in d["assignment_probs"].items()},
) |
def eof():
'''Parser EOF flag of a string.'''
@Parser
def eof_parser(text, index=0):
if index >= len(text):
return Value.success(index, None)
else:
return Value.failure(index, 'EOF')
return eof_parser | Parser EOF flag of a string. | Below is the the instruction that describes the task:
### Input:
Parser EOF flag of a string.
### Response:
def eof():
'''Parser EOF flag of a string.'''
@Parser
def eof_parser(text, index=0):
if index >= len(text):
return Value.success(index, None)
else:
return Value.failure(index, 'EOF')
return eof_parser |
def tops(symbols=None, token='', version=''):
'''TOPS provides IEX’s aggregated best quoted bid and offer position in near real time for all securities on IEX’s displayed limit order book.
TOPS is ideal for developers needing both quote and trade data.
https://iexcloud.io/docs/api/#tops
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
dict: result
'''
symbols = _strToList(symbols)
if symbols:
return _getJson('tops?symbols=' + ','.join(symbols) + '%2b', token, version)
return _getJson('tops', token, version) | TOPS provides IEX’s aggregated best quoted bid and offer position in near real time for all securities on IEX’s displayed limit order book.
TOPS is ideal for developers needing both quote and trade data.
https://iexcloud.io/docs/api/#tops
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
dict: result | Below is the the instruction that describes the task:
### Input:
TOPS provides IEX’s aggregated best quoted bid and offer position in near real time for all securities on IEX’s displayed limit order book.
TOPS is ideal for developers needing both quote and trade data.
https://iexcloud.io/docs/api/#tops
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
dict: result
### Response:
def tops(symbols=None, token='', version=''):
'''TOPS provides IEX’s aggregated best quoted bid and offer position in near real time for all securities on IEX’s displayed limit order book.
TOPS is ideal for developers needing both quote and trade data.
https://iexcloud.io/docs/api/#tops
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
dict: result
'''
symbols = _strToList(symbols)
if symbols:
return _getJson('tops?symbols=' + ','.join(symbols) + '%2b', token, version)
return _getJson('tops', token, version) |
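For a concrete sense of what the helper sends, the endpoint string built above for a two-symbol request looks like this ('%2b' is simply a URL-encoded '+'):
symbols = ['AAPL', 'SPY']
endpoint = 'tops?symbols=' + ','.join(symbols) + '%2b'
print(endpoint)  # tops?symbols=AAPL,SPY%2b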
def embeddable(self, path, variant):
"""Is the asset embeddable ?"""
name, ext = os.path.splitext(path)
font = ext in FONT_EXTS
if not variant:
return False
if not (re.search(settings.EMBED_PATH, path.replace('\\', '/')) and self.storage.exists(path)):
return False
if ext not in EMBED_EXTS:
return False
if not (font or len(self.encoded_content(path)) < settings.EMBED_MAX_IMAGE_SIZE):
return False
return True | Is the asset embeddable ? | Below is the the instruction that describes the task:
### Input:
Is the asset embeddable ?
### Response:
def embeddable(self, path, variant):
"""Is the asset embeddable ?"""
name, ext = os.path.splitext(path)
font = ext in FONT_EXTS
if not variant:
return False
if not (re.search(settings.EMBED_PATH, path.replace('\\', '/')) and self.storage.exists(path)):
return False
if ext not in EMBED_EXTS:
return False
if not (font or len(self.encoded_content(path)) < settings.EMBED_MAX_IMAGE_SIZE):
return False
return True |
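Embedding itself means turning small assets into data URIs; a generic sketch of that step, independent of the storage and compiler machinery above:
import base64
import mimetypes

def data_uri(path):
    mime = mimetypes.guess_type(path)[0] or 'application/octet-stream'
    with open(path, 'rb') as f:
        encoded = base64.b64encode(f.read()).decode('ascii')
    return 'data:{};base64,{}'.format(mime, encoded)

# data_uri('img/logo.png') -> 'data:image/png;base64,iVBORw0KGgo...'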
def left_corner_label(self, label, position=None, rotation=0, offset=0.08,
**kwargs):
"""
Sets the label on the left corner (complements right axis.)
Parameters
----------
label: string
The axis label
position: 3-Tuple of floats, None
The position of the text label
rotation: float, 0
The angle of rotation of the label
offset: float,
Used to compute the distance of the label from the axis
kwargs:
Any kwargs to pass through to matplotlib.
"""
if not position:
position = (-offset / 2, offset / 2, 0)
self._corner_labels["left"] = (label, position, rotation, kwargs) | Sets the label on the left corner (complements right axis.)
Parameters
----------
label: string
The axis label
position: 3-Tuple of floats, None
The position of the text label
rotation: float, 0
The angle of rotation of the label
offset: float,
Used to compute the distance of the label from the axis
kwargs:
Any kwargs to pass through to matplotlib. | Below is the the instruction that describes the task:
### Input:
Sets the label on the left corner (complements right axis.)
Parameters
----------
label: string
The axis label
position: 3-Tuple of floats, None
The position of the text label
rotation: float, 0
The angle of rotation of the label
offset: float,
Used to compute the distance of the label from the axis
kwargs:
Any kwargs to pass through to matplotlib.
### Response:
def left_corner_label(self, label, position=None, rotation=0, offset=0.08,
**kwargs):
"""
Sets the label on the left corner (complements right axis.)
Parameters
----------
label: string
The axis label
position: 3-Tuple of floats, None
The position of the text label
rotation: float, 0
The angle of rotation of the label
offset: float,
Used to compute the distance of the label from the axis
kwargs:
Any kwargs to pass through to matplotlib.
"""
if not position:
position = (-offset / 2, offset / 2, 0)
self._corner_labels["left"] = (label, position, rotation, kwargs) |
def get_slot_bindings(self):
"""
Returns slot bindings.
:returns: slot bindings (adapter names) list
"""
slot_bindings = yield from self._hypervisor.send('vm slot_bindings "{}"'.format(self._name))
return slot_bindings | Returns slot bindings.
:returns: slot bindings (adapter names) list | Below is the the instruction that describes the task:
### Input:
Returns slot bindings.
:returns: slot bindings (adapter names) list
### Response:
def get_slot_bindings(self):
"""
Returns slot bindings.
:returns: slot bindings (adapter names) list
"""
slot_bindings = yield from self._hypervisor.send('vm slot_bindings "{}"'.format(self._name))
return slot_bindings |
def twisted_consume(callback, bindings=None, queues=None):
"""
Start a consumer using the provided callback and run it using the Twisted
event loop (reactor).
.. note:: Callbacks run in a Twisted-managed thread pool using the
:func:`twisted.internet.threads.deferToThread` API to avoid them blocking
the event loop. If you wish to use Twisted APIs in your callback you must
use the :func:`twisted.internet.threads.blockingCallFromThread` or
:class:`twisted.internet.interfaces.IReactorFromThreads` APIs.
This API expects the caller to start the reactor.
Args:
callback (callable): A callable object that accepts one positional argument,
a :class:`.Message` or a class object that implements the ``__call__``
method. The class will be instantiated before use.
bindings (dict or list of dict): Bindings to declare before consuming. This
should be the same format as the :ref:`conf-bindings` configuration.
queues (dict): The queue to declare and consume from. Each key in this
dictionary should be a queue name to declare, and each value should
be a dictionary with the "durable", "auto_delete", "exclusive", and
"arguments" keys.
Returns:
twisted.internet.defer.Deferred:
A deferred that fires with the list of one or more
:class:`.Consumer` objects. Each consumer object has a
:attr:`.Consumer.result` instance variable that is a Deferred that
fires or errors when the consumer halts. Note that this API is
meant to survive network problems, so consuming will continue until
:meth:`.Consumer.cancel` is called or a fatal server error occurs.
The deferred returned by this function may error back with a
:class:`fedora_messaging.exceptions.BadDeclaration` if queues or
bindings cannot be declared on the broker, a
:class:`fedora_messaging.exceptions.PermissionException` if the user
doesn't have access to the queue, or
:class:`fedora_messaging.exceptions.ConnectionException` if the TLS
or AMQP handshake fails.
"""
if isinstance(bindings, dict):
bindings = [bindings]
callback = _check_callback(callback)
global _twisted_service
if _twisted_service is None:
_twisted_service = service.FedoraMessagingServiceV2(config.conf["amqp_url"])
reactor.callWhenRunning(_twisted_service.startService)
# Twisted is killing the underlying connection before stopService gets
# called, so we need to add it as a pre-shutdown event to gracefully
# finish up messages in progress.
reactor.addSystemEventTrigger(
"before", "shutdown", _twisted_service.stopService
)
return _twisted_service._service.factory.consume(callback, bindings, queues) | Start a consumer using the provided callback and run it using the Twisted
event loop (reactor).
.. note:: Callbacks run in a Twisted-managed thread pool using the
:func:`twisted.internet.threads.deferToThread` API to avoid them blocking
the event loop. If you wish to use Twisted APIs in your callback you must
use the :func:`twisted.internet.threads.blockingCallFromThread` or
:class:`twisted.internet.interfaces.IReactorFromThreads` APIs.
This API expects the caller to start the reactor.
Args:
callback (callable): A callable object that accepts one positional argument,
a :class:`.Message` or a class object that implements the ``__call__``
method. The class will be instantiated before use.
bindings (dict or list of dict): Bindings to declare before consuming. This
should be the same format as the :ref:`conf-bindings` configuration.
queues (dict): The queue to declare and consume from. Each key in this
dictionary should be a queue name to declare, and each value should
be a dictionary with the "durable", "auto_delete", "exclusive", and
"arguments" keys.
Returns:
twisted.internet.defer.Deferred:
A deferred that fires with the list of one or more
:class:`.Consumer` objects. Each consumer object has a
:attr:`.Consumer.result` instance variable that is a Deferred that
fires or errors when the consumer halts. Note that this API is
meant to survive network problems, so consuming will continue until
:meth:`.Consumer.cancel` is called or a fatal server error occurs.
The deferred returned by this function may error back with a
:class:`fedora_messaging.exceptions.BadDeclaration` if queues or
bindings cannot be declared on the broker, a
:class:`fedora_messaging.exceptions.PermissionException` if the user
doesn't have access to the queue, or
:class:`fedora_messaging.exceptions.ConnectionException` if the TLS
or AMQP handshake fails. | Below is the the instruction that describes the task:
### Input:
Start a consumer using the provided callback and run it using the Twisted
event loop (reactor).
.. note:: Callbacks run in a Twisted-managed thread pool using the
:func:`twisted.internet.threads.deferToThread` API to avoid them blocking
the event loop. If you wish to use Twisted APIs in your callback you must
use the :func:`twisted.internet.threads.blockingCallFromThread` or
:class:`twisted.internet.interfaces.IReactorFromThreads` APIs.
This API expects the caller to start the reactor.
Args:
callback (callable): A callable object that accepts one positional argument,
a :class:`.Message` or a class object that implements the ``__call__``
method. The class will be instantiated before use.
bindings (dict or list of dict): Bindings to declare before consuming. This
should be the same format as the :ref:`conf-bindings` configuration.
queues (dict): The queue to declare and consume from. Each key in this
dictionary should be a queue name to declare, and each value should
be a dictionary with the "durable", "auto_delete", "exclusive", and
"arguments" keys.
Returns:
twisted.internet.defer.Deferred:
A deferred that fires with the list of one or more
:class:`.Consumer` objects. Each consumer object has a
:attr:`.Consumer.result` instance variable that is a Deferred that
fires or errors when the consumer halts. Note that this API is
meant to survive network problems, so consuming will continue until
:meth:`.Consumer.cancel` is called or a fatal server error occurs.
The deferred returned by this function may error back with a
:class:`fedora_messaging.exceptions.BadDeclaration` if queues or
bindings cannot be declared on the broker, a
:class:`fedora_messaging.exceptions.PermissionException` if the user
doesn't have access to the queue, or
:class:`fedora_messaging.exceptions.ConnectionException` if the TLS
or AMQP handshake fails.
### Response:
def twisted_consume(callback, bindings=None, queues=None):
"""
Start a consumer using the provided callback and run it using the Twisted
event loop (reactor).
.. note:: Callbacks run in a Twisted-managed thread pool using the
:func:`twisted.internet.threads.deferToThread` API to avoid them blocking
the event loop. If you wish to use Twisted APIs in your callback you must
use the :func:`twisted.internet.threads.blockingCallFromThread` or
:class:`twisted.internet.interfaces.IReactorFromThreads` APIs.
This API expects the caller to start the reactor.
Args:
callback (callable): A callable object that accepts one positional argument,
a :class:`.Message` or a class object that implements the ``__call__``
method. The class will be instantiated before use.
bindings (dict or list of dict): Bindings to declare before consuming. This
should be the same format as the :ref:`conf-bindings` configuration.
queues (dict): The queue to declare and consume from. Each key in this
dictionary should be a queue name to declare, and each value should
be a dictionary with the "durable", "auto_delete", "exclusive", and
"arguments" keys.
Returns:
twisted.internet.defer.Deferred:
A deferred that fires with the list of one or more
:class:`.Consumer` objects. Each consumer object has a
:attr:`.Consumer.result` instance variable that is a Deferred that
fires or errors when the consumer halts. Note that this API is
meant to survive network problems, so consuming will continue until
:meth:`.Consumer.cancel` is called or a fatal server error occurs.
The deferred returned by this function may error back with a
:class:`fedora_messaging.exceptions.BadDeclaration` if queues or
bindings cannot be declared on the broker, a
:class:`fedora_messaging.exceptions.PermissionException` if the user
doesn't have access to the queue, or
:class:`fedora_messaging.exceptions.ConnectionException` if the TLS
or AMQP handshake fails.
"""
if isinstance(bindings, dict):
bindings = [bindings]
callback = _check_callback(callback)
global _twisted_service
if _twisted_service is None:
_twisted_service = service.FedoraMessagingServiceV2(config.conf["amqp_url"])
reactor.callWhenRunning(_twisted_service.startService)
# Twisted is killing the underlying connection before stopService gets
# called, so we need to add it as a pre-shutdown event to gracefully
# finish up messages in progress.
reactor.addSystemEventTrigger(
"before", "shutdown", _twisted_service.stopService
)
return _twisted_service._service.factory.consume(callback, bindings, queues) |
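Based only on the docstring above, a caller-side sketch might look like the following; the queue and binding values are placeholders, the binding keys follow the conf-bindings format the docstring references, and the api import path is an assumption:
from twisted.internet import reactor
from fedora_messaging import api

def callback(message):
    print(message)

queues = {
    "my_queue": {"durable": False, "auto_delete": True,
                 "exclusive": False, "arguments": {}},
}
bindings = {"exchange": "amq.topic", "queue": "my_queue",
            "routing_keys": ["org.example.#"]}

api.twisted_consume(callback, bindings=bindings, queues=queues)
reactor.run()  # this API expects the caller to start the reactor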