code | docstring | text |
---|---|---|
def add_media_description(self, media_description):
"""Adds a media_description.
arg: media_description (displayText): the new media_description
raise: InvalidArgument - ``media_description`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``media_description`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
if self.get_media_descriptions_metadata().is_read_only():
raise NoAccess()
self.add_or_replace_value('mediaDescriptions', media_description) | Adds a media_description.
arg: media_description (displayText): the new media_description
raise: InvalidArgument - ``media_description`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``media_description`` is ``null``
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Adds a media_description.
arg: media_description (displayText): the new media_description
raise: InvalidArgument - ``media_description`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``media_description`` is ``null``
*compliance: mandatory -- This method must be implemented.*
### Response:
def add_media_description(self, media_description):
"""Adds a media_description.
arg: media_description (displayText): the new media_description
raise: InvalidArgument - ``media_description`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``media_description`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
if self.get_media_descriptions_metadata().is_read_only():
raise NoAccess()
self.add_or_replace_value('mediaDescriptions', media_description) |
def inferObjectsWithRandomMovements(self):
"""
Infer each object without any location input.
"""
for objectName, objectFeatures in self.objects.iteritems():
self.reset()
inferred = False
prevTouchSequence = None
for _ in xrange(4):
while True:
touchSequence = list(objectFeatures)
random.shuffle(touchSequence)
if prevTouchSequence is not None:
if touchSequence[0] == prevTouchSequence[-1]:
continue
break
for i, feature in enumerate(touchSequence):
locationOnObject = (feature["top"] + feature["height"]/2,
feature["left"] + feature["width"]/2)
self.move(objectName, locationOnObject)
featureName = feature["name"]
featureSDR = self.features[featureName]
self.sense(featureSDR, learn=False)
inferred = (
set(self.objectLayer.getActiveCells()) ==
set(self.objectRepresentations[objectName]) and
set(self.inputLayer.getActiveCells()) ==
set(self.inputRepresentations[(objectName,
locationOnObject,
featureName)]) and
set(self.getActiveLocationCells()) ==
set(self.locationRepresentations[(objectName, locationOnObject)]))
if inferred:
break
prevTouchSequence = touchSequence
if inferred:
break | Infer each object without any location input. | Below is the instruction that describes the task:
### Input:
Infer each object without any location input.
### Response:
def inferObjectsWithRandomMovements(self):
"""
Infer each object without any location input.
"""
for objectName, objectFeatures in self.objects.iteritems():
self.reset()
inferred = False
prevTouchSequence = None
for _ in xrange(4):
while True:
touchSequence = list(objectFeatures)
random.shuffle(touchSequence)
if prevTouchSequence is not None:
if touchSequence[0] == prevTouchSequence[-1]:
continue
break
for i, feature in enumerate(touchSequence):
locationOnObject = (feature["top"] + feature["height"]/2,
feature["left"] + feature["width"]/2)
self.move(objectName, locationOnObject)
featureName = feature["name"]
featureSDR = self.features[featureName]
self.sense(featureSDR, learn=False)
inferred = (
set(self.objectLayer.getActiveCells()) ==
set(self.objectRepresentations[objectName]) and
set(self.inputLayer.getActiveCells()) ==
set(self.inputRepresentations[(objectName,
locationOnObject,
featureName)]) and
set(self.getActiveLocationCells()) ==
set(self.locationRepresentations[(objectName, locationOnObject)]))
if inferred:
break
prevTouchSequence = touchSequence
if inferred:
break |
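The inner while-loop above is a small but important detail: it keeps re-drawing a shuffled touch sequence until the first feature of the new pass differs from the last feature of the previous pass, so the agent never senses the same feature twice in a row. A minimal, self-contained sketch of that pattern in Python 3 follows (the feature labels and number of passes are invented for illustration):

import random

def non_repeating_shuffles(items, n_passes, rng=random):
    """Yield shuffled copies of items, never starting a pass with the
    element that ended the previous pass."""
    prev_last = None
    for _ in range(n_passes):
        while True:
            seq = list(items)
            rng.shuffle(seq)
            # Re-draw if this pass would repeat the previous touch.
            if prev_last is not None and seq[0] == prev_last:
                continue
            break
        prev_last = seq[-1]
        yield seq

# Example: four passes over three features, no back-to-back repeats.
for seq in non_repeating_shuffles(["A", "B", "C"], 4):
    print(seq)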
def _element_to_bson(key, value, check_keys, opts):
"""Encode a single key, value pair."""
if not isinstance(key, string_type):
raise InvalidDocument("documents must have only string keys, "
"key was %r" % (key,))
if check_keys:
if key.startswith("$"):
raise InvalidDocument("key %r must not start with '$'" % (key,))
if "." in key:
raise InvalidDocument("key %r must not contain '.'" % (key,))
name = _make_name(key)
return _name_value_to_bson(name, value, check_keys, opts) | Encode a single key, value pair. | Below is the instruction that describes the task:
### Input:
Encode a single key, value pair.
### Response:
def _element_to_bson(key, value, check_keys, opts):
"""Encode a single key, value pair."""
if not isinstance(key, string_type):
raise InvalidDocument("documents must have only string keys, "
"key was %r" % (key,))
if check_keys:
if key.startswith("$"):
raise InvalidDocument("key %r must not start with '$'" % (key,))
if "." in key:
raise InvalidDocument("key %r must not contain '.'" % (key,))
name = _make_name(key)
return _name_value_to_bson(name, value, check_keys, opts) |
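The function above is mostly a guard around key names before delegating to _name_value_to_bson. A standalone sketch of just those checks, using plain ValueError rather than pymongo's InvalidDocument (the names here are illustrative, not pymongo internals):

def validate_key(key, check_keys=True):
    """Reject key names that BSON document encoding would refuse."""
    if not isinstance(key, str):
        raise ValueError("documents must have only string keys, "
                         "key was %r" % (key,))
    if check_keys:
        if key.startswith("$"):
            raise ValueError("key %r must not start with '$'" % (key,))
        if "." in key:
            raise ValueError("key %r must not contain '.'" % (key,))
    return key

validate_key("name")       # fine
# validate_key("$set")     # would raise ValueError
# validate_key("a.b")      # would raise ValueError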
def get_doc(self, objtxt):
"""Get object documentation dictionary"""
if self._reading:
return
wait_loop = QEventLoop()
self.sig_got_reply.connect(wait_loop.quit)
self.silent_exec_method("get_ipython().kernel.get_doc('%s')" % objtxt)
wait_loop.exec_()
# Remove loop connection and loop
self.sig_got_reply.disconnect(wait_loop.quit)
wait_loop = None
return self._kernel_reply | Get object documentation dictionary | Below is the instruction that describes the task:
### Input:
Get object documentation dictionary
### Response:
def get_doc(self, objtxt):
"""Get object documentation dictionary"""
if self._reading:
return
wait_loop = QEventLoop()
self.sig_got_reply.connect(wait_loop.quit)
self.silent_exec_method("get_ipython().kernel.get_doc('%s')" % objtxt)
wait_loop.exec_()
# Remove loop connection and loop
self.sig_got_reply.disconnect(wait_loop.quit)
wait_loop = None
return self._kernel_reply |
def mod_watch(name, **kwargs):
'''
Install/reinstall a package based on a watch requisite
.. note::
This state exists to support special handling of the ``watch``
:ref:`requisite <requisites>`. It should not be called directly.
Parameters for this function should be set by the state being triggered.
'''
sfun = kwargs.pop('sfun', None)
mapfun = {'purged': purged,
'latest': latest,
'removed': removed,
'installed': installed}
if sfun in mapfun:
return mapfun[sfun](name, **kwargs)
return {'name': name,
'changes': {},
'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun),
'result': False} | Install/reinstall a package based on a watch requisite
.. note::
This state exists to support special handling of the ``watch``
:ref:`requisite <requisites>`. It should not be called directly.
Parameters for this function should be set by the state being triggered. | Below is the instruction that describes the task:
### Input:
Install/reinstall a package based on a watch requisite
.. note::
This state exists to support special handling of the ``watch``
:ref:`requisite <requisites>`. It should not be called directly.
Parameters for this function should be set by the state being triggered.
### Response:
def mod_watch(name, **kwargs):
'''
Install/reinstall a package based on a watch requisite
.. note::
This state exists to support special handling of the ``watch``
:ref:`requisite <requisites>`. It should not be called directly.
Parameters for this function should be set by the state being triggered.
'''
sfun = kwargs.pop('sfun', None)
mapfun = {'purged': purged,
'latest': latest,
'removed': removed,
'installed': installed}
if sfun in mapfun:
return mapfun[sfun](name, **kwargs)
return {'name': name,
'changes': {},
'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun),
'result': False} |
def loadNetworkbyName(self, name, callback=None, errback=None):
"""
Load an existing Network by name into a high level Network object
:param str name: Name of an existing Network
"""
import ns1.ipam
network = ns1.ipam.Network(self.config, name=name)
return network.load(callback=callback, errback=errback) | Load an existing Network by name into a high level Network object
:param str name: Name of an existing Network | Below is the instruction that describes the task:
### Input:
Load an existing Network by name into a high level Network object
:param str name: Name of an existing Network
### Response:
def loadNetworkbyName(self, name, callback=None, errback=None):
"""
Load an existing Network by name into a high level Network object
:param str name: Name of an existing Network
"""
import ns1.ipam
network = ns1.ipam.Network(self.config, name=name)
return network.load(callback=callback, errback=errback) |
def _set_hw_state(self, v, load=False):
"""
Setter method for hw_state, mapped from YANG variable /hw_state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_hw_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_hw_state() directly.
YANG Description: HW Route Info
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=hw_state.hw_state, is_container='container', presence=False, yang_name="hw-state", rest_name="hw-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-hw', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """hw_state must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=hw_state.hw_state, is_container='container', presence=False, yang_name="hw-state", rest_name="hw-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-hw', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='container', is_config=True)""",
})
self.__hw_state = t
if hasattr(self, '_set'):
self._set() | Setter method for hw_state, mapped from YANG variable /hw_state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_hw_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_hw_state() directly.
YANG Description: HW Route Info | Below is the instruction that describes the task:
### Input:
Setter method for hw_state, mapped from YANG variable /hw_state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_hw_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_hw_state() directly.
YANG Description: HW Route Info
### Response:
def _set_hw_state(self, v, load=False):
"""
Setter method for hw_state, mapped from YANG variable /hw_state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_hw_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_hw_state() directly.
YANG Description: HW Route Info
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=hw_state.hw_state, is_container='container', presence=False, yang_name="hw-state", rest_name="hw-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-hw', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """hw_state must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=hw_state.hw_state, is_container='container', presence=False, yang_name="hw-state", rest_name="hw-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-hw', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='container', is_config=True)""",
})
self.__hw_state = t
if hasattr(self, '_set'):
self._set() |
def getPotential(self, columnIndex, potential):
"""
:param columnIndex: (int) column index to get potential for.
:param potential: (list) will be overwritten with column potentials. Must
match the number of inputs.
"""
assert(columnIndex < self._numColumns)
potential[:] = self._potentialPools[columnIndex] | :param columnIndex: (int) column index to get potential for.
:param potential: (list) will be overwritten with column potentials. Must
match the number of inputs. | Below is the instruction that describes the task:
### Input:
:param columnIndex: (int) column index to get potential for.
:param potential: (list) will be overwritten with column potentials. Must
match the number of inputs.
### Response:
def getPotential(self, columnIndex, potential):
"""
:param columnIndex: (int) column index to get potential for.
:param potential: (list) will be overwritten with column potentials. Must
match the number of inputs.
"""
assert(columnIndex < self._numColumns)
potential[:] = self._potentialPools[columnIndex] |
def name(self, new_name):
"""
Sets the name of this VPCS VM.
:param new_name: name
"""
if self.script_file:
content = self.startup_script
content = content.replace(self._name, new_name)
escaped_name = new_name.replace('\\', '')
content = re.sub(r"^set pcname .+$", "set pcname " + escaped_name, content, flags=re.MULTILINE)
self.startup_script = content
super(VPCSVM, VPCSVM).name.__set__(self, new_name) | Sets the name of this VPCS VM.
:param new_name: name | Below is the instruction that describes the task:
### Input:
Sets the name of this VPCS VM.
:param new_name: name
### Response:
def name(self, new_name):
"""
Sets the name of this VPCS VM.
:param new_name: name
"""
if self.script_file:
content = self.startup_script
content = content.replace(self._name, new_name)
escaped_name = new_name.replace('\\', '')
content = re.sub(r"^set pcname .+$", "set pcname " + escaped_name, content, flags=re.MULTILINE)
self.startup_script = content
super(VPCSVM, VPCSVM).name.__set__(self, new_name) |
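The rename above leans on re.sub with re.MULTILINE, so ^ and $ anchor at each line of the startup script rather than at the ends of the whole string, and only the "set pcname" line is rewritten. A small illustration with an invented script:

import re

script = "set pcname old-vm\nip dhcp\nshow ip"
new_name = "new-vm"
# With re.MULTILINE, ^ and $ match per line, so only the pcname line changes.
updated = re.sub(r"^set pcname .+$", "set pcname " + new_name,
                 script, flags=re.MULTILINE)
print(updated)
# set pcname new-vm
# ip dhcp
# show ip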
def put(self, url, html, cache_info=None):
"""
Put response into cache
:param url: Url to cache
:type url: str | unicode
:param html: HTML content of url
:type html: str | unicode
:param cache_info: Cache Info (default: None)
:type cache_info: floscraper.models.CacheInfo
:rtype: None
"""
key = hashlib.md5(url).hexdigest()
try:
self._cache_set(key, html)
except:
self.exception("Failed to write cache")
return
self.update(url, cache_info) | Put response into cache
:param url: Url to cache
:type url: str | unicode
:param html: HTML content of url
:type html: str | unicode
:param cache_info: Cache Info (default: None)
:type cache_info: floscraper.models.CacheInfo
:rtype: None | Below is the instruction that describes the task:
### Input:
Put response into cache
:param url: Url to cache
:type url: str | unicode
:param html: HTML content of url
:type html: str | unicode
:param cache_info: Cache Info (default: None)
:type cache_info: floscraper.models.CacheInfo
:rtype: None
### Response:
def put(self, url, html, cache_info=None):
"""
Put response into cache
:param url: Url to cache
:type url: str | unicode
:param html: HTML content of url
:type html: str | unicode
:param cache_info: Cache Info (default: None)
:type cache_info: floscraper.models.CacheInfo
:rtype: None
"""
key = hashlib.md5(url).hexdigest()
try:
self._cache_set(key, html)
except:
self.exception("Failed to write cache")
return
self.update(url, cache_info) |
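The cache key is simply the MD5 hex digest of the URL. A minimal in-memory equivalent is sketched below; note that on Python 3 the URL has to be encoded to bytes before hashing, and the dict-backed store is an assumption made only for this example:

import hashlib

class TinyCache(object):
    def __init__(self):
        self._store = {}

    def put(self, url, html):
        # Fixed-length key derived from the URL.
        key = hashlib.md5(url.encode("utf-8")).hexdigest()
        self._store[key] = html

    def get(self, url):
        key = hashlib.md5(url.encode("utf-8")).hexdigest()
        return self._store.get(key)

cache = TinyCache()
cache.put("https://example.com", "<html>hello</html>")
print(cache.get("https://example.com"))   # <html>hello</html>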
def simulate_leapfrog(config_func: Callable, accel_func: Callable,
t0: date, t1: date, steps_per_day: int):
"""
Simulate the earth-sun system from t0 to t1 using Leapfrog Integration.
INPUTS:
config_func: function taking a date or date range and returning position and velocity of bodies
accel_func: function taking positions of the bodies and returning their accelerations
t0: start date of the simulation; a python date
t1: end date of the simulation (exclusive); a python date
dt: time step in days.
num_bodies: the number of celestial bodies in the simulation
"""
# Length of the simulation (number of steps)
N: int = (t1 - t0).days * steps_per_day
# Get the initial conditions
q0, v0 = config_func(t0)
# Infer the number of dimensions from the shape of q0
dims: int = q0.shape[1]
# The time step in seconds
dt = float(day2sec) / float(steps_per_day)
# Square of the time step
dt2: float = dt * dt
# Initialize arrays to store computed positions and velocities
q: np.ndarray = np.zeros((N, dims))
v: np.ndarray = np.zeros((N, dims))
# Initialize the first row with the initial conditions from the JPL ephemerides
q[0, :] = q0
v[0, :] = v0
# Initialize an array to store the acceleration at each time step
a: np.ndarray = np.zeros((N, dims))
# First row of accelerations
a[0, :] = accel_func(q[0])
# Perform leapfrog integration simulation
# https://en.wikipedia.org/wiki/Leapfrog_integration
print(f'Performing leapfrog integration with {N} steps...')
for i in tqdm(range(N-1)):
# Positions at the next time step
q[i+1,:] = q[i,:] + v[i,:] * dt + 0.5 * a[i,:] * dt2
# Accelerations of each body in the system at the next time step
a[i+1,:] = accel_func(q[i+1])
# Velocities of each body at the next time step
v[i+1,:] = v[i,:] + 0.5 * (a[i,:] + a[i+1,:]) * dt
return q, v | Simulate the earth-sun system from t0 to t1 using Leapfrog Integration.
INPUTS:
config_func: function taking a date or date range and returning position and velocity of bodies
accel_func: function taking positions of the bodies and returning their accelerations
t0: start date of the simulation; a python date
t1: end date of the simulation (exclusive); a python date
dt: time step in days.
num_bodies: the number of celestial bodies in the simulation | Below is the instruction that describes the task:
### Input:
Simulate the earth-sun system from t0 to t1 using Leapfrog Integration.
INPUTS:
config_func: function taking a date or date range and returning position and velocity of bodies
accel_func: function taking positions of the bodies and returning their accelerations
t0: start date of the simulation; a python date
t1: end date of the simulation (exclusive); a python date
dt: time step in days.
num_bodies: the number of celestial bodies in the simulation
### Response:
def simulate_leapfrog(config_func: Callable, accel_func: Callable,
t0: date, t1: date, steps_per_day: int):
"""
Simulate the earth-sun system from t0 to t1 using Leapfrog Integration.
INPUTS:
config_func: function taking a date or date range and returning position and velocity of bodies
accel_func: function taking positions of the bodies and returning their accelerations
t0: start date of the simulation; a python date
t1: end date of the simulation (exclusive); a python date
dt: time step in days.
num_bodies: the number of celestial bodies in the simulation
"""
# Length of the simulation (number of steps)
N: int = (t1 - t0).days * steps_per_day
# Get the initial conditions
q0, v0 = config_func(t0)
# Infer the number of dimensions from the shape of q0
dims: int = q0.shape[1]
# The time step in seconds
dt = float(day2sec) / float(steps_per_day)
# Square of the time step
dt2: float = dt * dt
# Initialize arrays to store computed positions and velocities
q: np.ndarray = np.zeros((N, dims))
v: np.ndarray = np.zeros((N, dims))
# Initialize the first row with the initial conditions from the JPL ephemerides
q[0, :] = q0
v[0, :] = v0
# Initialize an array to store the acceleration at each time step
a: np.ndarray = np.zeros((N, dims))
# First row of accelerations
a[0, :] = accel_func(q[0])
# Perform leapfrog integration simulation
# https://en.wikipedia.org/wiki/Leapfrog_integration
print(f'Performing leapfrog integration with {N} steps...')
for i in tqdm(range(N-1)):
# Positions at the next time step
q[i+1,:] = q[i,:] + v[i,:] * dt + 0.5 * a[i,:] * dt2
# Accelerations of each body in the system at the next time step
a[i+1,:] = accel_func(q[i+1])
# Velocities of each body at the next time step
v[i+1,:] = v[i,:] + 0.5 * (a[i,:] + a[i+1,:]) * dt
return q, v |
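The update inside the loop is the standard velocity-Verlet (leapfrog) scheme: advance positions with the current acceleration, recompute accelerations, then advance velocities with the average of the old and new accelerations. The same scheme can be sanity-checked on a one-dimensional harmonic oscillator, where the exact solution is cos(t); the step size and spring constant below are arbitrary:

import numpy as np

def leapfrog_sho(q0=1.0, v0=0.0, k=1.0, dt=0.01, n_steps=1000):
    """Leapfrog integration of q'' = -k*q for a unit mass."""
    q = np.empty(n_steps)
    v = np.empty(n_steps)
    q[0], v[0] = q0, v0
    a = -k * q[0]
    for i in range(n_steps - 1):
        q[i + 1] = q[i] + v[i] * dt + 0.5 * a * dt**2   # position update
        a_new = -k * q[i + 1]                           # acceleration at new position
        v[i + 1] = v[i] + 0.5 * (a + a_new) * dt        # velocity update
        a = a_new
    return q, v

q, v = leapfrog_sho()
t_end = 0.01 * (1000 - 1)
print(q[-1], np.cos(t_end))   # agree closely (second-order accurate method)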
def print_about(self):
"""Print an info message about the tool."""
filepath = os.path.join(self.suite_path, "bin", self.tool_name)
print "Tool: %s" % self.tool_name
print "Path: %s" % filepath
print "Suite: %s" % self.suite_path
msg = "%s (%r)" % (self.context.load_path, self.context_name)
print "Context: %s" % msg
variants = self.context.get_tool_variants(self.tool_name)
if variants:
if len(variants) > 1:
self._print_conflicting(variants)
else:
variant = iter(variants).next()
print "Package: %s" % variant.qualified_package_name
return 0 | Print an info message about the tool. | Below is the instruction that describes the task:
### Input:
Print an info message about the tool.
### Response:
def print_about(self):
"""Print an info message about the tool."""
filepath = os.path.join(self.suite_path, "bin", self.tool_name)
print "Tool: %s" % self.tool_name
print "Path: %s" % filepath
print "Suite: %s" % self.suite_path
msg = "%s (%r)" % (self.context.load_path, self.context_name)
print "Context: %s" % msg
variants = self.context.get_tool_variants(self.tool_name)
if variants:
if len(variants) > 1:
self._print_conflicting(variants)
else:
variant = iter(variants).next()
print "Package: %s" % variant.qualified_package_name
return 0 |
def search(query, team=None):
"""
Search for packages
"""
if team is None:
team = _find_logged_in_team()
if team is not None:
session = _get_session(team)
response = session.get("%s/api/search/" % get_registry_url(team), params=dict(q=query))
print("* Packages in team %s" % team)
packages = response.json()['packages']
for pkg in packages:
print(("%s:" % team) + ("%(owner)s/%(name)s" % pkg))
if len(packages) == 0:
print("(No results)")
print("* Packages in public cloud")
public_session = _get_session(None)
response = public_session.get("%s/api/search/" % get_registry_url(None), params=dict(q=query))
packages = response.json()['packages']
for pkg in packages:
print("%(owner)s/%(name)s" % pkg)
if len(packages) == 0:
print("(No results)") | Search for packages | Below is the the instruction that describes the task:
### Input:
Search for packages
### Response:
def search(query, team=None):
"""
Search for packages
"""
if team is None:
team = _find_logged_in_team()
if team is not None:
session = _get_session(team)
response = session.get("%s/api/search/" % get_registry_url(team), params=dict(q=query))
print("* Packages in team %s" % team)
packages = response.json()['packages']
for pkg in packages:
print(("%s:" % team) + ("%(owner)s/%(name)s" % pkg))
if len(packages) == 0:
print("(No results)")
print("* Packages in public cloud")
public_session = _get_session(None)
response = public_session.get("%s/api/search/" % get_registry_url(None), params=dict(q=query))
packages = response.json()['packages']
for pkg in packages:
print("%(owner)s/%(name)s" % pkg)
if len(packages) == 0:
print("(No results)") |
def messages(self):
"""
Access the messages
:returns: twilio.rest.messaging.v1.session.message.MessageList
:rtype: twilio.rest.messaging.v1.session.message.MessageList
"""
if self._messages is None:
self._messages = MessageList(self._version, session_sid=self._solution['sid'], )
return self._messages | Access the messages
:returns: twilio.rest.messaging.v1.session.message.MessageList
:rtype: twilio.rest.messaging.v1.session.message.MessageList | Below is the instruction that describes the task:
### Input:
Access the messages
:returns: twilio.rest.messaging.v1.session.message.MessageList
:rtype: twilio.rest.messaging.v1.session.message.MessageList
### Response:
def messages(self):
"""
Access the messages
:returns: twilio.rest.messaging.v1.session.message.MessageList
:rtype: twilio.rest.messaging.v1.session.message.MessageList
"""
if self._messages is None:
self._messages = MessageList(self._version, session_sid=self._solution['sid'], )
return self._messages |
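This property is a lazy-initialization cache: the MessageList is only constructed on first access and reused afterwards. The same pattern in a dependency-free sketch (the ExpensiveResource class is invented for illustration):

class ExpensiveResource(object):
    def __init__(self, name):
        print("building", name)
        self.name = name

class Client(object):
    def __init__(self):
        self._messages = None

    @property
    def messages(self):
        # Build on first access only, then hand back the cached instance.
        if self._messages is None:
            self._messages = ExpensiveResource("messages")
        return self._messages

c = Client()
c.messages   # prints "building messages"
c.messages   # cached; prints nothing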
def dumps(number):
"""Dumps an integer into a base36 string.
:param number: the 10-based integer.
:returns: the base36 string.
"""
if not isinstance(number, integer_types):
raise TypeError('number must be an integer')
if number < 0:
return '-' + dumps(-number)
value = ''
while number != 0:
number, index = divmod(number, len(alphabet))
value = alphabet[index] + value
return value or '0' | Dumps an integer into a base36 string.
:param number: the 10-based integer.
:returns: the base36 string. | Below is the instruction that describes the task:
### Input:
Dumps an integer into a base36 string.
:param number: the 10-based integer.
:returns: the base36 string.
### Response:
def dumps(number):
"""Dumps an integer into a base36 string.
:param number: the 10-based integer.
:returns: the base36 string.
"""
if not isinstance(number, integer_types):
raise TypeError('number must be an integer')
if number < 0:
return '-' + dumps(-number)
value = ''
while number != 0:
number, index = divmod(number, len(alphabet))
value = alphabet[index] + value
return value or '0' |
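dumps relies on module-level alphabet and integer_types names that are not shown here. A self-contained variant of the same base36 encoding, together with its inverse to make the round trip explicit (the 0-9a-z alphabet is the usual assumption):

ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz"

def base36_dumps(number):
    """Encode an integer as a base36 string."""
    if number < 0:
        return "-" + base36_dumps(-number)
    value = ""
    while number != 0:
        number, index = divmod(number, 36)
        value = ALPHABET[index] + value
    return value or "0"

def base36_loads(text):
    """Decode a base36 string back to an integer."""
    return int(text, 36)

assert base36_dumps(0) == "0"
assert base36_dumps(35) == "z"
assert base36_loads(base36_dumps(123456789)) == 123456789
print(base36_dumps(123456789))   # 21i3v9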
def reset_stats(self):
"""
Returns:
mean, max: two stats of the runners, to be added to backend
"""
scores = list(itertools.chain.from_iterable([v.total_scores for v in self._runners]))
for v in self._runners:
v.total_scores.clear()
try:
return np.mean(scores), np.max(scores)
except Exception:
logger.exception("Cannot compute total scores in EnvRunner.")
return None, None | Returns:
mean, max: two stats of the runners, to be added to backend | Below is the instruction that describes the task:
### Input:
Returns:
mean, max: two stats of the runners, to be added to backend
### Response:
def reset_stats(self):
"""
Returns:
mean, max: two stats of the runners, to be added to backend
"""
scores = list(itertools.chain.from_iterable([v.total_scores for v in self._runners]))
for v in self._runners:
v.total_scores.clear()
try:
return np.mean(scores), np.max(scores)
except Exception:
logger.exception("Cannot compute total scores in EnvRunner.")
return None, None |
async def dump_variant(self, elem, elem_type=None, params=None, obj=None):
"""
Dumps variant type to the writer.
Supports both wrapped and raw variant.
:param elem:
:param elem_type:
:param params:
:param obj:
:return:
"""
fvalue = None
if isinstance(elem, x.VariantType) or elem_type.WRAPS_VALUE:
try:
self.tracker.push_variant(elem.variant_elem_type)
fvalue = {
elem.variant_elem: await self._dump_field(getattr(elem, elem.variant_elem), elem.variant_elem_type, obj=obj)
}
self.tracker.pop()
except Exception as e:
raise helpers.ArchiveException(e, tracker=self.tracker) from e
else:
try:
fdef = elem_type.find_fdef(elem_type.f_specs(), elem)
self.tracker.push_variant(fdef[1])
fvalue = {
fdef[0]: await self._dump_field(elem, fdef[1], obj=obj)
}
self.tracker.pop()
except Exception as e:
raise helpers.ArchiveException(e, tracker=self.tracker) from e
return fvalue | Dumps variant type to the writer.
Supports both wrapped and raw variant.
:param elem:
:param elem_type:
:param params:
:param obj:
:return: | Below is the instruction that describes the task:
### Input:
Dumps variant type to the writer.
Supports both wrapped and raw variant.
:param elem:
:param elem_type:
:param params:
:param obj:
:return:
### Response:
async def dump_variant(self, elem, elem_type=None, params=None, obj=None):
"""
Dumps variant type to the writer.
Supports both wrapped and raw variant.
:param elem:
:param elem_type:
:param params:
:param obj:
:return:
"""
fvalue = None
if isinstance(elem, x.VariantType) or elem_type.WRAPS_VALUE:
try:
self.tracker.push_variant(elem.variant_elem_type)
fvalue = {
elem.variant_elem: await self._dump_field(getattr(elem, elem.variant_elem), elem.variant_elem_type, obj=obj)
}
self.tracker.pop()
except Exception as e:
raise helpers.ArchiveException(e, tracker=self.tracker) from e
else:
try:
fdef = elem_type.find_fdef(elem_type.f_specs(), elem)
self.tracker.push_variant(fdef[1])
fvalue = {
fdef[0]: await self._dump_field(elem, fdef[1], obj=obj)
}
self.tracker.pop()
except Exception as e:
raise helpers.ArchiveException(e, tracker=self.tracker) from e
return fvalue |
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a PLSRecall.dat file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_offset = 0
file_size = file_object.get_size()
record_map = self._GetDataTypeMap('pls_recall_record')
while file_offset < file_size:
try:
pls_record, record_data_size = self._ReadStructureFromFileObject(
file_object, file_offset, record_map)
except (ValueError, errors.ParseError) as exception:
if file_offset == 0:
raise errors.UnableToParseFile('Unable to parse first record.')
parser_mediator.ProduceExtractionWarning((
'unable to parse record at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
break
if file_offset == 0 and not self._VerifyRecord(pls_record):
raise errors.UnableToParseFile('Verification of first record failed.')
event_data = PlsRecallEventData()
event_data.database_name = pls_record.database_name.rstrip('\x00')
event_data.sequence_number = pls_record.sequence_number
event_data.offset = file_offset
event_data.query = pls_record.query.rstrip('\x00')
event_data.username = pls_record.username.rstrip('\x00')
date_time = dfdatetime_delphi_date_time.DelphiDateTime(
timestamp=pls_record.last_written_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
file_offset += record_data_size | Parses a PLSRecall.dat file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed. | Below is the instruction that describes the task:
### Input:
Parses a PLSRecall.dat file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
### Response:
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a PLSRecall.dat file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_offset = 0
file_size = file_object.get_size()
record_map = self._GetDataTypeMap('pls_recall_record')
while file_offset < file_size:
try:
pls_record, record_data_size = self._ReadStructureFromFileObject(
file_object, file_offset, record_map)
except (ValueError, errors.ParseError) as exception:
if file_offset == 0:
raise errors.UnableToParseFile('Unable to parse first record.')
parser_mediator.ProduceExtractionWarning((
'unable to parse record at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
break
if file_offset == 0 and not self._VerifyRecord(pls_record):
raise errors.UnableToParseFile('Verification of first record failed.')
event_data = PlsRecallEventData()
event_data.database_name = pls_record.database_name.rstrip('\x00')
event_data.sequence_number = pls_record.sequence_number
event_data.offset = file_offset
event_data.query = pls_record.query.rstrip('\x00')
event_data.username = pls_record.username.rstrip('\x00')
date_time = dfdatetime_delphi_date_time.DelphiDateTime(
timestamp=pls_record.last_written_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
file_offset += record_data_size |
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def visible(app):
return VisibleFilter(app, conf)
return visible | Returns a WSGI filter app for use with paste.deploy. | Below is the instruction that describes the task:
### Input:
Returns a WSGI filter app for use with paste.deploy.
### Response:
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def visible(app):
return VisibleFilter(app, conf)
return visible |
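filter_factory follows the paste.deploy convention: merge the global and local config, then return a callable that wraps the downstream WSGI app. A minimal stand-in middleware with the same shape is sketched below; the header it injects and the 'marker' option are purely illustrative:

class HeaderFilter(object):
    """WSGI middleware that adds a single response header."""

    def __init__(self, app, conf):
        self.app = app
        self.marker = conf.get('marker', 'on')

    def __call__(self, environ, start_response):
        def _start_response(status, headers, exc_info=None):
            headers = list(headers) + [('X-Filtered', self.marker)]
            return start_response(status, headers, exc_info)
        return self.app(environ, _start_response)

def header_filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter that wraps the app in HeaderFilter."""
    conf = global_conf.copy()
    conf.update(local_conf)
    def header_filter(app):
        return HeaderFilter(app, conf)
    return header_filter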
async def echo_all(app, message):
"""Send and recieve a message from all running echo servers"""
# Loop through all registered server addresses
for address in app.kv.get_prefix('address.').values():
# Parse the host and port from the stored address
host, port = address.decode().split(':')
port = int(port)
# Send the message to the echo server
await tcp_echo_client(message, loop, host, port) | Send and receive a message from all running echo servers | Below is the instruction that describes the task:
### Input:
Send and receive a message from all running echo servers
### Response:
async def echo_all(app, message):
"""Send and recieve a message from all running echo servers"""
# Loop through all registered server addresses
for address in app.kv.get_prefix('address.').values():
# Parse the host and port from the stored address
host, port = address.decode().split(':')
port = int(port)
# Send the message to the echo server
await tcp_echo_client(message, loop, host, port) |
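tcp_echo_client itself is not shown above. A plausible implementation of both ends of that exchange with the modern asyncio streams API (Python 3.7+) is sketched below; it drops the explicit loop argument used in the original signature, and the host/port values are invented:

import asyncio

async def handle_echo(reader, writer):
    data = await reader.read(100)
    writer.write(data)              # echo the bytes straight back
    await writer.drain()
    writer.close()

async def tcp_echo_client(message, host, port):
    reader, writer = await asyncio.open_connection(host, port)
    writer.write(message.encode())
    await writer.drain()
    reply = await reader.read(100)
    writer.close()
    return reply.decode()

async def main():
    server = await asyncio.start_server(handle_echo, "127.0.0.1", 8888)
    print(await tcp_echo_client("hello", "127.0.0.1", 8888))   # hello
    server.close()
    await server.wait_closed()

asyncio.run(main())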
def label(self):
"""Provide access to the notification label.
Returns:
str: The notification label
"""
with self.selenium.context(self.selenium.CONTEXT_CHROME):
return self.root.get_attribute("label") | Provide access to the notification label.
Returns:
str: The notification label | Below is the instruction that describes the task:
### Input:
Provide access to the notification label.
Returns:
str: The notification label
### Response:
def label(self):
"""Provide access to the notification label.
Returns:
str: The notification label
"""
with self.selenium.context(self.selenium.CONTEXT_CHROME):
return self.root.get_attribute("label") |
def get_setup_version(reponame):
"""Use autover to get up to date version."""
# importing self into setup.py is unorthodox, but param has no
# required dependencies outside of python
from param.version import Version
return Version.setup_version(os.path.dirname(__file__),reponame,archive_commit="$Format:%h$") | Use autover to get up to date version. | Below is the instruction that describes the task:
### Input:
Use autover to get up to date version.
### Response:
def get_setup_version(reponame):
"""Use autover to get up to date version."""
# importing self into setup.py is unorthodox, but param has no
# required dependencies outside of python
from param.version import Version
return Version.setup_version(os.path.dirname(__file__),reponame,archive_commit="$Format:%h$") |
def gssa(model, maxit=100, tol=1e-8, initial_dr=None, verbose=False,
n_sim=10000, deg=3, damp=0.1, seed=42):
"""
Sketch of algorithm:
0. Choose levels for the initial states and the simulation length (n_sim)
1. Obtain an initial decision rule -- here using first order perturbation
2. Draw a sequence of innovations epsilon
3. Iterate on the following steps:
- Use the epsilons, initial states, and proposed decision rule to
simulate model forward. Will leave us with time series of states and
controls
- Evaluate expectations using quadrature
- Use direct response to get alternative proposal for controls
- Regress updated controls on the simulated states to get proposal
coefficients. New coefficients are convex combination of previous
coefficients and proposal coefficients. Weights controlled by damp,
where damp is the weight on the old coefficients. This should be
fairly low to increase chances of convergence.
- Check difference between the simulated series of controls and the
direct response version of controls
"""
# verify input arguments
if deg < 0 or deg > 5:
raise ValueError("deg must be in [1, 5]")
if damp < 0 or damp > 1:
raise ValueError("damp must be in [0, 1]")
t1 = time.time()
# extract model functions and parameters
g = model.__original_functions__['transition']
g_gu = model.__original_gufunctions__['transition']
h_gu = model.__original_gufunctions__['expectation']
d_gu = model.__original_gufunctions__['direct_response']
p = model.calibration['parameters']
n_s = len(model.symbols["states"])
n_x = len(model.symbols["controls"])
n_z = len(model.symbols["expectations"])
n_eps = len(model.symbols["shocks"])
s0 = model.calibration["states"]
x0 = model.calibration["controls"]
# construct initial decision rule if not supplied
if initial_dr is None:
drp = approximate_controls(model)
else:
drp = initial_dr
# set up quadrature weights and nodes
distrib = model.get_distribution()
nodes, weights = distrib.discretize()
# draw sequence of innovations
np.random.seed(seed)
distrib = model.get_distribution()
sigma = distrib.sigma
epsilon = np.random.multivariate_normal(np.zeros(n_eps), sigma, n_sim)
# simulate initial decision rule and do initial regression for coefs
init_sim = simulate(model, drp, horizon=n_sim, return_array=True,
forcing_shocks=epsilon)
s_sim = init_sim[:, 0, 0:n_s]
x_sim = init_sim[:, 0, n_s:n_s + n_x]
Phi_sim = complete_polynomial(s_sim.T, deg).T
coefs = np.ascontiguousarray(lstsq(Phi_sim, x_sim)[0])
# NOTE: the ascontiguousarray above was needed for numba to compile the
# `np.dot` in the simulation function in nopython mode. Apparently
# the array returned from lstsq is not C-contiguous
# allocate for simulated series of expectations and next period states
z_sim = np.empty((n_sim, n_z))
S = np.empty_like(s_sim)
X = np.empty_like(x_sim)
H = np.empty_like(z_sim)
new_x = np.empty_like(x_sim)
# set initial states and controls
s_sim[0, :] = s0
x_sim[0, :] = x0
Phi_t = np.empty(n_complete(n_s, deg)) # buffer array for simulation
# create jitted function that will simulate states and controls, using
# the epsilon shocks from above (define here as closure over all data
# above).
@jit(nopython=True)
def simulate_states_controls(s, x, Phi_t, coefs):
for t in range(1, n_sim):
g(s[t - 1, :], x[t - 1, :], epsilon[t, :], p, s[t, :])
# fill Phi_t with new complete poly version of s[t, :]
_complete_poly_impl_vec(s[t, :], deg, Phi_t)
# do inner product to get new controls
x[t, :] = Phi_t @coefs
it = 0
err = 10.0
err_0 = 10
if verbose:
headline = '|{0:^4} | {1:10} | {2:8} | {3:8} |'
headline = headline.format('N', ' Error', 'Gain', 'Time')
stars = '-' * len(headline)
print(stars)
print(headline)
print(stars)
# format string for within loop
fmt_str = '|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |'
while err > tol and it <= maxit:
t_start = time.time()
# simulate with new coefficients
simulate_states_controls(s_sim, x_sim, Phi_t, coefs)
# update expectations of z
# update_expectations(s_sim, x_sim, z_sim, Phi_sim)
z_sim[:, :] = 0.0
for i in range(weights.shape[0]):
e = nodes[i, :] # extract nodes
# evaluate future states at each node (stores in S)
g_gu(s_sim, x_sim, e, p, S)
# evaluate future controls at each future state
_complete_poly_impl(S.T, deg, Phi_sim.T)
np.dot(Phi_sim, coefs, out=X)
# compute expectation (stores in H)
h_gu(S, X, p, H)
z_sim += weights[i] * H
# get controls on the simulated points from direct_response
# (stores in new_x)
d_gu(s_sim, z_sim, p, new_x)
# update basis matrix and do regression of new_x on s_sim to get
# updated coefficients
_complete_poly_impl(s_sim.T, deg, Phi_sim.T)
new_coefs = np.ascontiguousarray(lstsq(Phi_sim, new_x)[0])
# check whether they differ from the preceding guess
err = (abs(new_x - x_sim).max())
# update the series of controls and coefficients
x_sim[:, :] = new_x
coefs = (1 - damp) * new_coefs + damp * coefs
if verbose:
# update error and print if `verbose`
err_SA = err / err_0
err_0 = err
t_finish = time.time()
elapsed = t_finish - t_start
if verbose:
print(fmt_str.format(it, err, err_SA, elapsed))
it += 1
if it == maxit:
warnings.warn(UserWarning("Maximum number of iterations reached"))
# compute final time and do final printout if `verbose`
t2 = time.time()
if verbose:
print(stars)
print('Elapsed: {} seconds.'.format(t2 - t1))
print(stars)
cp = CompletePolynomial(deg, len(s0))
cp.fit_values(s_sim, x_sim)
return cp | Sketch of algorithm:
0. Choose levels for the initial states and the simulation length (n_sim)
1. Obtain an initial decision rule -- here using first order perturbation
2. Draw a sequence of innovations epsilon
3. Iterate on the following steps:
- Use the epsilons, initial states, and proposed decision rule to
simulate model forward. Will leave us with time series of states and
controls
- Evaluate expectations using quadrature
- Use direct response to get alternative proposal for controls
- Regress updated controls on the simulated states to get proposal
coefficients. New coefficients are convex combination of previous
coefficients and proposal coefficients. Weights controlled by damp,
where damp is the weight on the old coefficients. This should be
fairly low to increase chances of convergence.
- Check difference between the simulated series of controls and the
direct response version of controls | Below is the instruction that describes the task:
### Input:
Sketch of algorithm:
0. Choose levels for the initial states and the simulation length (n_sim)
1. Obtain an initial decision rule -- here using first order perturbation
2. Draw a sequence of innovations epsilon
3. Iterate on the following steps:
- Use the epsilons, initial states, and proposed decision rule to
simulate model forward. Will leave us with time series of states and
controls
- Evaluate expectations using quadrature
- Use direct response to get alternative proposal for controls
- Regress updated controls on the simulated states to get proposal
coefficients. New coefficients are convex combination of previous
coefficients and proposal coefficients. Weights controlled by damp,
where damp is the weight on the old coefficients. This should be
fairly low to increase chances of convergence.
- Check difference between the simulated series of controls and the
direct response version of controls
### Response:
def gssa(model, maxit=100, tol=1e-8, initial_dr=None, verbose=False,
n_sim=10000, deg=3, damp=0.1, seed=42):
"""
Sketch of algorithm:
0. Choose levels for the initial states and the simulation length (n_sim)
1. Obtain an initial decision rule -- here using first order perturbation
2. Draw a sequence of innovations epsilon
3. Iterate on the following steps:
- Use the epsilons, initial states, and proposed decision rule to
simulate model forward. Will leave us with time series of states and
controls
- Evaluate expectations using quadrature
- Use direct response to get alternative proposal for controls
- Regress updated controls on the simulated states to get proposal
coefficients. New coefficients are convex combination of previous
coefficients and proposal coefficients. Weights controlled by damp,
where damp is the weight on the old coefficients. This should be
fairly low to increase chances of convergence.
- Check difference between the simulated series of controls and the
direct response version of controls
"""
# verify input arguments
if deg < 0 or deg > 5:
raise ValueError("deg must be in [1, 5]")
if damp < 0 or damp > 1:
raise ValueError("damp must be in [0, 1]")
t1 = time.time()
# extract model functions and parameters
g = model.__original_functions__['transition']
g_gu = model.__original_gufunctions__['transition']
h_gu = model.__original_gufunctions__['expectation']
d_gu = model.__original_gufunctions__['direct_response']
p = model.calibration['parameters']
n_s = len(model.symbols["states"])
n_x = len(model.symbols["controls"])
n_z = len(model.symbols["expectations"])
n_eps = len(model.symbols["shocks"])
s0 = model.calibration["states"]
x0 = model.calibration["controls"]
# construct initial decision rule if not supplied
if initial_dr is None:
drp = approximate_controls(model)
else:
drp = initial_dr
# set up quadrature weights and nodes
distrib = model.get_distribution()
nodes, weights = distrib.discretize()
# draw sequence of innovations
np.random.seed(seed)
distrib = model.get_distribution()
sigma = distrib.sigma
epsilon = np.random.multivariate_normal(np.zeros(n_eps), sigma, n_sim)
# simulate initial decision rule and do initial regression for coefs
init_sim = simulate(model, drp, horizon=n_sim, return_array=True,
forcing_shocks=epsilon)
s_sim = init_sim[:, 0, 0:n_s]
x_sim = init_sim[:, 0, n_s:n_s + n_x]
Phi_sim = complete_polynomial(s_sim.T, deg).T
coefs = np.ascontiguousarray(lstsq(Phi_sim, x_sim)[0])
# NOTE: the ascontiguousarray above was needed for numba to compile the
# `np.dot` in the simulation function in nopython mode. Apparently
# the array returned from lstsq is not C-contiguous
# allocate for simulated series of expectations and next period states
z_sim = np.empty((n_sim, n_z))
S = np.empty_like(s_sim)
X = np.empty_like(x_sim)
H = np.empty_like(z_sim)
new_x = np.empty_like(x_sim)
# set initial states and controls
s_sim[0, :] = s0
x_sim[0, :] = x0
Phi_t = np.empty(n_complete(n_s, deg)) # buffer array for simulation
# create jitted function that will simulate states and controls, using
# the epsilon shocks from above (define here as closure over all data
# above).
@jit(nopython=True)
def simulate_states_controls(s, x, Phi_t, coefs):
for t in range(1, n_sim):
g(s[t - 1, :], x[t - 1, :], epsilon[t, :], p, s[t, :])
# fill Phi_t with new complete poly version of s[t, :]
_complete_poly_impl_vec(s[t, :], deg, Phi_t)
# do inner product to get new controls
x[t, :] = Phi_t @coefs
it = 0
err = 10.0
err_0 = 10
if verbose:
headline = '|{0:^4} | {1:10} | {2:8} | {3:8} |'
headline = headline.format('N', ' Error', 'Gain', 'Time')
stars = '-' * len(headline)
print(stars)
print(headline)
print(stars)
# format string for within loop
fmt_str = '|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |'
while err > tol and it <= maxit:
t_start = time.time()
# simulate with new coefficients
simulate_states_controls(s_sim, x_sim, Phi_t, coefs)
# update expectations of z
# update_expectations(s_sim, x_sim, z_sim, Phi_sim)
z_sim[:, :] = 0.0
for i in range(weights.shape[0]):
e = nodes[i, :] # extract nodes
# evaluate future states at each node (stores in S)
g_gu(s_sim, x_sim, e, p, S)
# evaluate future controls at each future state
_complete_poly_impl(S.T, deg, Phi_sim.T)
np.dot(Phi_sim, coefs, out=X)
# compute expectation (stores in H)
h_gu(S, X, p, H)
z_sim += weights[i] * H
# get controls on the simulated points from direct_response
# (stores in new_x)
d_gu(s_sim, z_sim, p, new_x)
# update basis matrix and do regression of new_x on s_sim to get
# updated coefficients
_complete_poly_impl(s_sim.T, deg, Phi_sim.T)
new_coefs = np.ascontiguousarray(lstsq(Phi_sim, new_x)[0])
# check whether they differ from the preceding guess
err = (abs(new_x - x_sim).max())
# update the series of controls and coefficients
x_sim[:, :] = new_x
coefs = (1 - damp) * new_coefs + damp * coefs
if verbose:
# update error and print if `verbose`
err_SA = err / err_0
err_0 = err
t_finish = time.time()
elapsed = t_finish - t_start
if verbose:
print(fmt_str.format(it, err, err_SA, elapsed))
it += 1
if it == maxit:
warnings.warn(UserWarning("Maximum number of iterations reached"))
# compute final time and do final printout if `verbose`
t2 = time.time()
if verbose:
print(stars)
print('Elapsed: {} seconds.'.format(t2 - t1))
print(stars)
cp = CompletePolynomial(deg, len(s0))
cp.fit_values(s_sim, x_sim)
return cp |
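At its core, the coefficient update in gssa is a damped fixed-point iteration: coefs = (1 - damp) * new_coefs + damp * coefs, trading convergence speed for stability. The same idea in isolation, applied to a scalar fixed-point problem (the map and damping value here are arbitrary):

import numpy as np

def damped_fixed_point(f, x0, damp=0.1, tol=1e-10, maxit=1000):
    """Iterate x <- (1 - damp) * f(x) + damp * x until the update is tiny."""
    x = x0
    for it in range(maxit):
        x_new = (1.0 - damp) * f(x) + damp * x
        if abs(x_new - x) < tol:
            return x_new, it
        x = x_new
    raise RuntimeError("no convergence after %d iterations" % maxit)

# x = cos(x) has a unique fixed point near 0.739085.
root, iterations = damped_fixed_point(np.cos, x0=1.0)
print(root, iterations)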
def on_scenario_directory_radio_toggled(self, flag):
"""Autoconnect slot activated when scenario_directory_radio is checked.
:param flag: Flag indicating whether the checkbox was toggled on or
off.
:type flag: bool
"""
if flag:
self.output_directory.setText(self.source_directory.text())
self.output_directory_chooser.setEnabled(not flag) | Autoconnect slot activated when scenario_directory_radio is checked.
:param flag: Flag indicating whether the checkbox was toggled on or
off.
:type flag: bool | Below is the instruction that describes the task:
### Input:
Autoconnect slot activated when scenario_directory_radio is checked.
:param flag: Flag indicating whether the checkbox was toggled on or
off.
:type flag: bool
### Response:
def on_scenario_directory_radio_toggled(self, flag):
"""Autoconnect slot activated when scenario_directory_radio is checked.
:param flag: Flag indicating whether the checkbox was toggled on or
off.
:type flag: bool
"""
if flag:
self.output_directory.setText(self.source_directory.text())
self.output_directory_chooser.setEnabled(not flag) |
def __build_parser_for_fileobject_and_desiredtype(self, obj_on_filesystem: PersistedObject, object_typ: Type[T],
logger: Logger = None) -> Parser:
"""
Builds from the registry, a parser to parse object obj_on_filesystem as an object of type object_type.
To do that, it iterates through all registered parsers in the list in reverse order (last inserted first),
and checks if they support the provided object format (single or multifile) and type.
If several parsers match, it returns a cascadingparser that will try them in order.
:param obj_on_filesystem:
:param object_typ:
:param logger:
:return:
"""
# first remove any non-generic customization
object_type = get_base_generic_type(object_typ)
# find all matching parsers for this
matching, no_type_match_but_ext_match, no_ext_match_but_type_match, no_match = \
self.find_all_matching_parsers(strict=self.is_strict, desired_type=object_type,
required_ext=obj_on_filesystem.ext)
matching_parsers = matching[0] + matching[1] + matching[2]
if len(matching_parsers) == 0:
# No match. Do we have a close match ? (correct type, but not correct extension ?)
if len(no_ext_match_but_type_match) > 0:
raise NoParserFoundForObjectExt.create(obj_on_filesystem, object_type,
set([ext_ for ext_set in
[p.supported_exts for p in no_ext_match_but_type_match]
for ext_ in ext_set]))
else:
# no, no match at all
raise NoParserFoundForObjectType.create(obj_on_filesystem, object_type,
set([typ_ for typ_set in
[p.supported_types for p in no_type_match_but_ext_match]
for typ_ in typ_set]))
elif len(matching_parsers) == 1:
# return the match directly
return matching_parsers[0]
else:
# return a cascade of all parsers, in reverse order (since last is our preferred one)
# print('----- WARNING : Found several parsers able to parse this item. Combining them into a cascade.')
return CascadingParser(list(reversed(matching_parsers))) | Builds from the registry, a parser to parse object obj_on_filesystem as an object of type object_type.
To do that, it iterates through all registered parsers in the list in reverse order (last inserted first),
and checks if they support the provided object format (single or multifile) and type.
If several parsers match, it returns a cascadingparser that will try them in order.
:param obj_on_filesystem:
:param object_typ:
:param logger:
:return: | Below is the instruction that describes the task:
### Input:
Builds from the registry, a parser to parse object obj_on_filesystem as an object of type object_type.
To do that, it iterates through all registered parsers in the list in reverse order (last inserted first),
and checks if they support the provided object format (single or multifile) and type.
If several parsers match, it returns a cascadingparser that will try them in order.
:param obj_on_filesystem:
:param object_typ:
:param logger:
:return:
### Response:
def __build_parser_for_fileobject_and_desiredtype(self, obj_on_filesystem: PersistedObject, object_typ: Type[T],
logger: Logger = None) -> Parser:
"""
Builds from the registry, a parser to parse object obj_on_filesystem as an object of type object_type.
To do that, it iterates through all registered parsers in the list in reverse order (last inserted first),
and checks if they support the provided object format (single or multifile) and type.
If several parsers match, it returns a cascadingparser that will try them in order.
:param obj_on_filesystem:
:param object_typ:
:param logger:
:return:
"""
# first remove any non-generic customization
object_type = get_base_generic_type(object_typ)
# find all matching parsers for this
matching, no_type_match_but_ext_match, no_ext_match_but_type_match, no_match = \
self.find_all_matching_parsers(strict=self.is_strict, desired_type=object_type,
required_ext=obj_on_filesystem.ext)
matching_parsers = matching[0] + matching[1] + matching[2]
if len(matching_parsers) == 0:
# No match. Do we have a close match ? (correct type, but not correct extension ?)
if len(no_ext_match_but_type_match) > 0:
raise NoParserFoundForObjectExt.create(obj_on_filesystem, object_type,
set([ext_ for ext_set in
[p.supported_exts for p in no_ext_match_but_type_match]
for ext_ in ext_set]))
else:
# no, no match at all
raise NoParserFoundForObjectType.create(obj_on_filesystem, object_type,
set([typ_ for typ_set in
[p.supported_types for p in no_type_match_but_ext_match]
for typ_ in typ_set]))
elif len(matching_parsers) == 1:
# return the match directly
return matching_parsers[0]
else:
# return a cascade of all parsers, in reverse order (since last is our preferred one)
# print('----- WARNING : Found several parsers able to parse this item. Combining them into a cascade.')
return CascadingParser(list(reversed(matching_parsers))) |
def _t_of_e(self, a0=None, t_start=None, f0=None, ef=None, t_obs=5.0):
"""Rearranged versions of Peters equations
This function calculates the semi-major axis and eccentricity over time.
"""
if ef is None:
ef = np.ones_like(self.e0)*0.0000001
beta = 64.0/5.0*self.m1*self.m2*(self.m1+self.m2)
e_vals = np.asarray([np.linspace(ef[i], self.e0[i], self.num_points)
for i in range(len(self.e0))])
integrand = self._find_integrand(e_vals)
integral = np.asarray([np.trapz(integrand[:, i:], x=e_vals[:, i:])
for i in range(e_vals.shape[1])]).T
if a0 is None and f0 is None:
a0 = (19./12.*t_start*beta*1/integral[:, 0])**(1./4.) * self._f_e(e_vals[:, -1])
elif a0 is None:
a0 = ((self.m1 + self.m2)/self.f0**2)**(1./3.)
c0 = self._c0_func(a0, self.e0)
a_vals = c0[:, np.newaxis]*self._f_e(e_vals)
delta_t = 12./19*c0[:, np.newaxis]**4/beta[:, np.newaxis]*integral
return e_vals, a_vals, delta_t | Rearranged versions of Peters equations
This function calculates the semi-major axis and eccentricity over time. | Below is the instruction that describes the task:
### Input:
Rearranged versions of Peters equations
This function calculates the semi-major axis and eccentricity over time.
### Response:
def _t_of_e(self, a0=None, t_start=None, f0=None, ef=None, t_obs=5.0):
"""Rearranged versions of Peters equations
This function calculates the semi-major axis and eccentricity over time.
"""
if ef is None:
ef = np.ones_like(self.e0)*0.0000001
beta = 64.0/5.0*self.m1*self.m2*(self.m1+self.m2)
e_vals = np.asarray([np.linspace(ef[i], self.e0[i], self.num_points)
for i in range(len(self.e0))])
integrand = self._find_integrand(e_vals)
integral = np.asarray([np.trapz(integrand[:, i:], x=e_vals[:, i:])
for i in range(e_vals.shape[1])]).T
if a0 is None and f0 is None:
a0 = (19./12.*t_start*beta*1/integral[:, 0])**(1./4.) * self._f_e(e_vals[:, -1])
elif a0 is None:
a0 = ((self.m1 + self.m2)/self.f0**2)**(1./3.)
c0 = self._c0_func(a0, self.e0)
a_vals = c0[:, np.newaxis]*self._f_e(e_vals)
delta_t = 12./19*c0[:, np.newaxis]**4/beta[:, np.newaxis]*integral
return e_vals, a_vals, delta_t |
def set_outflow_BC(self, pores, mode='merge'):
r"""
Adds outflow boundary condition to the selected pores.
Outflow condition simply means that the gradient of the solved
quantity does not change, i.e. is 0.
"""
# Hijack the parse_mode function to verify mode/pores argument
mode = self._parse_mode(mode, allowed=['merge', 'overwrite', 'remove'],
single=True)
pores = self._parse_indices(pores)
# Calculating A[i,i] values to ensure the outflow condition
network = self.project.network
phase = self.project.phases()[self.settings['phase']]
throats = network.find_neighbor_throats(pores=pores)
C12 = network['throat.conns'][throats]
P12 = phase[self.settings['pressure']][C12]
gh = phase[self.settings['hydraulic_conductance']][throats]
Q12 = -gh * np.diff(P12, axis=1).squeeze()
Qp = np.zeros(self.Np)
np.add.at(Qp, C12[:, 0], -Q12)
np.add.at(Qp, C12[:, 1], Q12)
# Store boundary values
if ('pore.bc_outflow' not in self.keys()) or (mode == 'overwrite'):
self['pore.bc_outflow'] = np.nan
self['pore.bc_outflow'][pores] = Qp[pores] | r"""
Adds outflow boundary condition to the selected pores.
Outflow condition simply means that the gradient of the solved
quantity does not change, i.e. is 0. | Below is the instruction that describes the task:
### Input:
r"""
Adds outflow boundary condition to the selected pores.
Outflow condition simply means that the gradient of the solved
quantity does not change, i.e. is 0.
### Response:
def set_outflow_BC(self, pores, mode='merge'):
r"""
Adds outflow boundary condition to the selected pores.
Outflow condition simply means that the gradient of the solved
quantity does not change, i.e. is 0.
"""
# Hijack the parse_mode function to verify mode/pores argument
mode = self._parse_mode(mode, allowed=['merge', 'overwrite', 'remove'],
single=True)
pores = self._parse_indices(pores)
# Calculating A[i,i] values to ensure the outflow condition
network = self.project.network
phase = self.project.phases()[self.settings['phase']]
throats = network.find_neighbor_throats(pores=pores)
C12 = network['throat.conns'][throats]
P12 = phase[self.settings['pressure']][C12]
gh = phase[self.settings['hydraulic_conductance']][throats]
Q12 = -gh * np.diff(P12, axis=1).squeeze()
Qp = np.zeros(self.Np)
np.add.at(Qp, C12[:, 0], -Q12)
np.add.at(Qp, C12[:, 1], Q12)
# Store boundary values
if ('pore.bc_outflow' not in self.keys()) or (mode == 'overwrite'):
self['pore.bc_outflow'] = np.nan
self['pore.bc_outflow'][pores] = Qp[pores] |
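The numerical core of the boundary condition above is the scatter-accumulation of signed throat flow rates into pore totals via np.add.at. Below is a self-contained sketch of just that step with made-up connectivity and rates; it does not use OpenPNM.

import numpy as np

# Each row of conns is a throat from pore conns[i, 0] to pore conns[i, 1]
# carrying rate Q12[i]; np.add.at performs an unbuffered scatter-add, so
# repeated pore indices accumulate correctly.
conns = np.array([[0, 1], [1, 2], [2, 3]])
Q12 = np.array([1.0, 0.4, -0.2])
Qp = np.zeros(4)
np.add.at(Qp, conns[:, 0], -Q12)
np.add.at(Qp, conns[:, 1], Q12)
print(Qp)   # net signed rate per pore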
def dims(x):
"""Returns a list of dimension sizes, or `None` if `rank` is unknown.
For more details, see `help(tf.TensorShape.dims)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
Returns:
shape_as_list: list of sizes or `None` values representing each
dimensions size if known. A size is `tf.Dimension` if input is a
`tf.TensorShape` and an `int` otherwise.
"""
if isinstance(x, tf.TensorShape):
return x.dims
r = tf.TensorShape(x).dims
return None if r is None else list(map(tf.compat.dimension_value, r)) | Returns a list of dimension sizes, or `None` if `rank` is unknown.
For more details, see `help(tf.TensorShape.dims)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
Returns:
shape_as_list: list of sizes or `None` values representing each
dimensions size if known. A size is `tf.Dimension` if input is a
`tf.TensorShape` and an `int` otherwise. | Below is the instruction that describes the task:
### Input:
Returns a list of dimension sizes, or `None` if `rank` is unknown.
For more details, see `help(tf.TensorShape.dims)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
Returns:
shape_as_list: list of sizes or `None` values representing each
dimensions size if known. A size is `tf.Dimension` if input is a
`tf.TensorShape` and an `int` otherwise.
### Response:
def dims(x):
"""Returns a list of dimension sizes, or `None` if `rank` is unknown.
For more details, see `help(tf.TensorShape.dims)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
Returns:
shape_as_list: list of sizes or `None` values representing each
dimensions size if known. A size is `tf.Dimension` if input is a
`tf.TensorShape` and an `int` otherwise.
"""
if isinstance(x, tf.TensorShape):
return x.dims
r = tf.TensorShape(x).dims
return None if r is None else list(map(tf.compat.dimension_value, r)) |
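A quick usage sketch for dims(), assuming the helper above has been pasted into a session with TensorFlow (1.14+ or 2.x) installed; the exact element type of the returned list (Dimension vs. plain int/None) varies with the TensorFlow version.

import tensorflow as tf

print(dims(tf.TensorShape([3, None, 5])))   # per-dimension sizes, unknown dims as None
print(dims([3, None, 5]))                   # plain input is converted via tf.TensorShape
print(dims(tf.TensorShape(None)))           # unknown rank -> None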
def msgmerge(self, locale_file, po_string):
"""
Runs msgmerge on a locale_file and po_string
"""
cmd = "msgmerge -q %s -" % locale_file
p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(msg, err) = p.communicate(input=po_string)
if err:
            # don't raise an exception, some stuff in stderr is just warnings
logging.warning("%s \nfile: %s\npostring: %s" % (err, locale_file, po_string))
        return msg | Runs msgmerge on a locale_file and po_string | Below is the instruction that describes the task:
### Input:
Runs msgmerge on a locale_file and po_string
### Response:
def msgmerge(self, locale_file, po_string):
"""
Runs msgmerge on a locale_file and po_string
"""
cmd = "msgmerge -q %s -" % locale_file
p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(msg, err) = p.communicate(input=po_string)
if err:
            # don't raise an exception, some stuff in stderr is just warnings
logging.warning("%s \nfile: %s\npostring: %s" % (err, locale_file, po_string))
return msg |
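On Python 3 the Popen/communicate path above expects bytes for `input` unless the pipes are opened in text mode, and it also goes through a shell. A shell-free sketch of the same call with subprocess.run is below, assuming gettext's msgmerge is on PATH.

import subprocess

def msgmerge_text(locale_file, po_string):
    """Sketch: run msgmerge with an argument list and text-mode pipes (Python 3.7+)."""
    result = subprocess.run(["msgmerge", "-q", locale_file, "-"],
                            input=po_string, capture_output=True,
                            text=True, check=False)
    if result.stderr:
        # msgmerge emits warnings on stderr even when it succeeds
        print("msgmerge stderr:", result.stderr)
    return result.stdout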
def build_board_checkers():
""" builds a checkers starting board
Printing Grid
0 B 0 B 0 B 0 B
B 0 B 0 B 0 B 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 W 0 W 0 W 0 W
W 0 W 0 W 0 W 0
"""
grd = Grid(8,8, ["B","W"])
for c in range(4):
grd.set_tile(0,(c*2) - 1, "B")
grd.set_tile(1,(c*2) - 0, "B")
grd.set_tile(6,(c*2) + 1, "W")
grd.set_tile(7,(c*2) - 0, "W")
print(grd)
return grd | builds a checkers starting board
Printing Grid
0 B 0 B 0 B 0 B
B 0 B 0 B 0 B 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 W 0 W 0 W 0 W
W 0 W 0 W 0 W 0 | Below is the instruction that describes the task:
### Input:
builds a checkers starting board
Printing Grid
0 B 0 B 0 B 0 B
B 0 B 0 B 0 B 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 W 0 W 0 W 0 W
W 0 W 0 W 0 W 0
### Response:
def build_board_checkers():
""" builds a checkers starting board
Printing Grid
0 B 0 B 0 B 0 B
B 0 B 0 B 0 B 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 W 0 W 0 W 0 W
W 0 W 0 W 0 W 0
"""
grd = Grid(8,8, ["B","W"])
for c in range(4):
grd.set_tile(0,(c*2) - 1, "B")
grd.set_tile(1,(c*2) - 0, "B")
grd.set_tile(6,(c*2) + 1, "W")
grd.set_tile(7,(c*2) - 0, "W")
print(grd)
return grd |
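A plain-Python rendering of the same placement rule, without the snippet's Grid class. Note that on row 0 the original passes column (c*2) - 1, which is -1 for c == 0; with ordinary Python sequence indexing that wraps to the last column, so the black pieces still land on columns 1, 3, 5, 7 (whether Grid.set_tile behaves the same way is an assumption).

board = [["0"] * 8 for _ in range(8)]
for c in range(4):
    board[0][(c * 2) - 1] = "B"   # -1 wraps to column 7
    board[1][(c * 2) - 0] = "B"
    board[6][(c * 2) + 1] = "W"
    board[7][(c * 2) - 0] = "W"
for row in board:
    print(" ".join(row))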
def merge_with(self, other):
"""Returns a `TensorShape` combining the information in `self` and `other`.
The dimensions in `self` and `other` are merged elementwise,
according to the rules defined for `Dimension.merge_with()`.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not convertible.
"""
other = as_shape(other)
if self._dims is None:
return other
else:
try:
self.assert_same_rank(other)
new_dims = []
for i, dim in enumerate(self._dims):
new_dims.append(dim.merge_with(other[i]))
return TensorShape(new_dims)
except ValueError:
raise ValueError("Shapes %s and %s are not convertible" % (self, other)) | Returns a `TensorShape` combining the information in `self` and `other`.
The dimensions in `self` and `other` are merged elementwise,
according to the rules defined for `Dimension.merge_with()`.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not convertible. | Below is the instruction that describes the task:
### Input:
Returns a `TensorShape` combining the information in `self` and `other`.
The dimensions in `self` and `other` are merged elementwise,
according to the rules defined for `Dimension.merge_with()`.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not convertible.
### Response:
def merge_with(self, other):
"""Returns a `TensorShape` combining the information in `self` and `other`.
The dimensions in `self` and `other` are merged elementwise,
according to the rules defined for `Dimension.merge_with()`.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not convertible.
"""
other = as_shape(other)
if self._dims is None:
return other
else:
try:
self.assert_same_rank(other)
new_dims = []
for i, dim in enumerate(self._dims):
new_dims.append(dim.merge_with(other[i]))
return TensorShape(new_dims)
except ValueError:
raise ValueError("Shapes %s and %s are not convertible" % (self, other)) |
def _post(url, headers={}, data=None, files=None):
"""Tries to POST data to an endpoint"""
try:
response = requests.post(url, headers=headers, data=data, files=files, verify=VERIFY_SSL)
return _process_response(response)
except requests.exceptions.RequestException as e:
        _log_and_raise_exception('Error connecting with foursquare API', e) | Tries to POST data to an endpoint | Below is the instruction that describes the task:
### Input:
Tries to POST data to an endpoint
### Response:
def _post(url, headers={}, data=None, files=None):
"""Tries to POST data to an endpoint"""
try:
response = requests.post(url, headers=headers, data=data, files=files, verify=VERIFY_SSL)
return _process_response(response)
except requests.exceptions.RequestException as e:
_log_and_raise_exception('Error connecting with foursquare API', e) |
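A standalone sketch of the same POST-and-wrap-errors pattern using requests directly; `_process_response`, `_log_and_raise_exception` and VERIFY_SSL are module-level helpers of the original client, so plain substitutes are used here.

import requests

def simple_post(url, data=None, headers=None, files=None, verify=True):
    """Hedged stand-in for _post(): POST and surface network errors uniformly."""
    try:
        response = requests.post(url, headers=headers or {}, data=data,
                                 files=files, verify=verify, timeout=10)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as exc:
        raise RuntimeError("Error connecting with foursquare API") from exc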
def is_entailed_by(self, other):
"""
Given two beliefstates, returns True iff the calling instance
implies the other beliefstate, meaning it contains at least the same
structure (for all structures) and all values (for all defined values).
Inverse of `entails`.
Note: this only compares the items in the DictCell, not `pos`,
`environment_variables` or `deferred_effects`.
"""
for (s_key, s_val) in self:
if s_key in other:
if not hasattr(other[s_key], 'implies'):
raise Exception("Cell for %s is missing implies()" % s_key)
if not other[s_key].implies(s_val):
return False
else:
return False
return True | Given two beliefstates, returns True iff the calling instance
implies the other beliefstate, meaning it contains at least the same
structure (for all structures) and all values (for all defined values).
Inverse of `entails`.
Note: this only compares the items in the DictCell, not `pos`,
`environment_variables` or `deferred_effects`. | Below is the instruction that describes the task:
### Input:
Given two beliefstates, returns True iff the calling instance
implies the other beliefstate, meaning it contains at least the same
structure (for all structures) and all values (for all defined values).
Inverse of `entails`.
Note: this only compares the items in the DictCell, not `pos`,
`environment_variables` or `deferred_effects`.
### Response:
def is_entailed_by(self, other):
"""
Given two beliefstates, returns True iff the calling instance
implies the other beliefstate, meaning it contains at least the same
structure (for all structures) and all values (for all defined values).
Inverse of `entails`.
Note: this only compares the items in the DictCell, not `pos`,
`environment_variables` or `deferred_effects`.
"""
for (s_key, s_val) in self:
if s_key in other:
if not hasattr(other[s_key], 'implies'):
raise Exception("Cell for %s is missing implies()" % s_key)
if not other[s_key].implies(s_val):
return False
else:
return False
return True |
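The entailment check walks every key of the calling belief state and asks the other side's cell to imply its value. The same idea on plain dictionaries (equality standing in for implies()) looks like this; it is an analogy only, not the DictCell semantics.

def dict_is_entailed_by(subset, other):
    """Plain-dict analogue: every key/value of `subset` must be matched in `other`."""
    return all(key in other and other[key] == value
               for key, value in subset.items())

print(dict_is_entailed_by({"color": "red"}, {"color": "red", "size": 2}))   # True
print(dict_is_entailed_by({"color": "red", "size": 3}, {"color": "red"}))   # False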
def request(self, action, data={}, headers={}, method='GET'):
"""
Append the REST headers to every request
"""
headers = {
"Authorization": "Bearer " + self.token,
"Content-Type": "application/json",
"X-Version": "1",
"Accept": "application/json"
}
        return Transport.request(self, action, data, headers, method) | Append the REST headers to every request | Below is the instruction that describes the task:
### Input:
Append the REST headers to every request
### Response:
def request(self, action, data={}, headers={}, method='GET'):
"""
Append the REST headers to every request
"""
headers = {
"Authorization": "Bearer " + self.token,
"Content-Type": "application/json",
"X-Version": "1",
"Accept": "application/json"
}
return Transport.request(self, action, data, headers, method) |
def process_inlines(parser, token):
"""
Searches through the provided content and applies inlines where ever they
are found.
Syntax::
{% process_inlines entry.body [in template_dir] [as varname] }
Examples::
{% process_inlines entry.body %}
{% process_inlines entry.body as body %}
{% process_inlines entry.body in 'inlines/sidebar' %}
{% process_inlines entry.body in 'inlines/sidebar' as body %}
"""
args = token.split_contents()
if not len(args) in (2, 4, 6):
raise template.TemplateSyntaxError("%r tag requires either 1, 3 or 5 arguments." % args[0])
var_name = args[1]
ALLOWED_ARGS = ['as', 'in']
kwargs = { 'template_directory': None }
if len(args) > 2:
tuples = zip(*[args[2:][i::2] for i in range(2)])
for k,v in tuples:
if not k in ALLOWED_ARGS:
raise template.TemplateSyntaxError("%r tag options arguments must be one of %s." % (args[0], ', '.join(ALLOWED_ARGS)))
if k == 'in':
kwargs['template_directory'] = v
if k == 'as':
kwargs['asvar'] = v
return InlinesNode(var_name, **kwargs) | Searches through the provided content and applies inlines where ever they
are found.
Syntax::
{% process_inlines entry.body [in template_dir] [as varname] }
Examples::
{% process_inlines entry.body %}
{% process_inlines entry.body as body %}
{% process_inlines entry.body in 'inlines/sidebar' %}
{% process_inlines entry.body in 'inlines/sidebar' as body %} | Below is the instruction that describes the task:
### Input:
Searches through the provided content and applies inlines where ever they
are found.
Syntax::
{% process_inlines entry.body [in template_dir] [as varname] }
Examples::
{% process_inlines entry.body %}
{% process_inlines entry.body as body %}
{% process_inlines entry.body in 'inlines/sidebar' %}
{% process_inlines entry.body in 'inlines/sidebar' as body %}
### Response:
def process_inlines(parser, token):
"""
Searches through the provided content and applies inlines where ever they
are found.
Syntax::
{% process_inlines entry.body [in template_dir] [as varname] }
Examples::
{% process_inlines entry.body %}
{% process_inlines entry.body as body %}
{% process_inlines entry.body in 'inlines/sidebar' %}
{% process_inlines entry.body in 'inlines/sidebar' as body %}
"""
args = token.split_contents()
if not len(args) in (2, 4, 6):
raise template.TemplateSyntaxError("%r tag requires either 1, 3 or 5 arguments." % args[0])
var_name = args[1]
ALLOWED_ARGS = ['as', 'in']
kwargs = { 'template_directory': None }
if len(args) > 2:
tuples = zip(*[args[2:][i::2] for i in range(2)])
for k,v in tuples:
if not k in ALLOWED_ARGS:
raise template.TemplateSyntaxError("%r tag options arguments must be one of %s." % (args[0], ', '.join(ALLOWED_ARGS)))
if k == 'in':
kwargs['template_directory'] = v
if k == 'as':
kwargs['asvar'] = v
return InlinesNode(var_name, **kwargs) |
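The option parsing above pairs keywords with their values using a zip over two stride-2 slices of the remaining tokens. A standalone sketch of just that step is below; the token text is made up, and Django's token.split_contents() handles quoted arguments more carefully than str.split().

args = "process_inlines entry.body in 'inlines/sidebar' as body".split()
pairs = zip(*[args[2:][i::2] for i in range(2)])
print(list(pairs))   # [('in', "'inlines/sidebar'"), ('as', 'body')]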
def amount(self):
"""
Determine the sum of mole amounts of all the compounds.
:returns: Amount. [kmol]
"""
return sum(self.get_compound_amount(c) for c in self.material.compounds) | Determine the sum of mole amounts of all the compounds.
:returns: Amount. [kmol] | Below is the instruction that describes the task:
### Input:
Determine the sum of mole amounts of all the compounds.
:returns: Amount. [kmol]
### Response:
def amount(self):
"""
Determine the sum of mole amounts of all the compounds.
:returns: Amount. [kmol]
"""
return sum(self.get_compound_amount(c) for c in self.material.compounds) |
def overlay_gateway_sflow_sflow_vlan_action(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
sflow = ET.SubElement(overlay_gateway, "sflow")
sflow_profile_name_key = ET.SubElement(sflow, "sflow-profile-name")
sflow_profile_name_key.text = kwargs.pop('sflow_profile_name')
sflow_vlan_action = ET.SubElement(sflow, "sflow-vlan-action")
sflow_vlan_action.text = kwargs.pop('sflow_vlan_action')
callback = kwargs.pop('callback', self._callback)
        return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def overlay_gateway_sflow_sflow_vlan_action(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
sflow = ET.SubElement(overlay_gateway, "sflow")
sflow_profile_name_key = ET.SubElement(sflow, "sflow-profile-name")
sflow_profile_name_key.text = kwargs.pop('sflow_profile_name')
sflow_vlan_action = ET.SubElement(sflow, "sflow-vlan-action")
sflow_vlan_action.text = kwargs.pop('sflow_vlan_action')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def main():
"""Entry point for command line usage."""
import colorama
import argparse
import logging
import sys
import os
parser = argparse.ArgumentParser(prog="gulpless",
description="Simple build system.")
parser.add_argument("-v", "--version",
action="version",
version="%(prog)s 0.7.6")
parser.add_argument("-d", "--directory",
action="store",
default=os.getcwd(),
help="Look for `build.py` in this folder (defaults to "
"the current directory)")
parser.add_argument("mode",
action="store",
choices=["build", "interactive"],
default="interactive",
metavar="mode",
nargs="?",
help="If `interactive` (the default), will wait for "
"filesystem events and attempt to keep the input "
"and output folders in sync. If `build`, it will "
"attempt to build all updated files, then exit.")
args = parser.parse_args()
os.chdir(args.directory)
sys.path.append(os.getcwd())
if os.environ.get("TERM") == "cygwin":
# colorama doesn't play well with git bash
del os.environ["TERM"]
colorama.init()
os.environ["TERM"] = "cygwin"
else:
colorama.init()
try:
old, sys.dont_write_bytecode = sys.dont_write_bytecode, True
import build
except ImportError:
sys.exit("No `build.py` found in current folder.")
finally:
sys.dont_write_bytecode = old
try:
logging.basicConfig(level=build.LOGGING,
format="%(message)s")
except AttributeError:
logging.basicConfig(level=logging.INFO,
format="%(message)s")
reactor = Reactor(build.SRC, build.DEST)
for handler in build.HANDLERS:
reactor.add_handler(handler)
    reactor.run(args.mode == "build") | Entry point for command line usage. | Below is the instruction that describes the task:
### Input:
Entry point for command line usage.
### Response:
def main():
"""Entry point for command line usage."""
import colorama
import argparse
import logging
import sys
import os
parser = argparse.ArgumentParser(prog="gulpless",
description="Simple build system.")
parser.add_argument("-v", "--version",
action="version",
version="%(prog)s 0.7.6")
parser.add_argument("-d", "--directory",
action="store",
default=os.getcwd(),
help="Look for `build.py` in this folder (defaults to "
"the current directory)")
parser.add_argument("mode",
action="store",
choices=["build", "interactive"],
default="interactive",
metavar="mode",
nargs="?",
help="If `interactive` (the default), will wait for "
"filesystem events and attempt to keep the input "
"and output folders in sync. If `build`, it will "
"attempt to build all updated files, then exit.")
args = parser.parse_args()
os.chdir(args.directory)
sys.path.append(os.getcwd())
if os.environ.get("TERM") == "cygwin":
# colorama doesn't play well with git bash
del os.environ["TERM"]
colorama.init()
os.environ["TERM"] = "cygwin"
else:
colorama.init()
try:
old, sys.dont_write_bytecode = sys.dont_write_bytecode, True
import build
except ImportError:
sys.exit("No `build.py` found in current folder.")
finally:
sys.dont_write_bytecode = old
try:
logging.basicConfig(level=build.LOGGING,
format="%(message)s")
except AttributeError:
logging.basicConfig(level=logging.INFO,
format="%(message)s")
reactor = Reactor(build.SRC, build.DEST)
for handler in build.HANDLERS:
reactor.add_handler(handler)
reactor.run(args.mode == "build") |
def members(name, members_list, root=None):
'''
Replaces members of the group with a provided list.
CLI Example:
salt '*' group.members foo 'user1,user2,user3,...'
Replaces a membership list for a local group 'foo'.
foo:x:1234:user1,user2,user3,...
'''
cmd = 'chgrpmem -m = {0} {1}'.format(members_list, name)
retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
return not retcode | Replaces members of the group with a provided list.
CLI Example:
salt '*' group.members foo 'user1,user2,user3,...'
Replaces a membership list for a local group 'foo'.
foo:x:1234:user1,user2,user3,... | Below is the instruction that describes the task:
### Input:
Replaces members of the group with a provided list.
CLI Example:
salt '*' group.members foo 'user1,user2,user3,...'
Replaces a membership list for a local group 'foo'.
foo:x:1234:user1,user2,user3,...
### Response:
def members(name, members_list, root=None):
'''
Replaces members of the group with a provided list.
CLI Example:
salt '*' group.members foo 'user1,user2,user3,...'
Replaces a membership list for a local group 'foo'.
foo:x:1234:user1,user2,user3,...
'''
cmd = 'chgrpmem -m = {0} {1}'.format(members_list, name)
retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
return not retcode |
def to_dict(self, save_data=True):
"""
Convert the object into a json serializable dictionary.
:param boolean save_data: if true, it adds the training data self.X and self.Y to the dictionary
:return dict: json serializable dictionary containing the needed information to instantiate the object
"""
input_dict = super(SparseGP, self).to_dict(save_data)
input_dict["class"] = "GPy.core.SparseGP"
input_dict["Z"] = self.Z.tolist()
return input_dict | Convert the object into a json serializable dictionary.
:param boolean save_data: if true, it adds the training data self.X and self.Y to the dictionary
:return dict: json serializable dictionary containing the needed information to instantiate the object | Below is the instruction that describes the task:
### Input:
Convert the object into a json serializable dictionary.
:param boolean save_data: if true, it adds the training data self.X and self.Y to the dictionary
:return dict: json serializable dictionary containing the needed information to instantiate the object
### Response:
def to_dict(self, save_data=True):
"""
Convert the object into a json serializable dictionary.
:param boolean save_data: if true, it adds the training data self.X and self.Y to the dictionary
:return dict: json serializable dictionary containing the needed information to instantiate the object
"""
input_dict = super(SparseGP, self).to_dict(save_data)
input_dict["class"] = "GPy.core.SparseGP"
input_dict["Z"] = self.Z.tolist()
return input_dict |
def acl_show(self, msg, args):
"""Show current allow and deny blocks for the given acl."""
name = args[0] if len(args) > 0 else None
if name is None:
return "%s: The following ACLs are defined: %s" % (msg.user, ', '.join(self._acl.keys()))
if name not in self._acl:
return "Sorry, couldn't find an acl named '%s'" % name
return '\n'.join([
"%s: ACL '%s' is defined as follows:" % (msg.user, name),
"allow: %s" % ', '.join(self._acl[name]['allow']),
"deny: %s" % ', '.join(self._acl[name]['deny'])
        ]) | Show current allow and deny blocks for the given acl. | Below is the instruction that describes the task:
### Input:
Show current allow and deny blocks for the given acl.
### Response:
def acl_show(self, msg, args):
"""Show current allow and deny blocks for the given acl."""
name = args[0] if len(args) > 0 else None
if name is None:
return "%s: The following ACLs are defined: %s" % (msg.user, ', '.join(self._acl.keys()))
if name not in self._acl:
return "Sorry, couldn't find an acl named '%s'" % name
return '\n'.join([
"%s: ACL '%s' is defined as follows:" % (msg.user, name),
"allow: %s" % ', '.join(self._acl[name]['allow']),
"deny: %s" % ', '.join(self._acl[name]['deny'])
]) |
def extract_ipv4(roster_order, ipv4):
'''
Extract the preferred IP address from the ipv4 grain
'''
for ip_type in roster_order:
for ip_ in ipv4:
if ':' in ip_:
continue
if not salt.utils.validate.net.ipv4_addr(ip_):
continue
if ip_type == 'local' and ip_.startswith('127.'):
return ip_
elif ip_type == 'private' and not salt.utils.cloud.is_public_ip(ip_):
return ip_
elif ip_type == 'public' and salt.utils.cloud.is_public_ip(ip_):
return ip_
    return None | Extract the preferred IP address from the ipv4 grain | Below is the instruction that describes the task:
### Input:
Extract the preferred IP address from the ipv4 grain
### Response:
def extract_ipv4(roster_order, ipv4):
'''
Extract the preferred IP address from the ipv4 grain
'''
for ip_type in roster_order:
for ip_ in ipv4:
if ':' in ip_:
continue
if not salt.utils.validate.net.ipv4_addr(ip_):
continue
if ip_type == 'local' and ip_.startswith('127.'):
return ip_
elif ip_type == 'private' and not salt.utils.cloud.is_public_ip(ip_):
return ip_
elif ip_type == 'public' and salt.utils.cloud.is_public_ip(ip_):
return ip_
return None |
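A rough standalone version of the same local/private/public bucketing using the stdlib ipaddress module in place of the salt helpers; the order of the checks matters because loopback addresses also count as private.

import ipaddress

def classify_ipv4(ip):
    """Sketch: classify an address the way the roster_order loop expects."""
    addr = ipaddress.ip_address(ip)
    if addr.is_loopback:
        return "local"
    if addr.is_private:
        return "private"
    return "public"

for ip in ("127.0.0.1", "192.168.1.10", "8.8.8.8"):
    print(ip, "->", classify_ipv4(ip))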
def assign_tip_labels_and_colors(self):
"assign tip labels based on user provided kwargs"
# COLOR
# tip color overrides tipstyle.fill
if self.style.tip_labels_colors:
#if self.style.tip_labels_style.fill:
# self.style.tip_labels_style.fill = None
if self.ttree._fixed_order:
if isinstance(self.style.tip_labels_colors, (list, np.ndarray)):
cols = np.array(self.style.tip_labels_colors)
orde = cols[self.ttree._fixed_idx]
self.style.tip_labels_colors = list(orde)
# LABELS
# False == hide tip labels
if self.style.tip_labels is False:
self.style.tip_labels_style["-toyplot-anchor-shift"] = "0px"
self.tip_labels = ["" for i in self.ttree.get_tip_labels()]
# LABELS
# user entered something...
else:
# if user did not change label-offset then shift it here
if not self.style.tip_labels_style["-toyplot-anchor-shift"]:
self.style.tip_labels_style["-toyplot-anchor-shift"] = "15px"
# if user entered list in get_tip_labels order reverse it for plot
if isinstance(self.style.tip_labels, list):
self.tip_labels = self.style.tip_labels
# True assigns tip labels from tree
else:
if self.ttree._fixed_order:
self.tip_labels = self.ttree._fixed_order
else:
                    self.tip_labels = self.ttree.get_tip_labels() | assign tip labels based on user provided kwargs | Below is the instruction that describes the task:
### Input:
assign tip labels based on user provided kwargs
### Response:
def assign_tip_labels_and_colors(self):
"assign tip labels based on user provided kwargs"
# COLOR
# tip color overrides tipstyle.fill
if self.style.tip_labels_colors:
#if self.style.tip_labels_style.fill:
# self.style.tip_labels_style.fill = None
if self.ttree._fixed_order:
if isinstance(self.style.tip_labels_colors, (list, np.ndarray)):
cols = np.array(self.style.tip_labels_colors)
orde = cols[self.ttree._fixed_idx]
self.style.tip_labels_colors = list(orde)
# LABELS
# False == hide tip labels
if self.style.tip_labels is False:
self.style.tip_labels_style["-toyplot-anchor-shift"] = "0px"
self.tip_labels = ["" for i in self.ttree.get_tip_labels()]
# LABELS
# user entered something...
else:
# if user did not change label-offset then shift it here
if not self.style.tip_labels_style["-toyplot-anchor-shift"]:
self.style.tip_labels_style["-toyplot-anchor-shift"] = "15px"
# if user entered list in get_tip_labels order reverse it for plot
if isinstance(self.style.tip_labels, list):
self.tip_labels = self.style.tip_labels
# True assigns tip labels from tree
else:
if self.ttree._fixed_order:
self.tip_labels = self.ttree._fixed_order
else:
self.tip_labels = self.ttree.get_tip_labels() |
def setup_handlers():
'''
sets up the sentry handler
'''
__grains__ = salt.loader.grains(__opts__)
__salt__ = salt.loader.minion_mods(__opts__)
if 'sentry_handler' not in __opts__:
log.debug('No \'sentry_handler\' key was found in the configuration')
return False
options = {}
dsn = get_config_value('dsn')
if dsn is not None:
try:
# support raven ver 5.5.0
from raven.transport import TransportRegistry, default_transports
from raven.utils.urlparse import urlparse
transport_registry = TransportRegistry(default_transports)
url = urlparse(dsn)
if not transport_registry.supported_scheme(url.scheme):
raise ValueError('Unsupported Sentry DSN scheme: {0}'.format(url.scheme))
except ValueError as exc:
log.info(
'Raven failed to parse the configuration provided DSN: %s', exc
)
if not dsn:
for key in ('project', 'servers', 'public_key', 'secret_key'):
config_value = get_config_value(key)
if config_value is None and key not in options:
log.debug(
'The required \'sentry_handler\' configuration key, '
'\'%s\', is not properly configured. Not configuring '
'the sentry logging handler.', key
)
return
elif config_value is None:
continue
options[key] = config_value
# site: An optional, arbitrary string to identify this client installation.
options.update({
# site: An optional, arbitrary string to identify this client
# installation
'site': get_config_value('site'),
# name: This will override the server_name value for this installation.
# Defaults to socket.gethostname()
'name': get_config_value('name'),
# exclude_paths: Extending this allow you to ignore module prefixes
# when sentry attempts to discover which function an error comes from
'exclude_paths': get_config_value('exclude_paths', ()),
# include_paths: For example, in Django this defaults to your list of
# INSTALLED_APPS, and is used for drilling down where an exception is
# located
'include_paths': get_config_value('include_paths', ()),
# list_max_length: The maximum number of items a list-like container
# should store.
'list_max_length': get_config_value('list_max_length'),
# string_max_length: The maximum characters of a string that should be
# stored.
'string_max_length': get_config_value('string_max_length'),
# auto_log_stacks: Should Raven automatically log frame stacks
# (including locals) all calls as it would for exceptions.
'auto_log_stacks': get_config_value('auto_log_stacks'),
# timeout: If supported, the timeout value for sending messages to
# remote.
'timeout': get_config_value('timeout', 1),
# processors: A list of processors to apply to events before sending
# them to the Sentry server. Useful for sending additional global state
# data or sanitizing data that you want to keep off of the server.
'processors': get_config_value('processors'),
# dsn: Ensure the DSN is passed into the client
'dsn': dsn
})
client = raven.Client(**options)
context = get_config_value('context')
context_dict = {}
if context is not None:
for tag in context:
try:
tag_value = __grains__[tag]
except KeyError:
log.debug('Sentry tag \'%s\' not found in grains.', tag)
continue
if tag_value:
context_dict[tag] = tag_value
if context_dict:
client.context.merge({'tags': context_dict})
try:
handler = SentryHandler(client)
exclude_patterns = get_config_value('exclude_patterns', None)
if exclude_patterns:
filter_regexes = [re.compile(pattern) for pattern in exclude_patterns]
class FilterExcludedMessages(object):
@staticmethod
def filter(record):
m = record.getMessage()
return not any(regex.search(m) for regex in filter_regexes)
handler.addFilter(FilterExcludedMessages())
handler.setLevel(LOG_LEVELS[get_config_value('log_level', 'error')])
return handler
except ValueError as exc:
        log.debug('Failed to setup the sentry logging handler', exc_info=True) | sets up the sentry handler | Below is the instruction that describes the task:
### Input:
sets up the sentry handler
### Response:
def setup_handlers():
'''
sets up the sentry handler
'''
__grains__ = salt.loader.grains(__opts__)
__salt__ = salt.loader.minion_mods(__opts__)
if 'sentry_handler' not in __opts__:
log.debug('No \'sentry_handler\' key was found in the configuration')
return False
options = {}
dsn = get_config_value('dsn')
if dsn is not None:
try:
# support raven ver 5.5.0
from raven.transport import TransportRegistry, default_transports
from raven.utils.urlparse import urlparse
transport_registry = TransportRegistry(default_transports)
url = urlparse(dsn)
if not transport_registry.supported_scheme(url.scheme):
raise ValueError('Unsupported Sentry DSN scheme: {0}'.format(url.scheme))
except ValueError as exc:
log.info(
'Raven failed to parse the configuration provided DSN: %s', exc
)
if not dsn:
for key in ('project', 'servers', 'public_key', 'secret_key'):
config_value = get_config_value(key)
if config_value is None and key not in options:
log.debug(
'The required \'sentry_handler\' configuration key, '
'\'%s\', is not properly configured. Not configuring '
'the sentry logging handler.', key
)
return
elif config_value is None:
continue
options[key] = config_value
# site: An optional, arbitrary string to identify this client installation.
options.update({
# site: An optional, arbitrary string to identify this client
# installation
'site': get_config_value('site'),
# name: This will override the server_name value for this installation.
# Defaults to socket.gethostname()
'name': get_config_value('name'),
# exclude_paths: Extending this allow you to ignore module prefixes
# when sentry attempts to discover which function an error comes from
'exclude_paths': get_config_value('exclude_paths', ()),
# include_paths: For example, in Django this defaults to your list of
# INSTALLED_APPS, and is used for drilling down where an exception is
# located
'include_paths': get_config_value('include_paths', ()),
# list_max_length: The maximum number of items a list-like container
# should store.
'list_max_length': get_config_value('list_max_length'),
# string_max_length: The maximum characters of a string that should be
# stored.
'string_max_length': get_config_value('string_max_length'),
# auto_log_stacks: Should Raven automatically log frame stacks
# (including locals) all calls as it would for exceptions.
'auto_log_stacks': get_config_value('auto_log_stacks'),
# timeout: If supported, the timeout value for sending messages to
# remote.
'timeout': get_config_value('timeout', 1),
# processors: A list of processors to apply to events before sending
# them to the Sentry server. Useful for sending additional global state
# data or sanitizing data that you want to keep off of the server.
'processors': get_config_value('processors'),
# dsn: Ensure the DSN is passed into the client
'dsn': dsn
})
client = raven.Client(**options)
context = get_config_value('context')
context_dict = {}
if context is not None:
for tag in context:
try:
tag_value = __grains__[tag]
except KeyError:
log.debug('Sentry tag \'%s\' not found in grains.', tag)
continue
if tag_value:
context_dict[tag] = tag_value
if context_dict:
client.context.merge({'tags': context_dict})
try:
handler = SentryHandler(client)
exclude_patterns = get_config_value('exclude_patterns', None)
if exclude_patterns:
filter_regexes = [re.compile(pattern) for pattern in exclude_patterns]
class FilterExcludedMessages(object):
@staticmethod
def filter(record):
m = record.getMessage()
return not any(regex.search(m) for regex in filter_regexes)
handler.addFilter(FilterExcludedMessages())
handler.setLevel(LOG_LEVELS[get_config_value('log_level', 'error')])
return handler
except ValueError as exc:
log.debug('Failed to setup the sentry logging handler', exc_info=True) |
def delete_contacts(
self,
ids: List[int]
):
"""Use this method to delete contacts from your Telegram address book.
Args:
ids (List of ``int``):
A list of unique identifiers for the target users.
Can be an ID (int), a username (string) or phone number (string).
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
contacts = []
for i in ids:
try:
input_user = self.resolve_peer(i)
except PeerIdInvalid:
continue
else:
if isinstance(input_user, types.InputPeerUser):
contacts.append(input_user)
return self.send(
functions.contacts.DeleteContacts(
id=contacts
)
) | Use this method to delete contacts from your Telegram address book.
Args:
ids (List of ``int``):
A list of unique identifiers for the target users.
Can be an ID (int), a username (string) or phone number (string).
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. | Below is the instruction that describes the task:
### Input:
Use this method to delete contacts from your Telegram address book.
Args:
ids (List of ``int``):
A list of unique identifiers for the target users.
Can be an ID (int), a username (string) or phone number (string).
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
### Response:
def delete_contacts(
self,
ids: List[int]
):
"""Use this method to delete contacts from your Telegram address book.
Args:
ids (List of ``int``):
A list of unique identifiers for the target users.
Can be an ID (int), a username (string) or phone number (string).
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
contacts = []
for i in ids:
try:
input_user = self.resolve_peer(i)
except PeerIdInvalid:
continue
else:
if isinstance(input_user, types.InputPeerUser):
contacts.append(input_user)
return self.send(
functions.contacts.DeleteContacts(
id=contacts
)
) |
def detached_signature_for(plaintext_str, keys):
"""
Signs the given plaintext string and returns the detached signature.
A detached signature in GPG speak is a separate blob of data containing
a signature for the specified plaintext.
:param bytes plaintext_str: bytestring to sign
:param keys: list of one or more key to sign with.
:type keys: list[gpg.gpgme._gpgme_key]
:returns: A list of signature and the signed blob of data
:rtype: tuple[list[gpg.results.NewSignature], str]
"""
ctx = gpg.core.Context(armor=True)
ctx.signers = keys
(sigblob, sign_result) = ctx.sign(plaintext_str,
mode=gpg.constants.SIG_MODE_DETACH)
return sign_result.signatures, sigblob | Signs the given plaintext string and returns the detached signature.
A detached signature in GPG speak is a separate blob of data containing
a signature for the specified plaintext.
:param bytes plaintext_str: bytestring to sign
:param keys: list of one or more key to sign with.
:type keys: list[gpg.gpgme._gpgme_key]
:returns: A list of signature and the signed blob of data
:rtype: tuple[list[gpg.results.NewSignature], str] | Below is the instruction that describes the task:
### Input:
Signs the given plaintext string and returns the detached signature.
A detached signature in GPG speak is a separate blob of data containing
a signature for the specified plaintext.
:param bytes plaintext_str: bytestring to sign
:param keys: list of one or more key to sign with.
:type keys: list[gpg.gpgme._gpgme_key]
:returns: A list of signature and the signed blob of data
:rtype: tuple[list[gpg.results.NewSignature], str]
### Response:
def detached_signature_for(plaintext_str, keys):
"""
Signs the given plaintext string and returns the detached signature.
A detached signature in GPG speak is a separate blob of data containing
a signature for the specified plaintext.
:param bytes plaintext_str: bytestring to sign
:param keys: list of one or more key to sign with.
:type keys: list[gpg.gpgme._gpgme_key]
:returns: A list of signature and the signed blob of data
:rtype: tuple[list[gpg.results.NewSignature], str]
"""
ctx = gpg.core.Context(armor=True)
ctx.signers = keys
(sigblob, sign_result) = ctx.sign(plaintext_str,
mode=gpg.constants.SIG_MODE_DETACH)
return sign_result.signatures, sigblob |
def rename_variables(expression: Expression, renaming: Dict[str, str]) -> Expression:
"""Rename the variables in the expression according to the given dictionary.
Args:
expression:
The expression in which the variables are renamed.
renaming:
The renaming dictionary. Maps old variable names to new ones.
Variable names not occuring in the dictionary are left unchanged.
Returns:
The expression with renamed variables.
"""
if isinstance(expression, Operation):
if hasattr(expression, 'variable_name'):
variable_name = renaming.get(expression.variable_name, expression.variable_name)
return create_operation_expression(
expression, [rename_variables(o, renaming) for o in op_iter(expression)], variable_name=variable_name
)
operands = [rename_variables(o, renaming) for o in op_iter(expression)]
return create_operation_expression(expression, operands)
elif isinstance(expression, Expression):
expression = expression.__copy__()
expression.variable_name = renaming.get(expression.variable_name, expression.variable_name)
return expression | Rename the variables in the expression according to the given dictionary.
Args:
expression:
The expression in which the variables are renamed.
renaming:
The renaming dictionary. Maps old variable names to new ones.
Variable names not occuring in the dictionary are left unchanged.
Returns:
The expression with renamed variables. | Below is the instruction that describes the task:
### Input:
Rename the variables in the expression according to the given dictionary.
Args:
expression:
The expression in which the variables are renamed.
renaming:
The renaming dictionary. Maps old variable names to new ones.
Variable names not occuring in the dictionary are left unchanged.
Returns:
The expression with renamed variables.
### Response:
def rename_variables(expression: Expression, renaming: Dict[str, str]) -> Expression:
"""Rename the variables in the expression according to the given dictionary.
Args:
expression:
The expression in which the variables are renamed.
renaming:
The renaming dictionary. Maps old variable names to new ones.
Variable names not occuring in the dictionary are left unchanged.
Returns:
The expression with renamed variables.
"""
if isinstance(expression, Operation):
if hasattr(expression, 'variable_name'):
variable_name = renaming.get(expression.variable_name, expression.variable_name)
return create_operation_expression(
expression, [rename_variables(o, renaming) for o in op_iter(expression)], variable_name=variable_name
)
operands = [rename_variables(o, renaming) for o in op_iter(expression)]
return create_operation_expression(expression, operands)
elif isinstance(expression, Expression):
expression = expression.__copy__()
expression.variable_name = renaming.get(expression.variable_name, expression.variable_name)
return expression |
def unwrap(self):
"""
Unwraps an RSA public key into an RSAPublicKey object. Does not support
DSA or EC public keys since they do not have an unwrapped form.
:return:
An RSAPublicKey object
"""
if self.algorithm == 'rsa':
return self['public_key'].parsed
key_type = self.algorithm.upper()
a_an = 'an' if key_type == 'EC' else 'a'
raise ValueError(unwrap(
'''
Only RSA public keys may be unwrapped - this key is %s %s public
key
''',
a_an,
key_type
)) | Unwraps an RSA public key into an RSAPublicKey object. Does not support
DSA or EC public keys since they do not have an unwrapped form.
:return:
An RSAPublicKey object | Below is the instruction that describes the task:
### Input:
Unwraps an RSA public key into an RSAPublicKey object. Does not support
DSA or EC public keys since they do not have an unwrapped form.
:return:
An RSAPublicKey object
### Response:
def unwrap(self):
"""
Unwraps an RSA public key into an RSAPublicKey object. Does not support
DSA or EC public keys since they do not have an unwrapped form.
:return:
An RSAPublicKey object
"""
if self.algorithm == 'rsa':
return self['public_key'].parsed
key_type = self.algorithm.upper()
a_an = 'an' if key_type == 'EC' else 'a'
raise ValueError(unwrap(
'''
Only RSA public keys may be unwrapped - this key is %s %s public
key
''',
a_an,
key_type
)) |
def index_all(self):
"""
Index all records under :attr:`record_path`.
"""
self.logger.debug('Start indexing all records under: %s',
self.record_path)
with self.db.connection():
for json_path in sorted(self.find_record_files()):
self.index_record(json_path) | Index all records under :attr:`record_path`. | Below is the the instruction that describes the task:
### Input:
Index all records under :attr:`record_path`.
### Response:
def index_all(self):
"""
Index all records under :attr:`record_path`.
"""
self.logger.debug('Start indexing all records under: %s',
self.record_path)
with self.db.connection():
for json_path in sorted(self.find_record_files()):
self.index_record(json_path) |
def decorate_class_method(func, classkey=None, skipmain=False):
"""
Will inject all decorated function as methods of classkey
classkey is some identifying string, tuple, or object
func can also be a tuple
"""
#import utool as ut
global __CLASSTYPE_ATTRIBUTES__
assert classkey is not None, 'must specify classkey'
#if not (skipmain and ut.get_caller_modname() == '__main__'):
__CLASSTYPE_ATTRIBUTES__[classkey].append(func)
return func | Will inject all decorated function as methods of classkey
classkey is some identifying string, tuple, or object
func can also be a tuple | Below is the instruction that describes the task:
### Input:
Will inject all decorated function as methods of classkey
classkey is some identifying string, tuple, or object
func can also be a tuple
### Response:
def decorate_class_method(func, classkey=None, skipmain=False):
"""
Will inject all decorated function as methods of classkey
classkey is some identifying string, tuple, or object
func can also be a tuple
"""
#import utool as ut
global __CLASSTYPE_ATTRIBUTES__
assert classkey is not None, 'must specify classkey'
#if not (skipmain and ut.get_caller_modname() == '__main__'):
__CLASSTYPE_ATTRIBUTES__[classkey].append(func)
return func |
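The decorator above simply records each function in a module-level registry keyed by classkey so it can be injected as a method later. A self-contained sketch of that registry-decorator pattern (names here are hypothetical, not utool's) is below.

from collections import defaultdict
from functools import partial

_REGISTRY = defaultdict(list)   # stand-in for __CLASSTYPE_ATTRIBUTES__

def register_method(func, classkey=None):
    """Record func for later injection as a method of classkey."""
    assert classkey is not None, 'must specify classkey'
    _REGISTRY[classkey].append(func)
    return func

# Pre-bind the key, then use the result as a decorator.
register_for_table = partial(register_method, classkey="MyTable")

@register_for_table
def describe(self):
    return "rows=%d" % len(self.rows)

print([f.__name__ for f in _REGISTRY["MyTable"]])   # ['describe']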
def cli():
""" Command line interface """
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter(
'%(asctime)s.%(msecs)03d %(levelname)s: %(message)s',
datefmt="%Y-%m-%d %H:%M:%S"
))
logger.addHandler(ch)
import argparse
parser = argparse.ArgumentParser(description="Search 'network' for hosts with a \
response to 'path' that matches 'filter'")
parser.add_argument('network', help='IP address with optional mask, e.g. 192.168.0.0/24')
parser.add_argument('-p', '--path', help='URL path at host, e.g. index.html',
default='')
parser.add_argument('-f', '--filter', help='Regular expression pattern for filter',
dest='pattern', default='')
parser.add_argument('-l', '--log', help='Enable logging', action='store_true')
args = parser.parse_args()
print('Scanning, please wait ...')
result = survey(**vars(args))
print('Found {} match{}{}{} on {}'.format(len(result), 'es' if len(result)!=1 else '',
' for ' if args.pattern else '', args.pattern, args.network))
for x in result:
        print(x.hostname) | Command line interface | Below is the instruction that describes the task:
### Input:
Command line interface
### Response:
def cli():
""" Command line interface """
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter(
'%(asctime)s.%(msecs)03d %(levelname)s: %(message)s',
datefmt="%Y-%m-%d %H:%M:%S"
))
logger.addHandler(ch)
import argparse
parser = argparse.ArgumentParser(description="Search 'network' for hosts with a \
response to 'path' that matches 'filter'")
parser.add_argument('network', help='IP address with optional mask, e.g. 192.168.0.0/24')
parser.add_argument('-p', '--path', help='URL path at host, e.g. index.html',
default='')
parser.add_argument('-f', '--filter', help='Regular expression pattern for filter',
dest='pattern', default='')
parser.add_argument('-l', '--log', help='Enable logging', action='store_true')
args = parser.parse_args()
print('Scanning, please wait ...')
result = survey(**vars(args))
print('Found {} match{}{}{} on {}'.format(len(result), 'es' if len(result)!=1 else '',
' for ' if args.pattern else '', args.pattern, args.network))
for x in result:
print(x.hostname) |
def p_element_list(self, p):
"""element_list : elision_opt assignment_expr
| element_list COMMA elision_opt assignment_expr
"""
if len(p) == 3:
p[0] = p[1] + [p[2]]
else:
p[1].extend(p[3])
p[1].append(p[4])
p[0] = p[1] | element_list : elision_opt assignment_expr
| element_list COMMA elision_opt assignment_expr | Below is the instruction that describes the task:
### Input:
element_list : elision_opt assignment_expr
| element_list COMMA elision_opt assignment_expr
### Response:
def p_element_list(self, p):
"""element_list : elision_opt assignment_expr
| element_list COMMA elision_opt assignment_expr
"""
if len(p) == 3:
p[0] = p[1] + [p[2]]
else:
p[1].extend(p[3])
p[1].append(p[4])
p[0] = p[1] |
def pagure_specific_project_tag_filter(config, message, tags=None, *args, **kw):
""" Particular pagure project tags
Adding this rule allows you to get notifications for one or more
`pagure.io <https://pagure.io>`_ projects having the specified tags.
Specify multiple tags by separating them with a comma ','.
"""
if not pagure_catchall(config, message):
return False
tags = tags.split(',') if tags else []
tags = [tag.strip() for tag in tags if tag and tag.strip()]
project_tags = set()
project_tags.update(message.get('project', {}).get('tags', []))
project_tags.update(
message.get('pullrequest', {}).get('project', {}).get('tags', []))
project_tags.update(
message.get('commit', {}).get('repo', {}).get('tags', []))
valid = len(project_tags.intersection(set(tags))) > 0
return valid | Particular pagure project tags
Adding this rule allows you to get notifications for one or more
`pagure.io <https://pagure.io>`_ projects having the specified tags.
Specify multiple tags by separating them with a comma ','. | Below is the instruction that describes the task:
### Input:
Particular pagure project tags
Adding this rule allows you to get notifications for one or more
`pagure.io <https://pagure.io>`_ projects having the specified tags.
Specify multiple tags by separating them with a comma ','.
### Response:
def pagure_specific_project_tag_filter(config, message, tags=None, *args, **kw):
""" Particular pagure project tags
Adding this rule allows you to get notifications for one or more
`pagure.io <https://pagure.io>`_ projects having the specified tags.
Specify multiple tags by separating them with a comma ','.
"""
if not pagure_catchall(config, message):
return False
tags = tags.split(',') if tags else []
tags = [tag.strip() for tag in tags if tag and tag.strip()]
project_tags = set()
project_tags.update(message.get('project', {}).get('tags', []))
project_tags.update(
message.get('pullrequest', {}).get('project', {}).get('tags', []))
project_tags.update(
message.get('commit', {}).get('repo', {}).get('tags', []))
valid = len(project_tags.intersection(set(tags))) > 0
return valid |
def createEncoder():
"""Create the encoder instance for our test and return it."""
consumption_encoder = ScalarEncoder(21, 0.0, 100.0, n=50, name="consumption",
clipInput=True)
time_encoder = DateEncoder(timeOfDay=(21, 9.5), name="timestamp_timeOfDay")
encoder = MultiEncoder()
encoder.addEncoder("consumption", consumption_encoder)
encoder.addEncoder("timestamp", time_encoder)
  return encoder | Create the encoder instance for our test and return it. | Below is the instruction that describes the task:
### Input:
Create the encoder instance for our test and return it.
### Response:
def createEncoder():
"""Create the encoder instance for our test and return it."""
consumption_encoder = ScalarEncoder(21, 0.0, 100.0, n=50, name="consumption",
clipInput=True)
time_encoder = DateEncoder(timeOfDay=(21, 9.5), name="timestamp_timeOfDay")
encoder = MultiEncoder()
encoder.addEncoder("consumption", consumption_encoder)
encoder.addEncoder("timestamp", time_encoder)
return encoder |
def Create(path, password, generate_default_key=True):
"""
Create a new user wallet.
Args:
path (str): A path indicating where to create or open the wallet e.g. "/Wallets/mywallet".
password (str): a 10 characters minimum password to secure the wallet with.
Returns:
UserWallet: a UserWallet instance.
"""
wallet = UserWallet(path=path, passwordKey=password, create=True)
if generate_default_key:
wallet.CreateKey()
return wallet | Create a new user wallet.
Args:
path (str): A path indicating where to create or open the wallet e.g. "/Wallets/mywallet".
password (str): a 10 characters minimum password to secure the wallet with.
Returns:
UserWallet: a UserWallet instance. | Below is the instruction that describes the task:
### Input:
Create a new user wallet.
Args:
path (str): A path indicating where to create or open the wallet e.g. "/Wallets/mywallet".
password (str): a 10 characters minimum password to secure the wallet with.
Returns:
UserWallet: a UserWallet instance.
### Response:
def Create(path, password, generate_default_key=True):
"""
Create a new user wallet.
Args:
path (str): A path indicating where to create or open the wallet e.g. "/Wallets/mywallet".
password (str): a 10 characters minimum password to secure the wallet with.
Returns:
UserWallet: a UserWallet instance.
"""
wallet = UserWallet(path=path, passwordKey=password, create=True)
if generate_default_key:
wallet.CreateKey()
return wallet |
def to_dict(self):
"""
Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object
"""
input_dict = super(Add, self)._save_to_input_dict()
input_dict["class"] = str("GPy.kern.Add")
return input_dict | Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object | Below is the instruction that describes the task:
### Input:
Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object
### Response:
def to_dict(self):
"""
Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object
"""
input_dict = super(Add, self)._save_to_input_dict()
input_dict["class"] = str("GPy.kern.Add")
return input_dict |
def prune_old_authorization_codes():
"""
Removes all unused and expired authorization codes from the database.
"""
from .compat import now
from .models import AuthorizationCode
    AuthorizationCode.objects.with_expiration_before(now()).delete() | Removes all unused and expired authorization codes from the database. | Below is the instruction that describes the task:
### Input:
Removes all unused and expired authorization codes from the database.
### Response:
def prune_old_authorization_codes():
"""
Removes all unused and expired authorization codes from the database.
"""
from .compat import now
from .models import AuthorizationCode
AuthorizationCode.objects.with_expiration_before(now()).delete() |
async def storm(self, text, opts=None):
'''
Evaluate a storm query and yield result messages.
Yields:
((str,dict)): Storm messages.
'''
async for mesg in self.cell.streamstorm(text, opts, user=self.user):
yield mesg | Evaluate a storm query and yield result messages.
Yields:
((str,dict)): Storm messages. | Below is the instruction that describes the task:
### Input:
Evaluate a storm query and yield result messages.
Yields:
((str,dict)): Storm messages.
### Response:
async def storm(self, text, opts=None):
'''
Evaluate a storm query and yield result messages.
Yields:
((str,dict)): Storm messages.
'''
async for mesg in self.cell.streamstorm(text, opts, user=self.user):
yield mesg |
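Callers consume such a coroutine with `async for`. A minimal, self-contained sketch is below; the message source is a dummy generator standing in for cell.streamstorm(), not the real Synapse cell.

import asyncio

async def fake_storm(text, opts=None):
    """Dummy message stream with the same (str, dict) message shape."""
    for mesg in (('init', {}), ('node', {'query': text}), ('fini', {})):
        yield mesg

async def main():
    async for mesg in fake_storm('inet:ipv4=1.2.3.4'):
        print(mesg)

asyncio.run(main())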
def get_token_issuer(token):
"""
Issuer of a token is the identifier used to recover the secret
Need to extract this from token to ensure we can proceed to the signature validation stage
Does not check validity of the token
:param token: signed JWT token
:return issuer: iss field of the JWT token
:raises TokenIssuerError: if iss field not present
:raises TokenDecodeError: if token does not conform to JWT spec
"""
try:
unverified = decode_token(token)
if 'iss' not in unverified:
raise TokenIssuerError
return unverified.get('iss')
except jwt.DecodeError:
raise TokenDecodeError | Issuer of a token is the identifier used to recover the secret
Need to extract this from token to ensure we can proceed to the signature validation stage
Does not check validity of the token
:param token: signed JWT token
:return issuer: iss field of the JWT token
:raises TokenIssuerError: if iss field not present
:raises TokenDecodeError: if token does not conform to JWT spec | Below is the instruction that describes the task:
### Input:
Issuer of a token is the identifier used to recover the secret
Need to extract this from token to ensure we can proceed to the signature validation stage
Does not check validity of the token
:param token: signed JWT token
:return issuer: iss field of the JWT token
:raises TokenIssuerError: if iss field not present
:raises TokenDecodeError: if token does not conform to JWT spec
### Response:
def get_token_issuer(token):
"""
Issuer of a token is the identifier used to recover the secret
Need to extract this from token to ensure we can proceed to the signature validation stage
Does not check validity of the token
:param token: signed JWT token
:return issuer: iss field of the JWT token
:raises TokenIssuerError: if iss field not present
:raises TokenDecodeError: if token does not conform to JWT spec
"""
try:
unverified = decode_token(token)
if 'iss' not in unverified:
raise TokenIssuerError
return unverified.get('iss')
except jwt.DecodeError:
raise TokenDecodeError |
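A small usage sketch, assuming PyJWT is installed; the secret and claim values are placeholders, and TokenDecodeError is the exception defined in the surrounding module:
import jwt

token = jwt.encode({'iss': 'auth-service', 'sub': 'user-42'}, 'example-secret', algorithm='HS256')
assert get_token_issuer(token) == 'auth-service'
try:
    get_token_issuer('not.a.jwt')          # malformed input
except TokenDecodeError:
    pass                                   # surfaces as the module's TokenDecodeError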
def get_verb_phrases(sentence_doc):
"""
Returns an object like,
[(1), (5,6,7)]
where this means 2 verb phrases. a single verb at index 1, another verb phrase 5,6,7.
- Adverbs are not included.
- Infinitive phrases (and verb phrases that are subsets of infinitive phrases) are not included
"""
pattern = r'<VERB>*<ADV>*<VERB>+' # r'<VERB>?<ADV>*<VERB>+' is suggested by textacy site
verb_phrases = textacy.extract.pos_regex_matches(sentence_doc, pattern)
result = [] # [(1), (5,6,7)] => 2 verb phrases. a single verb at index 1, another verb phrase 5,6,7
for vp in verb_phrases:
word_numbers = []
# return the index of 'could have been happily eating' from 'She could have been happily eating chowder'
first_word = vp.start
x = first_word
if len(vp) > 1:
for verb_or_adverb in vp:
# filter out adverbs
if not verb_or_adverb.pos_ == 'ADV':
word_numbers.append(x)
x += 1
else:
word_numbers.append(first_word)
# filter out infinitive phrases
if ( (word_numbers[0] - 1) < 0) or (sentence_doc[word_numbers[0] - 1].text.lower() != 'to'):
result.append(word_numbers)
return result | Returns an object like,
[(1), (5,6,7)]
where this means 2 verb phrases. a single verb at index 1, another verb phrase 5,6,7.
- Adverbs are not included.
- Infinitive phrases (and verb phrases that are subsets of infinitive phrases) are not included | Below is the the instruction that describes the task:
### Input:
Returns an object like,
[(1), (5,6,7)]
where this means 2 verb phrases. a single verb at index 1, another verb phrase 5,6,7.
- Adverbs are not included.
- Infinitive phrases (and verb phrases that are subsets of infinitive phrases) are not included
### Response:
def get_verb_phrases(sentence_doc):
"""
Returns an object like,
[(1), (5,6,7)]
where this means 2 verb phrases. a single verb at index 1, another verb phrase 5,6,7.
- Adverbs are not included.
- Infinitive phrases (and verb phrases that are subsets of infinitive phrases) are not included
"""
pattern = r'<VERB>*<ADV>*<VERB>+' # r'<VERB>?<ADV>*<VERB>+' is suggested by textacy site
verb_phrases = textacy.extract.pos_regex_matches(sentence_doc, pattern)
result = [] # [(1), (5,6,7)] => 2 verb phrases. a single verb at index 1, another verb phrase 5,6,7
for vp in verb_phrases:
word_numbers = []
# return the index of 'could have been happily eating' from 'She could have been happily eating chowder'
first_word = vp.start
x = first_word
if len(vp) > 1:
for verb_or_adverb in vp:
# filter out adverbs
if not verb_or_adverb.pos_ == 'ADV':
word_numbers.append(x)
x += 1
else:
word_numbers.append(first_word)
# filter out infinitive phrases
if ( (word_numbers[0] - 1) < 0) or (sentence_doc[word_numbers[0] - 1].text.lower() != 'to'):
result.append(word_numbers)
return result |
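A rough usage sketch; it assumes spaCy with the en_core_web_sm model installed and a textacy release that still ships extract.pos_regex_matches, and the exact indices depend on the tagger:
import spacy

nlp = spacy.load('en_core_web_sm')
doc = nlp('She could have been happily eating chowder.')
print(get_verb_phrases(doc))   # expected roughly [[1, 2, 3, 5]]: the adverb "happily" (index 4) is dropped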
def run(path, code=None, params=None, **meta):
"""pydocstyle code checking.
:return list: List of errors.
"""
if 'ignore_decorators' in params:
ignore_decorators = params['ignore_decorators']
else:
ignore_decorators = None
check_source_args = (code, path, ignore_decorators) if THIRD_ARG else (code, path)
return [{
'lnum': e.line,
# Remove colon after error code ("D403: ..." => "D403 ...").
'text': (e.message[0:4] + e.message[5:]
if e.message[4] == ':' else e.message),
'type': 'D',
'number': e.code
} for e in PyDocChecker().check_source(*check_source_args)] | pydocstyle code checking.
:return list: List of errors. | Below is the the instruction that describes the task:
### Input:
pydocstyle code checking.
:return list: List of errors.
### Response:
def run(path, code=None, params=None, **meta):
"""pydocstyle code checking.
:return list: List of errors.
"""
if 'ignore_decorators' in params:
ignore_decorators = params['ignore_decorators']
else:
ignore_decorators = None
check_source_args = (code, path, ignore_decorators) if THIRD_ARG else (code, path)
return [{
'lnum': e.line,
# Remove colon after error code ("D403: ..." => "D403 ...").
'text': (e.message[0:4] + e.message[5:]
if e.message[4] == ':' else e.message),
'type': 'D',
'number': e.code
} for e in PyDocChecker().check_source(*check_source_args)] |
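A hypothetical direct call to the checker above, outside of pylama; the file name is only a label here because pydocstyle checks the passed source string, and an empty params dict is enough since ignore_decorators is optional:
source = "def f():\n    pass\n"
for err in run('example.py', code=source, params={}):
    print(err['lnum'], err['text'])    # e.g. 1 D103 Missing docstring in public function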
def hazards_for_layer(layer_geometry_key):
"""Get hazard categories form layer_geometry_key.
:param layer_geometry_key: The geometry id
:type layer_geometry_key: str
:returns: List of hazard
:rtype: list
"""
result = []
for hazard in hazard_all:
if layer_geometry_key in hazard.get('allowed_geometries'):
result.append(hazard)
return sorted(result, key=lambda k: k['key']) | Get hazard categories from layer_geometry_key.
:param layer_geometry_key: The geometry id
:type layer_geometry_key: str
:returns: List of hazard
:rtype: list | Below is the the instruction that describes the task:
### Input:
Get hazard categories from layer_geometry_key.
:param layer_geometry_key: The geometry id
:type layer_geometry_key: str
:returns: List of hazard
:rtype: list
### Response:
def hazards_for_layer(layer_geometry_key):
"""Get hazard categories form layer_geometry_key.
:param layer_geometry_key: The geometry id
:type layer_geometry_key: str
:returns: List of hazard
:rtype: list
"""
result = []
for hazard in hazard_all:
if layer_geometry_key in hazard.get('allowed_geometries'):
result.append(hazard)
return sorted(result, key=lambda k: k['key']) |
def flo(string):
'''Return the string given by param formatted with the callers locals.'''
callers_locals = {}
frame = inspect.currentframe()
try:
outerframe = frame.f_back
callers_locals = outerframe.f_locals
finally:
del frame
return string.format(**callers_locals) | Return the string given by param formatted with the callers locals. | Below is the the instruction that describes the task:
### Input:
Return the string given by param formatted with the callers locals.
### Response:
def flo(string):
'''Return the string given by param formatted with the callers locals.'''
callers_locals = {}
frame = inspect.currentframe()
try:
outerframe = frame.f_back
callers_locals = outerframe.f_locals
finally:
del frame
return string.format(**callers_locals) |
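For example, the braces are filled from the caller's local variables:
host = 'db01'
port = 5432
print(flo('connecting to {host}:{port}'))   # -> connecting to db01:5432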
def clip_foreign(network):
"""
Delete all components and timelines located outside of Germany.
Add transborder flows divided by country of origin as
network.foreign_trade.
Parameters
----------
network : :class:`pypsa.Network
Overall container of PyPSA
Returns
-------
network : :class:`pypsa.Network
Overall container of PyPSA
"""
# get foreign buses by country
foreign_buses = network.buses[network.buses.country_code != 'DE']
network.buses = network.buses.drop(
network.buses.loc[foreign_buses.index].index)
# identify transborder lines (one bus foreign, one bus not) and the country
# it is coming from
"""transborder_lines = pd.DataFrame(index=network.lines[
((network.lines['bus0'].isin(network.buses.index) == False) &
(network.lines['bus1'].isin(network.buses.index) == True)) |
((network.lines['bus0'].isin(network.buses.index) == True) &
(network.lines['bus1'].isin(network.buses.index) == False))].index)
transborder_lines['bus0'] = network.lines['bus0']
transborder_lines['bus1'] = network.lines['bus1']
transborder_lines['country'] = ""
for i in range(0, len(transborder_lines)):
if transborder_lines.iloc[i, 0] in foreign_buses.index:
transborder_lines['country'][i] = foreign_buses[str(
transborder_lines.iloc[i, 0])]
else:
transborder_lines['country'][i] = foreign_buses[str(
transborder_lines.iloc[i, 1])]
# identify amount of flows per line and group to get flow per country
transborder_flows = network.lines_t.p0[transborder_lines.index]
for i in transborder_flows.columns:
if network.lines.loc[str(i)]['bus1'] in foreign_buses.index:
transborder_flows.loc[:, str(
i)] = transborder_flows.loc[:, str(i)]*-1
network.foreign_trade = transborder_flows.\
groupby(transborder_lines['country'], axis=1).sum()"""
# drop foreign components
network.lines = network.lines.drop(network.lines[
(network.lines['bus0'].isin(network.buses.index) == False) |
(network.lines['bus1'].isin(network.buses.index) == False)].index)
network.links = network.links.drop(network.links[
(network.links['bus0'].isin(network.buses.index) == False) |
(network.links['bus1'].isin(network.buses.index) == False)].index)
network.transformers = network.transformers.drop(network.transformers[
(network.transformers['bus0'].isin(network.buses.index) == False) |
(network.transformers['bus1'].isin(network.
buses.index) == False)].index)
network.generators = network.generators.drop(network.generators[
(network.generators['bus'].isin(network.buses.index) == False)].index)
network.loads = network.loads.drop(network.loads[
(network.loads['bus'].isin(network.buses.index) == False)].index)
network.storage_units = network.storage_units.drop(network.storage_units[
(network.storage_units['bus'].isin(network.
buses.index) == False)].index)
components = ['loads', 'generators', 'lines', 'buses', 'transformers',
'links']
for g in components: # loads_t
h = g + '_t'
nw = getattr(network, h) # network.loads_t
for i in nw.keys(): # network.loads_t.p
cols = [j for j in getattr(
nw, i).columns if j not in getattr(network, g).index]
for k in cols:
del getattr(nw, i)[k]
return network | Delete all components and timelines located outside of Germany.
Add transborder flows divided by country of origin as
network.foreign_trade.
Parameters
----------
network : :class:`pypsa.Network
Overall container of PyPSA
Returns
-------
network : :class:`pypsa.Network
Overall container of PyPSA | Below is the the instruction that describes the task:
### Input:
Delete all components and timelines located outside of Germany.
Add transborder flows divided by country of origin as
network.foreign_trade.
Parameters
----------
network : :class:`pypsa.Network
Overall container of PyPSA
Returns
-------
network : :class:`pypsa.Network
Overall container of PyPSA
### Response:
def clip_foreign(network):
"""
Delete all components and timelines located outside of Germany.
Add transborder flows divided by country of origin as
network.foreign_trade.
Parameters
----------
network : :class:`pypsa.Network
Overall container of PyPSA
Returns
-------
network : :class:`pypsa.Network
Overall container of PyPSA
"""
# get foreign buses by country
foreign_buses = network.buses[network.buses.country_code != 'DE']
network.buses = network.buses.drop(
network.buses.loc[foreign_buses.index].index)
# identify transborder lines (one bus foreign, one bus not) and the country
# it is coming from
"""transborder_lines = pd.DataFrame(index=network.lines[
((network.lines['bus0'].isin(network.buses.index) == False) &
(network.lines['bus1'].isin(network.buses.index) == True)) |
((network.lines['bus0'].isin(network.buses.index) == True) &
(network.lines['bus1'].isin(network.buses.index) == False))].index)
transborder_lines['bus0'] = network.lines['bus0']
transborder_lines['bus1'] = network.lines['bus1']
transborder_lines['country'] = ""
for i in range(0, len(transborder_lines)):
if transborder_lines.iloc[i, 0] in foreign_buses.index:
transborder_lines['country'][i] = foreign_buses[str(
transborder_lines.iloc[i, 0])]
else:
transborder_lines['country'][i] = foreign_buses[str(
transborder_lines.iloc[i, 1])]
# identify amount of flows per line and group to get flow per country
transborder_flows = network.lines_t.p0[transborder_lines.index]
for i in transborder_flows.columns:
if network.lines.loc[str(i)]['bus1'] in foreign_buses.index:
transborder_flows.loc[:, str(
i)] = transborder_flows.loc[:, str(i)]*-1
network.foreign_trade = transborder_flows.\
groupby(transborder_lines['country'], axis=1).sum()"""
# drop foreign components
network.lines = network.lines.drop(network.lines[
(network.lines['bus0'].isin(network.buses.index) == False) |
(network.lines['bus1'].isin(network.buses.index) == False)].index)
network.links = network.links.drop(network.links[
(network.links['bus0'].isin(network.buses.index) == False) |
(network.links['bus1'].isin(network.buses.index) == False)].index)
network.transformers = network.transformers.drop(network.transformers[
(network.transformers['bus0'].isin(network.buses.index) == False) |
(network.transformers['bus1'].isin(network.
buses.index) == False)].index)
network.generators = network.generators.drop(network.generators[
(network.generators['bus'].isin(network.buses.index) == False)].index)
network.loads = network.loads.drop(network.loads[
(network.loads['bus'].isin(network.buses.index) == False)].index)
network.storage_units = network.storage_units.drop(network.storage_units[
(network.storage_units['bus'].isin(network.
buses.index) == False)].index)
components = ['loads', 'generators', 'lines', 'buses', 'transformers',
'links']
for g in components: # loads_t
h = g + '_t'
nw = getattr(network, h) # network.loads_t
for i in nw.keys(): # network.loads_t.p
cols = [j for j in getattr(
nw, i).columns if j not in getattr(network, g).index]
for k in cols:
del getattr(nw, i)[k]
return network |
def solve(succ, orien, i, direc):
"""Can a laser leaving mirror i in direction direc reach exit ?
:param i: mirror index
:param direc: direction leaving mirror i
:param orien: orien[i]=orientation of mirror i

:param succ: succ[i][direc]=succ mirror reached
when leaving i in direction direc
"""
assert orien[i] is not None
j = succ[i][direc]
if j is None: # basic case
return False
if j == len(orien) - 1:
return True
if orien[j] is None: # try both orientations
for x in [0, 1]:
orien[j] = x
if solve(succ, orien, j, reflex[direc][x]):
return True
orien[j] = None
return False
else:
return solve(succ, orien, j, reflex[direc][orien[j]]) | Can a laser leaving mirror i in direction direc reach exit ?
:param i: mirror index
:param direc: direction leaving mirror i
:param orient: orient[i]=orientation of mirror i
:param succ: succ[i][direc]=succ mirror reached
when leaving i in direction direc | Below is the the instruction that describes the task:
### Input:
Can a laser leaving mirror i in direction direc reach exit ?
:param i: mirror index
:param direc: direction leaving mirror i
:param orien: orien[i]=orientation of mirror i
:param succ: succ[i][direc]=succ mirror reached
when leaving i in direction direc
### Response:
def solve(succ, orien, i, direc):
"""Can a laser leaving mirror i in direction direc reach exit ?
:param i: mirror index
:param direc: direction leaving mirror i
:param orien: orien[i]=orientation of mirror i
:param succ: succ[i][direc]=succ mirror reached
when leaving i in direction direc
"""
assert orien[i] is not None
j = succ[i][direc]
if j is None: # basic case
return False
if j == len(orien) - 1:
return True
if orien[j] is None: # try both orientations
for x in [0, 1]:
orien[j] = x
if solve(succ, orien, j, reflex[direc][x]):
return True
orien[j] = None
return False
else:
return solve(succ, orien, j, reflex[direc][orien[j]]) |
def generator_checker_py2(gen, gen_type, bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check):
"""Builds a typechecking wrapper around a Python 2 style generator object.
"""
initialized = False
sn = None
while True:
a = gen.send(sn)
if initialized or not a is None:
if not gen_type.__args__[0] is Any and \
not _isinstance(a, gen_type.__args__[0], bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check):
tpa = deep_type(a)
msg = _make_generator_error_message(tpa, gen, gen_type.__args__[0],
'has incompatible yield type')
_raise_typecheck_error(msg, True, a, tpa, gen_type.__args__[0])
# raise pytypes.ReturnTypeError(_make_generator_error_message(tpa, gen,
# gen_type.__args__[0], 'has incompatible yield type'))
initialized = True
sn = yield a
if not gen_type.__args__[1] is Any and \
not _isinstance(sn, gen_type.__args__[1], bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check):
tpsn = deep_type(sn)
msg = _make_generator_error_message(tpsn, gen, gen_type.__args__[1],
'has incompatible send type')
_raise_typecheck_error(msg, False, sn, tpsn, gen_type.__args__[1]) | Builds a typechecking wrapper around a Python 2 style generator object. | Below is the the instruction that describes the task:
### Input:
Builds a typechecking wrapper around a Python 2 style generator object.
### Response:
def generator_checker_py2(gen, gen_type, bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check):
"""Builds a typechecking wrapper around a Python 2 style generator object.
"""
initialized = False
sn = None
while True:
a = gen.send(sn)
if initialized or not a is None:
if not gen_type.__args__[0] is Any and \
not _isinstance(a, gen_type.__args__[0], bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check):
tpa = deep_type(a)
msg = _make_generator_error_message(tpa, gen, gen_type.__args__[0],
'has incompatible yield type')
_raise_typecheck_error(msg, True, a, tpa, gen_type.__args__[0])
# raise pytypes.ReturnTypeError(_make_generator_error_message(tpa, gen,
# gen_type.__args__[0], 'has incompatible yield type'))
initialized = True
sn = yield a
if not gen_type.__args__[1] is Any and \
not _isinstance(sn, gen_type.__args__[1], bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check):
tpsn = deep_type(sn)
msg = _make_generator_error_message(tpsn, gen, gen_type.__args__[1],
'has incompatible send type')
_raise_typecheck_error(msg, False, sn, tpsn, gen_type.__args__[1]) |
def process_remote_sources(self):
"""Create synthetic targets with populated sources from remote_sources targets."""
unpacked_sources = self.context.products.get_data(UnpackedArchives)
remote_sources_targets = self.context.targets(predicate=lambda t: isinstance(t, RemoteSources))
if not remote_sources_targets:
return
snapshot_specs = []
filespecs = []
unpack_dirs = []
for target in remote_sources_targets:
unpacked_archive = unpacked_sources[target.sources_target]
sources = unpacked_archive.found_files
rel_unpack_dir = unpacked_archive.rel_unpack_dir
self.context.log.debug('target: {}, rel_unpack_dir: {}, sources: {}'
.format(target, rel_unpack_dir, sources))
sources_in_dir = tuple(os.path.join(rel_unpack_dir, source) for source in sources)
snapshot_specs.append(PathGlobsAndRoot(
PathGlobs(sources_in_dir),
get_buildroot(),
))
filespecs.append({'globs': sources_in_dir})
unpack_dirs.append(rel_unpack_dir)
snapshots = self.context._scheduler.capture_snapshots(tuple(snapshot_specs))
for target, snapshot, filespec, rel_unpack_dir in \
zip(remote_sources_targets, snapshots, filespecs, unpack_dirs):
synthetic_target = self.context.add_new_target(
address=Address(os.path.relpath(self.workdir, get_buildroot()), target.id),
target_type=target.destination_target_type,
dependencies=target.dependencies,
sources=EagerFilesetWithSpec(rel_unpack_dir, filespec, snapshot),
derived_from=target,
**target.destination_target_args
)
self.context.log.debug('synthetic_target: {}'.format(synthetic_target))
for dependent in self.context.build_graph.dependents_of(target.address):
self.context.build_graph.inject_dependency(dependent, synthetic_target.address) | Create synthetic targets with populated sources from remote_sources targets. | Below is the the instruction that describes the task:
### Input:
Create synthetic targets with populated sources from remote_sources targets.
### Response:
def process_remote_sources(self):
"""Create synthetic targets with populated sources from remote_sources targets."""
unpacked_sources = self.context.products.get_data(UnpackedArchives)
remote_sources_targets = self.context.targets(predicate=lambda t: isinstance(t, RemoteSources))
if not remote_sources_targets:
return
snapshot_specs = []
filespecs = []
unpack_dirs = []
for target in remote_sources_targets:
unpacked_archive = unpacked_sources[target.sources_target]
sources = unpacked_archive.found_files
rel_unpack_dir = unpacked_archive.rel_unpack_dir
self.context.log.debug('target: {}, rel_unpack_dir: {}, sources: {}'
.format(target, rel_unpack_dir, sources))
sources_in_dir = tuple(os.path.join(rel_unpack_dir, source) for source in sources)
snapshot_specs.append(PathGlobsAndRoot(
PathGlobs(sources_in_dir),
get_buildroot(),
))
filespecs.append({'globs': sources_in_dir})
unpack_dirs.append(rel_unpack_dir)
snapshots = self.context._scheduler.capture_snapshots(tuple(snapshot_specs))
for target, snapshot, filespec, rel_unpack_dir in \
zip(remote_sources_targets, snapshots, filespecs, unpack_dirs):
synthetic_target = self.context.add_new_target(
address=Address(os.path.relpath(self.workdir, get_buildroot()), target.id),
target_type=target.destination_target_type,
dependencies=target.dependencies,
sources=EagerFilesetWithSpec(rel_unpack_dir, filespec, snapshot),
derived_from=target,
**target.destination_target_args
)
self.context.log.debug('synthetic_target: {}'.format(synthetic_target))
for dependent in self.context.build_graph.dependents_of(target.address):
self.context.build_graph.inject_dependency(dependent, synthetic_target.address) |
def add_volume_bricks(name, bricks):
'''
Add brick(s) to an existing volume
name
Volume name
bricks
List of bricks to add to the volume
CLI Example:
.. code-block:: bash
salt '*' glusterfs.add_volume_bricks <volume> <bricks>
'''
volinfo = info()
if name not in volinfo:
log.error('Volume %s does not exist, cannot add bricks', name)
return False
new_bricks = []
cmd = 'volume add-brick {0}'.format(name)
if isinstance(bricks, six.string_types):
bricks = [bricks]
volume_bricks = [x['path'] for x in volinfo[name]['bricks'].values()]
for brick in bricks:
if brick in volume_bricks:
log.debug(
'Brick %s already in volume %s...excluding from command',
brick,
name)
else:
new_bricks.append(brick)
if new_bricks:
for brick in new_bricks:
cmd += ' {0}'.format(brick)
return _gluster(cmd)
return True | Add brick(s) to an existing volume
name
Volume name
bricks
List of bricks to add to the volume
CLI Example:
.. code-block:: bash
salt '*' glusterfs.add_volume_bricks <volume> <bricks> | Below is the the instruction that describes the task:
### Input:
Add brick(s) to an existing volume
name
Volume name
bricks
List of bricks to add to the volume
CLI Example:
.. code-block:: bash
salt '*' glusterfs.add_volume_bricks <volume> <bricks>
### Response:
def add_volume_bricks(name, bricks):
'''
Add brick(s) to an existing volume
name
Volume name
bricks
List of bricks to add to the volume
CLI Example:
.. code-block:: bash
salt '*' glusterfs.add_volume_bricks <volume> <bricks>
'''
volinfo = info()
if name not in volinfo:
log.error('Volume %s does not exist, cannot add bricks', name)
return False
new_bricks = []
cmd = 'volume add-brick {0}'.format(name)
if isinstance(bricks, six.string_types):
bricks = [bricks]
volume_bricks = [x['path'] for x in volinfo[name]['bricks'].values()]
for brick in bricks:
if brick in volume_bricks:
log.debug(
'Brick %s already in volume %s...excluding from command',
brick,
name)
else:
new_bricks.append(brick)
if new_bricks:
for brick in new_bricks:
cmd += ' {0}'.format(brick)
return _gluster(cmd)
return True |
def flat_list_to_polymer(atom_list, atom_group_s=4):
"""Takes a flat list of atomic coordinates and converts it to a `Polymer`.
Parameters
----------
atom_list : [Atom]
Flat list of coordinates.
atom_group_s : int, optional
Size of atom groups.
Returns
-------
polymer : Polypeptide
`Polymer` object containing atom coords converted `Monomers`.
Raises
------
ValueError
Raised if `atom_group_s` != 4 or 5
"""
atom_labels = ['N', 'CA', 'C', 'O', 'CB']
atom_elements = ['N', 'C', 'C', 'O', 'C']
atoms_coords = [atom_list[x:x + atom_group_s]
for x in range(0, len(atom_list), atom_group_s)]
atoms = [[Atom(x[0], x[1]) for x in zip(y, atom_elements)]
for y in atoms_coords]
if atom_group_s == 5:
monomers = [Residue(OrderedDict(zip(atom_labels, x)), 'ALA')
for x in atoms]
elif atom_group_s == 4:
monomers = [Residue(OrderedDict(zip(atom_labels, x)), 'GLY')
for x in atoms]
else:
raise ValueError(
'Parameter atom_group_s must be 4 or 5 so atoms can be labeled correctly.')
polymer = Polypeptide(monomers=monomers)
return polymer | Takes a flat list of atomic coordinates and converts it to a `Polymer`.
Parameters
----------
atom_list : [Atom]
Flat list of coordinates.
atom_group_s : int, optional
Size of atom groups.
Returns
-------
polymer : Polypeptide
`Polymer` object containing atom coords converted `Monomers`.
Raises
------
ValueError
Raised if `atom_group_s` != 4 or 5 | Below is the the instruction that describes the task:
### Input:
Takes a flat list of atomic coordinates and converts it to a `Polymer`.
Parameters
----------
atom_list : [Atom]
Flat list of coordinates.
atom_group_s : int, optional
Size of atom groups.
Returns
-------
polymer : Polypeptide
`Polymer` object containing atom coords converted `Monomers`.
Raises
------
ValueError
Raised if `atom_group_s` != 4 or 5
### Response:
def flat_list_to_polymer(atom_list, atom_group_s=4):
"""Takes a flat list of atomic coordinates and converts it to a `Polymer`.
Parameters
----------
atom_list : [Atom]
Flat list of coordinates.
atom_group_s : int, optional
Size of atom groups.
Returns
-------
polymer : Polypeptide
`Polymer` object containing atom coords converted `Monomers`.
Raises
------
ValueError
Raised if `atom_group_s` != 4 or 5
"""
atom_labels = ['N', 'CA', 'C', 'O', 'CB']
atom_elements = ['N', 'C', 'C', 'O', 'C']
atoms_coords = [atom_list[x:x + atom_group_s]
for x in range(0, len(atom_list), atom_group_s)]
atoms = [[Atom(x[0], x[1]) for x in zip(y, atom_elements)]
for y in atoms_coords]
if atom_group_s == 5:
monomers = [Residue(OrderedDict(zip(atom_labels, x)), 'ALA')
for x in atoms]
elif atom_group_s == 4:
monomers = [Residue(OrderedDict(zip(atom_labels, x)), 'GLY')
for x in atoms]
else:
raise ValueError(
'Parameter atom_group_s must be 4 or 5 so atoms can be labeled correctly.')
polymer = Polypeptide(monomers=monomers)
return polymer |
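A toy call with made-up coordinates, assuming the Atom/Residue/Polypeptide classes imported by this module; two four-atom groups (N, CA, C, O) give two GLY residues under the default atom_group_s of 4:
backbone_coords = [
    (0.0, 0.0, 0.0), (1.5, 0.0, 0.0), (2.0, 1.4, 0.0), (3.2, 1.6, 0.0),   # residue 1
    (1.9, 2.4, 0.0), (2.4, 3.8, 0.0), (3.9, 3.9, 0.1), (4.6, 2.9, 0.2),   # residue 2
]
polypeptide = flat_list_to_polymer(backbone_coords)
print(polypeptide)   # a Polypeptide wrapping two GLY residues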
def open_zarr(store, group=None, synchronizer=None, chunks='auto',
decode_cf=True, mask_and_scale=True, decode_times=True,
concat_characters=True, decode_coords=True,
drop_variables=None, consolidated=False,
overwrite_encoded_chunks=False, **kwargs):
"""Load and decode a dataset from a Zarr store.
.. note:: Experimental
The Zarr backend is new and experimental. Please report any
unexpected behavior via github issues.
The `store` object should be a valid store for a Zarr group. `store`
variables must contain dimension metadata encoded in the
`_ARRAY_DIMENSIONS` attribute.
Parameters
----------
store : MutableMapping or str
A MutableMapping where a Zarr Group has been stored or a path to a
directory in file system where a Zarr DirectoryStore has been stored.
synchronizer : object, optional
Array synchronizer provided to zarr
group : str, optional
Group path. (a.k.a. `path` in zarr terminology.)
chunks : int or dict or tuple or {None, 'auto'}, optional
Chunk sizes along each dimension, e.g., ``5`` or
``{'x': 5, 'y': 5}``. If `chunks='auto'`, dask chunks are created
based on the variable's zarr chunks. If `chunks=None`, zarr array
data will lazily convert to numpy arrays upon access. This accepts
all the chunk specifications as Dask does.
overwrite_encoded_chunks: bool, optional
Whether to drop the zarr chunks encoded for each variable when a
dataset is loaded with specified chunk sizes (default: False)
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
drop_variables : string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
consolidated : bool, optional
Whether to open the store using zarr's consolidated metadata
capability. Only works for stores that have already been consolidated.
Returns
-------
dataset : Dataset
The newly created dataset.
See Also
--------
open_dataset
References
----------
http://zarr.readthedocs.io/
"""
if 'auto_chunk' in kwargs:
auto_chunk = kwargs.pop('auto_chunk')
if auto_chunk:
chunks = 'auto' # maintain backwards compatibility
else:
chunks = None
warnings.warn("auto_chunk is deprecated. Use chunks='auto' instead.",
FutureWarning, stacklevel=2)
if kwargs:
raise TypeError("open_zarr() got unexpected keyword arguments " +
",".join(kwargs.keys()))
if not isinstance(chunks, (int, dict)):
if chunks != 'auto' and chunks is not None:
raise ValueError("chunks must be an int, dict, 'auto', or None. "
"Instead found %s. " % chunks)
if not decode_cf:
mask_and_scale = False
decode_times = False
concat_characters = False
decode_coords = False
def maybe_decode_store(store, lock=False):
ds = conventions.decode_cf(
store, mask_and_scale=mask_and_scale, decode_times=decode_times,
concat_characters=concat_characters, decode_coords=decode_coords,
drop_variables=drop_variables)
# TODO: this is where we would apply caching
return ds
# Zarr supports a wide range of access modes, but for now xarray either
# reads or writes from a store, never both. For open_zarr, we only read
mode = 'r'
zarr_store = ZarrStore.open_group(store, mode=mode,
synchronizer=synchronizer,
group=group, consolidated=consolidated)
ds = maybe_decode_store(zarr_store)
# auto chunking needs to be here and not in ZarrStore because variable
# chunks do not survive decode_cf
# return trivial case
if not chunks:
return ds
# adapted from Dataset.Chunk()
if isinstance(chunks, int):
chunks = dict.fromkeys(ds.dims, chunks)
if isinstance(chunks, tuple) and len(chunks) == len(ds.dims):
chunks = dict(zip(ds.dims, chunks))
def get_chunk(name, var, chunks):
chunk_spec = dict(zip(var.dims, var.encoding.get('chunks')))
# Coordinate labels aren't chunked
if var.ndim == 1 and var.dims[0] == name:
return chunk_spec
if chunks == 'auto':
return chunk_spec
for dim in var.dims:
if dim in chunks:
spec = chunks[dim]
if isinstance(spec, int):
spec = (spec,)
if isinstance(spec, (tuple, list)) and chunk_spec[dim]:
if any(s % chunk_spec[dim] for s in spec):
warnings.warn("Specified Dask chunks %r would "
"separate Zarr chunk shape %r for "
"dimension %r. This significantly "
"degrades performance. Consider "
"rechunking after loading instead."
% (chunks[dim], chunk_spec[dim], dim),
stacklevel=2)
chunk_spec[dim] = chunks[dim]
return chunk_spec
def maybe_chunk(name, var, chunks):
from dask.base import tokenize
chunk_spec = get_chunk(name, var, chunks)
if (var.ndim > 0) and (chunk_spec is not None):
# does this cause any data to be read?
token2 = tokenize(name, var._data)
name2 = 'zarr-%s' % token2
var = var.chunk(chunk_spec, name=name2, lock=None)
if overwrite_encoded_chunks and var.chunks is not None:
var.encoding['chunks'] = tuple(x[0] for x in var.chunks)
return var
else:
return var
variables = OrderedDict([(k, maybe_chunk(k, v, chunks))
for k, v in ds.variables.items()])
return ds._replace_vars_and_dims(variables) | Load and decode a dataset from a Zarr store.
.. note:: Experimental
The Zarr backend is new and experimental. Please report any
unexpected behavior via github issues.
The `store` object should be a valid store for a Zarr group. `store`
variables must contain dimension metadata encoded in the
`_ARRAY_DIMENSIONS` attribute.
Parameters
----------
store : MutableMapping or str
A MutableMapping where a Zarr Group has been stored or a path to a
directory in file system where a Zarr DirectoryStore has been stored.
synchronizer : object, optional
Array synchronizer provided to zarr
group : str, optional
Group path. (a.k.a. `path` in zarr terminology.)
chunks : int or dict or tuple or {None, 'auto'}, optional
Chunk sizes along each dimension, e.g., ``5`` or
``{'x': 5, 'y': 5}``. If `chunks='auto'`, dask chunks are created
based on the variable's zarr chunks. If `chunks=None`, zarr array
data will lazily convert to numpy arrays upon access. This accepts
all the chunk specifications as Dask does.
overwrite_encoded_chunks: bool, optional
Whether to drop the zarr chunks encoded for each variable when a
dataset is loaded with specified chunk sizes (default: False)
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
drop_variables : string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
consolidated : bool, optional
Whether to open the store using zarr's consolidated metadata
capability. Only works for stores that have already been consolidated.
Returns
-------
dataset : Dataset
The newly created dataset.
See Also
--------
open_dataset
References
----------
http://zarr.readthedocs.io/ | Below is the the instruction that describes the task:
### Input:
Load and decode a dataset from a Zarr store.
.. note:: Experimental
The Zarr backend is new and experimental. Please report any
unexpected behavior via github issues.
The `store` object should be a valid store for a Zarr group. `store`
variables must contain dimension metadata encoded in the
`_ARRAY_DIMENSIONS` attribute.
Parameters
----------
store : MutableMapping or str
A MutableMapping where a Zarr Group has been stored or a path to a
directory in file system where a Zarr DirectoryStore has been stored.
synchronizer : object, optional
Array synchronizer provided to zarr
group : str, optional
Group path. (a.k.a. `path` in zarr terminology.)
chunks : int or dict or tuple or {None, 'auto'}, optional
Chunk sizes along each dimension, e.g., ``5`` or
``{'x': 5, 'y': 5}``. If `chunks='auto'`, dask chunks are created
based on the variable's zarr chunks. If `chunks=None`, zarr array
data will lazily convert to numpy arrays upon access. This accepts
all the chunk specifications as Dask does.
overwrite_encoded_chunks: bool, optional
Whether to drop the zarr chunks encoded for each variable when a
dataset is loaded with specified chunk sizes (default: False)
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
drop_variables : string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
consolidated : bool, optional
Whether to open the store using zarr's consolidated metadata
capability. Only works for stores that have already been consolidated.
Returns
-------
dataset : Dataset
The newly created dataset.
See Also
--------
open_dataset
References
----------
http://zarr.readthedocs.io/
### Response:
def open_zarr(store, group=None, synchronizer=None, chunks='auto',
decode_cf=True, mask_and_scale=True, decode_times=True,
concat_characters=True, decode_coords=True,
drop_variables=None, consolidated=False,
overwrite_encoded_chunks=False, **kwargs):
"""Load and decode a dataset from a Zarr store.
.. note:: Experimental
The Zarr backend is new and experimental. Please report any
unexpected behavior via github issues.
The `store` object should be a valid store for a Zarr group. `store`
variables must contain dimension metadata encoded in the
`_ARRAY_DIMENSIONS` attribute.
Parameters
----------
store : MutableMapping or str
A MutableMapping where a Zarr Group has been stored or a path to a
directory in file system where a Zarr DirectoryStore has been stored.
synchronizer : object, optional
Array synchronizer provided to zarr
group : str, optional
Group path. (a.k.a. `path` in zarr terminology.)
chunks : int or dict or tuple or {None, 'auto'}, optional
Chunk sizes along each dimension, e.g., ``5`` or
``{'x': 5, 'y': 5}``. If `chunks='auto'`, dask chunks are created
based on the variable's zarr chunks. If `chunks=None`, zarr array
data will lazily convert to numpy arrays upon access. This accepts
all the chunk specifications as Dask does.
overwrite_encoded_chunks: bool, optional
Whether to drop the zarr chunks encoded for each variable when a
dataset is loaded with specified chunk sizes (default: False)
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
drop_variables : string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
consolidated : bool, optional
Whether to open the store using zarr's consolidated metadata
capability. Only works for stores that have already been consolidated.
Returns
-------
dataset : Dataset
The newly created dataset.
See Also
--------
open_dataset
References
----------
http://zarr.readthedocs.io/
"""
if 'auto_chunk' in kwargs:
auto_chunk = kwargs.pop('auto_chunk')
if auto_chunk:
chunks = 'auto' # maintain backwards compatibility
else:
chunks = None
warnings.warn("auto_chunk is deprecated. Use chunks='auto' instead.",
FutureWarning, stacklevel=2)
if kwargs:
raise TypeError("open_zarr() got unexpected keyword arguments " +
",".join(kwargs.keys()))
if not isinstance(chunks, (int, dict)):
if chunks != 'auto' and chunks is not None:
raise ValueError("chunks must be an int, dict, 'auto', or None. "
"Instead found %s. " % chunks)
if not decode_cf:
mask_and_scale = False
decode_times = False
concat_characters = False
decode_coords = False
def maybe_decode_store(store, lock=False):
ds = conventions.decode_cf(
store, mask_and_scale=mask_and_scale, decode_times=decode_times,
concat_characters=concat_characters, decode_coords=decode_coords,
drop_variables=drop_variables)
# TODO: this is where we would apply caching
return ds
# Zarr supports a wide range of access modes, but for now xarray either
# reads or writes from a store, never both. For open_zarr, we only read
mode = 'r'
zarr_store = ZarrStore.open_group(store, mode=mode,
synchronizer=synchronizer,
group=group, consolidated=consolidated)
ds = maybe_decode_store(zarr_store)
# auto chunking needs to be here and not in ZarrStore because variable
# chunks do not survive decode_cf
# return trivial case
if not chunks:
return ds
# adapted from Dataset.Chunk()
if isinstance(chunks, int):
chunks = dict.fromkeys(ds.dims, chunks)
if isinstance(chunks, tuple) and len(chunks) == len(ds.dims):
chunks = dict(zip(ds.dims, chunks))
def get_chunk(name, var, chunks):
chunk_spec = dict(zip(var.dims, var.encoding.get('chunks')))
# Coordinate labels aren't chunked
if var.ndim == 1 and var.dims[0] == name:
return chunk_spec
if chunks == 'auto':
return chunk_spec
for dim in var.dims:
if dim in chunks:
spec = chunks[dim]
if isinstance(spec, int):
spec = (spec,)
if isinstance(spec, (tuple, list)) and chunk_spec[dim]:
if any(s % chunk_spec[dim] for s in spec):
warnings.warn("Specified Dask chunks %r would "
"separate Zarr chunk shape %r for "
"dimension %r. This significantly "
"degrades performance. Consider "
"rechunking after loading instead."
% (chunks[dim], chunk_spec[dim], dim),
stacklevel=2)
chunk_spec[dim] = chunks[dim]
return chunk_spec
def maybe_chunk(name, var, chunks):
from dask.base import tokenize
chunk_spec = get_chunk(name, var, chunks)
if (var.ndim > 0) and (chunk_spec is not None):
# does this cause any data to be read?
token2 = tokenize(name, var._data)
name2 = 'zarr-%s' % token2
var = var.chunk(chunk_spec, name=name2, lock=None)
if overwrite_encoded_chunks and var.chunks is not None:
var.encoding['chunks'] = tuple(x[0] for x in var.chunks)
return var
else:
return var
variables = OrderedDict([(k, maybe_chunk(k, v, chunks))
for k, v in ds.variables.items()])
return ds._replace_vars_and_dims(variables) |
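A minimal usage sketch; the store path, variable name and coordinate name below are placeholders:
import xarray as xr

ds = xr.open_zarr('data/example.zarr', consolidated=True)   # variables come back dask-backed
monthly = ds['temperature'].sel(time='2020-01')             # still lazy at this point
result = monthly.mean(dim='time').compute()                 # .compute() triggers the actual reads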
def module_can_run_parallel(test_module: unittest.TestSuite) -> bool:
"""
Checks if a given module of tests can be run in parallel or not
:param test_module: the module to run
:return: True if the module can be run in parallel, False otherwise
"""
for test_class in test_module:
# if the test is already failed, we just don't filter it
# and let the test runner deal with it later.
if hasattr(unittest.loader, '_FailedTest'): # import failure in python 3.4.5+
# noinspection PyProtectedMember
if isinstance(test_class, unittest.loader._FailedTest):
continue
if not isinstance(test_class, collections.Iterable): # likely an import failure in python 3.4.4-
# before python 3.4.5, test import failures were not serializable.
# We are unable to be sure that this is a module import failure, but it very likely is
# if this is the case, we'll just run this locally and see
raise TestClassNotIterable()
for test_case in test_class:
return not getattr(sys.modules[test_case.__module__], "__no_parallel__", False) | Checks if a given module of tests can be run in parallel or not
:param test_module: the module to run
:return: True if the module can be run in parallel, False otherwise
### Input:
Checks if a given module of tests can be run in parallel or not
:param test_module: the module to run
:return: True if the module can be run in parallel, False otherwise
### Response:
def module_can_run_parallel(test_module: unittest.TestSuite) -> bool:
"""
Checks if a given module of tests can be run in parallel or not
:param test_module: the module to run
:return: True if the module can be run in parallel, False otherwise
"""
for test_class in test_module:
# if the test is already failed, we just don't filter it
# and let the test runner deal with it later.
if hasattr(unittest.loader, '_FailedTest'): # import failure in python 3.4.5+
# noinspection PyProtectedMember
if isinstance(test_class, unittest.loader._FailedTest):
continue
if not isinstance(test_class, collections.Iterable): # likely an import failure in python 3.4.4-
# before python 3.4.5, test import failures were not serializable.
# We are unable to be sure that this is a module import failure, but it very likely is
# if this is the case, we'll just run this locally and see
raise TestClassNotIterable()
for test_case in test_class:
return not getattr(sys.modules[test_case.__module__], "__no_parallel__", False) |
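A hypothetical driver built on unittest discovery, splitting modules into parallel-safe and serial-only buckets; the 'tests' directory is a placeholder and TestClassNotIterable is the exception defined alongside this helper:
import unittest

parallel, serial = [], []
for test_module in unittest.defaultTestLoader.discover('tests'):
    try:
        ok = module_can_run_parallel(test_module)
    except TestClassNotIterable:
        ok = False                       # unserializable import failures are kept serial
    (parallel if ok else serial).append(test_module)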
def _generate_struct(self, struct_type, extra_parameters=None, nameOverride=None):
"""
Emits a JSDoc @typedef for a struct.
"""
extra_parameters = extra_parameters if extra_parameters is not None else []
self._emit_jsdoc_header(struct_type.doc)
self.emit(
' * @typedef {Object} %s' % (
nameOverride if nameOverride else fmt_type_name(struct_type)
)
)
# Some structs can explicitly list their subtypes. These structs
# have a .tag field that indicate which subtype they are.
if struct_type.is_member_of_enumerated_subtypes_tree():
if struct_type.has_enumerated_subtypes():
# This struct is the parent to multiple subtypes.
# Determine all of the possible values of the .tag
# property.
tag_values = []
for tags, _ in struct_type.get_all_subtypes_with_tags():
for tag in tags:
tag_values.append('"%s"' % tag)
jsdoc_tag_union = fmt_jsdoc_union(tag_values)
txt = '@property {%s} .tag - Tag identifying the subtype variant.' % \
jsdoc_tag_union
self.emit_wrapped_text(txt)
else:
# This struct is a particular subtype. Find the applicable
# .tag value from the parent type, which may be an
# arbitrary number of steps up the inheritance hierarchy.
parent = struct_type.parent_type
while not parent.has_enumerated_subtypes():
parent = parent.parent_type
# parent now contains the closest parent type in the
# inheritance hierarchy that has enumerated subtypes.
# Determine which subtype this is.
for subtype in parent.get_enumerated_subtypes():
if subtype.data_type == struct_type:
txt = '@property {\'%s\'} [.tag] - Tag identifying ' \
'this subtype variant. This field is only ' \
'present when needed to discriminate ' \
'between multiple possible subtypes.' % \
subtype.name
self.emit_wrapped_text(txt)
break
for param_name, param_type, param_docstring in extra_parameters:
param_docstring = ' - %s' % param_docstring if param_docstring else ''
self.emit_wrapped_text(
'@property {%s} %s%s' % (
param_type,
param_name,
param_docstring,
),
prefix=' * ',
)
# NOTE: JSDoc @typedef does not support inheritance. Using @class would be inappropriate,
# since these are not nominal types backed by a constructor. Thus, we emit all_fields,
# which includes fields on parent types.
for field in struct_type.all_fields:
field_doc = ' - ' + field.doc if field.doc else ''
field_type, nullable, _ = unwrap(field.data_type)
field_js_type = fmt_type(field_type)
# Translate nullable types into optional properties.
field_name = '[' + field.name + ']' if nullable else field.name
self.emit_wrapped_text(
'@property {%s} %s%s' % (
field_js_type,
field_name,
self.process_doc(field_doc, self._docf),
),
prefix=' * ',
)
self.emit(' */') | Emits a JSDoc @typedef for a struct. | Below is the the instruction that describes the task:
### Input:
Emits a JSDoc @typedef for a struct.
### Response:
def _generate_struct(self, struct_type, extra_parameters=None, nameOverride=None):
"""
Emits a JSDoc @typedef for a struct.
"""
extra_parameters = extra_parameters if extra_parameters is not None else []
self._emit_jsdoc_header(struct_type.doc)
self.emit(
' * @typedef {Object} %s' % (
nameOverride if nameOverride else fmt_type_name(struct_type)
)
)
# Some structs can explicitly list their subtypes. These structs
# have a .tag field that indicate which subtype they are.
if struct_type.is_member_of_enumerated_subtypes_tree():
if struct_type.has_enumerated_subtypes():
# This struct is the parent to multiple subtypes.
# Determine all of the possible values of the .tag
# property.
tag_values = []
for tags, _ in struct_type.get_all_subtypes_with_tags():
for tag in tags:
tag_values.append('"%s"' % tag)
jsdoc_tag_union = fmt_jsdoc_union(tag_values)
txt = '@property {%s} .tag - Tag identifying the subtype variant.' % \
jsdoc_tag_union
self.emit_wrapped_text(txt)
else:
# This struct is a particular subtype. Find the applicable
# .tag value from the parent type, which may be an
# arbitrary number of steps up the inheritance hierarchy.
parent = struct_type.parent_type
while not parent.has_enumerated_subtypes():
parent = parent.parent_type
# parent now contains the closest parent type in the
# inheritance hierarchy that has enumerated subtypes.
# Determine which subtype this is.
for subtype in parent.get_enumerated_subtypes():
if subtype.data_type == struct_type:
txt = '@property {\'%s\'} [.tag] - Tag identifying ' \
'this subtype variant. This field is only ' \
'present when needed to discriminate ' \
'between multiple possible subtypes.' % \
subtype.name
self.emit_wrapped_text(txt)
break
for param_name, param_type, param_docstring in extra_parameters:
param_docstring = ' - %s' % param_docstring if param_docstring else ''
self.emit_wrapped_text(
'@property {%s} %s%s' % (
param_type,
param_name,
param_docstring,
),
prefix=' * ',
)
# NOTE: JSDoc @typedef does not support inheritance. Using @class would be inappropriate,
# since these are not nominal types backed by a constructor. Thus, we emit all_fields,
# which includes fields on parent types.
for field in struct_type.all_fields:
field_doc = ' - ' + field.doc if field.doc else ''
field_type, nullable, _ = unwrap(field.data_type)
field_js_type = fmt_type(field_type)
# Translate nullable types into optional properties.
field_name = '[' + field.name + ']' if nullable else field.name
self.emit_wrapped_text(
'@property {%s} %s%s' % (
field_js_type,
field_name,
self.process_doc(field_doc, self._docf),
),
prefix=' * ',
)
self.emit(' */') |
def add_ms1_quant_from_top3_mzidtsv(proteins, psms, headerfields, protcol):
"""Collects PSMs with the highes precursor quant values,
adds sum of the top 3 of these to a protein table"""
if not protcol:
protcol = mzidtsvdata.HEADER_MASTER_PROT
top_ms1_psms = generate_top_psms(psms, protcol)
for protein in proteins:
prot_acc = protein[prottabledata.HEADER_PROTEIN]
prec_area = calculate_protein_precursor_quant(top_ms1_psms, prot_acc)
outprotein = {k: v for k, v in protein.items()}
outprotein[headerfields['precursorquant'][
prottabledata.HEADER_AREA][None]] = str(prec_area)
yield outprotein | Collects PSMs with the highest precursor quant values,
adds sum of the top 3 of these to a protein table | Below is the the instruction that describes the task:
### Input:
Collects PSMs with the highest precursor quant values,
adds sum of the top 3 of these to a protein table
### Response:
def add_ms1_quant_from_top3_mzidtsv(proteins, psms, headerfields, protcol):
"""Collects PSMs with the highes precursor quant values,
adds sum of the top 3 of these to a protein table"""
if not protcol:
protcol = mzidtsvdata.HEADER_MASTER_PROT
top_ms1_psms = generate_top_psms(psms, protcol)
for protein in proteins:
prot_acc = protein[prottabledata.HEADER_PROTEIN]
prec_area = calculate_protein_precursor_quant(top_ms1_psms, prot_acc)
outprotein = {k: v for k, v in protein.items()}
outprotein[headerfields['precursorquant'][
prottabledata.HEADER_AREA][None]] = str(prec_area)
yield outprotein |
def print_status(self, repo):
"""Print status
"""
print(" {0}{1}{2}".format(repo, " " * (19 - len(repo)), self.st)) | Print status | Below is the the instruction that describes the task:
### Input:
Print status
### Response:
def print_status(self, repo):
"""Print status
"""
print(" {0}{1}{2}".format(repo, " " * (19 - len(repo)), self.st)) |
def analyze(self, output_folder=".", auto_remove=False):
"""
:type auto_remove: boolean
:param boolean auto_remove: auto remove previous files in analyze folder
"""
if auto_remove:
try:
shutil.rmtree(output_folder)
except:
pass
try:
mkdir(output_folder)
except:
pass
tokens = [token for sublist in self.sentences for token in sublist]
df = pd.DataFrame(tokens)
log = u""
log += u"Sentences : {}\n".format(len(self.sentences))
n = df.shape[1]
log += self._analyze_first_token(df, 0, output_folder)
for i in range(1, n):
log += self._analyze_field(df, i, output_folder)
print(log)
stat_file = join(output_folder, "stats.txt")
write(stat_file, log) | :type auto_remove: boolean
:param boolean auto_remove: auto remove previous files in analyze folder | Below is the the instruction that describes the task:
### Input:
:type auto_remove: boolean
:param boolean auto_remove: auto remove previous files in analyze folder
### Response:
def analyze(self, output_folder=".", auto_remove=False):
"""
:type auto_remove: boolean
:param boolean auto_remove: auto remove previous files in analyze folder
"""
if auto_remove:
try:
shutil.rmtree(output_folder)
except:
pass
try:
mkdir(output_folder)
except:
pass
tokens = [token for sublist in self.sentences for token in sublist]
df = pd.DataFrame(tokens)
log = u""
log += u"Sentences : {}\n".format(len(self.sentences))
n = df.shape[1]
log += self._analyze_first_token(df, 0, output_folder)
for i in range(1, n):
log += self._analyze_field(df, i, output_folder)
print(log)
stat_file = join(output_folder, "stats.txt")
write(stat_file, log) |
def enable_gtk3(self, app=None):
"""Enable event loop integration with Gtk3 (gir bindings).
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the PyOS_InputHook for Gtk3, which allows
the Gtk3 to integrate with terminal based applications like
IPython.
"""
from pydev_ipython.inputhookgtk3 import create_inputhook_gtk3
self.set_inputhook(create_inputhook_gtk3(self._stdin_file))
self._current_gui = GUI_GTK | Enable event loop integration with Gtk3 (gir bindings).
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the PyOS_InputHook for Gtk3, which allows
the Gtk3 to integrate with terminal based applications like
IPython. | Below is the the instruction that describes the task:
### Input:
Enable event loop integration with Gtk3 (gir bindings).
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This methods sets the PyOS_InputHook for Gtk3, which allows
the Gtk3 to integrate with terminal based applications like
IPython.
### Response:
def enable_gtk3(self, app=None):
"""Enable event loop integration with Gtk3 (gir bindings).
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
    This method sets the PyOS_InputHook for Gtk3, which allows
the Gtk3 to integrate with terminal based applications like
IPython.
"""
from pydev_ipython.inputhookgtk3 import create_inputhook_gtk3
self.set_inputhook(create_inputhook_gtk3(self._stdin_file))
self._current_gui = GUI_GTK |
def _clear(self, pipe=None):
"""Helper for clear operations.
:param pipe: Redis pipe in case update is performed as a part
of transaction.
:type pipe: :class:`redis.client.StrictPipeline` or
:class:`redis.client.StrictRedis`
"""
redis = self.redis if pipe is None else pipe
redis.delete(self.key) | Helper for clear operations.
:param pipe: Redis pipe in case update is performed as a part
of transaction.
:type pipe: :class:`redis.client.StrictPipeline` or
:class:`redis.client.StrictRedis` | Below is the instruction that describes the task:
### Input:
Helper for clear operations.
:param pipe: Redis pipe in case update is performed as a part
of transaction.
:type pipe: :class:`redis.client.StrictPipeline` or
:class:`redis.client.StrictRedis`
### Response:
def _clear(self, pipe=None):
"""Helper for clear operations.
:param pipe: Redis pipe in case update is performed as a part
of transaction.
:type pipe: :class:`redis.client.StrictPipeline` or
:class:`redis.client.StrictRedis`
"""
redis = self.redis if pipe is None else pipe
redis.delete(self.key) |
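For context, a rough sketch of how a clear helper like the one above is typically exercised with redis-py, both directly and inside a transaction pipeline. The connection settings and the key name are illustrative assumptions, not taken from the original source:

import redis

r = redis.StrictRedis()                # assumes a local Redis on the default port
r.delete("mycollection")               # immediate delete, no pipe passed

pipe = r.pipeline()                    # transactional variant: queue several commands
pipe.delete("mycollection")
pipe.rpush("mycollection", "a", "b")
pipe.execute()                         # everything is sent and applied together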
def send_command_ack(self, device_id, action):
    """Send command, wait for gateway to respond with acknowledgment."""
# serialize commands
yield from self._ready_to_send.acquire()
acknowledgement = None
try:
self._command_ack.clear()
self.send_command(device_id, action)
log.debug('waiting for acknowledgement')
try:
yield from asyncio.wait_for(self._command_ack.wait(),
TIMEOUT.seconds, loop=self.loop)
log.debug('packet acknowledged')
except concurrent.futures._base.TimeoutError:
acknowledgement = {'ok': False, 'message': 'timeout'}
log.warning('acknowledge timeout')
else:
acknowledgement = self._last_ack.get('ok', False)
finally:
# allow next command
self._ready_to_send.release()
    return acknowledgement | Send command, wait for gateway to respond with acknowledgment. | Below is the instruction that describes the task:
### Input:
Send command, wait for gateway to respond with acknowledgment.
### Response:
def send_command_ack(self, device_id, action):
    """Send command, wait for gateway to respond with acknowledgment."""
# serialize commands
yield from self._ready_to_send.acquire()
acknowledgement = None
try:
self._command_ack.clear()
self.send_command(device_id, action)
log.debug('waiting for acknowledgement')
try:
yield from asyncio.wait_for(self._command_ack.wait(),
TIMEOUT.seconds, loop=self.loop)
log.debug('packet acknowledged')
except concurrent.futures._base.TimeoutError:
acknowledgement = {'ok': False, 'message': 'timeout'}
log.warning('acknowledge timeout')
else:
acknowledgement = self._last_ack.get('ok', False)
finally:
# allow next command
self._ready_to_send.release()
return acknowledgement |
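The acknowledge-with-timeout pattern above can be distilled into a small self-contained sketch using present-day asyncio syntax; the event object and the 5-second timeout are assumptions for illustration only:

import asyncio

async def wait_for_ack(ack_event: asyncio.Event, timeout: float = 5.0) -> bool:
    # Block until the protocol sets the event, or give up after `timeout` seconds.
    try:
        await asyncio.wait_for(ack_event.wait(), timeout)
        return True
    except asyncio.TimeoutError:
        return False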
def _stripe_object_to_refunds(cls, target_cls, data, charge):
"""
Retrieves Refunds for a charge
:param target_cls: The target class to instantiate per invoice item.
:type target_cls: ``Refund``
:param data: The data dictionary received from the Stripe API.
:type data: dict
:param charge: The charge object that refunds are for.
:type invoice: ``djstripe.models.Refund``
:return:
"""
refunds = data.get("refunds")
if not refunds:
return []
refund_objs = []
for refund_data in refunds.get("data", []):
item, _ = target_cls._get_or_create_from_stripe_object(refund_data, refetch=False)
refund_objs.append(item)
return refund_objs | Retrieves Refunds for a charge
:param target_cls: The target class to instantiate per invoice item.
:type target_cls: ``Refund``
:param data: The data dictionary received from the Stripe API.
:type data: dict
:param charge: The charge object that refunds are for.
:type invoice: ``djstripe.models.Refund``
:return: | Below is the instruction that describes the task:
### Input:
Retrieves Refunds for a charge
:param target_cls: The target class to instantiate per invoice item.
:type target_cls: ``Refund``
:param data: The data dictionary received from the Stripe API.
:type data: dict
:param charge: The charge object that refunds are for.
:type invoice: ``djstripe.models.Refund``
:return:
### Response:
def _stripe_object_to_refunds(cls, target_cls, data, charge):
"""
Retrieves Refunds for a charge
:param target_cls: The target class to instantiate per invoice item.
:type target_cls: ``Refund``
:param data: The data dictionary received from the Stripe API.
:type data: dict
:param charge: The charge object that refunds are for.
:type invoice: ``djstripe.models.Refund``
:return:
"""
refunds = data.get("refunds")
if not refunds:
return []
refund_objs = []
for refund_data in refunds.get("data", []):
item, _ = target_cls._get_or_create_from_stripe_object(refund_data, refetch=False)
refund_objs.append(item)
return refund_objs |
def is_valid_catalog(catalog, validator=None):
    """Validates that a `data.json` file complies with the defined schema.
    Checks that the data.json has all the required fields and that both
    the required and the optional fields follow the structure defined
    in the schema.
    Args:
        catalog (str or dict): Catalog (dict, JSON or XLSX) to be validated.
    Returns:
        bool: True if the data.json complies with the schema, otherwise False.
"""
catalog = readers.read_catalog(catalog)
if not validator:
if hasattr(catalog, "validator"):
validator = catalog.validator
else:
validator = create_validator()
jsonschema_res = validator.is_valid(catalog)
custom_errors = iter_custom_errors(catalog)
    return jsonschema_res and len(list(custom_errors)) == 0 | Validates that a `data.json` file complies with the defined schema.
Checks that the data.json has all the required fields and that both
the required and the optional fields follow the structure defined
in the schema.
Args:
    catalog (str or dict): Catalog (dict, JSON or XLSX) to be validated.
Returns:
    bool: True if the data.json complies with the schema, otherwise False. | Below is the instruction that describes the task:
### Input:
Validates that a `data.json` file complies with the defined schema.
Checks that the data.json has all the required fields and that both
the required and the optional fields follow the structure defined
in the schema.
Args:
    catalog (str or dict): Catalog (dict, JSON or XLSX) to be validated.
Returns:
    bool: True if the data.json complies with the schema, otherwise False.
### Response:
def is_valid_catalog(catalog, validator=None):
    """Validates that a `data.json` file complies with the defined schema.
    Checks that the data.json has all the required fields and that both
    the required and the optional fields follow the structure defined
    in the schema.
    Args:
        catalog (str or dict): Catalog (dict, JSON or XLSX) to be validated.
    Returns:
        bool: True if the data.json complies with the schema, otherwise False.
"""
catalog = readers.read_catalog(catalog)
if not validator:
if hasattr(catalog, "validator"):
validator = catalog.validator
else:
validator = create_validator()
jsonschema_res = validator.is_valid(catalog)
custom_errors = iter_custom_errors(catalog)
return jsonschema_res and len(list(custom_errors)) == 0 |
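A minimal sketch of the jsonschema half of the check above. The placeholder schema is an assumption; the real `create_validator()` loads the project's own schema files:

from jsonschema import Draft4Validator

schema = {"type": "object", "required": ["title"]}   # placeholder, not the real catalog schema
validator = Draft4Validator(schema)
print(validator.is_valid({"title": "my catalog"}))   # True
print(validator.is_valid({}))                        # False: "title" is missing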
def _write_wrapper(self, name):
"""Wrap write() to adapt return value for Python 2.
Returns:
Wrapper which is described below.
"""
io_attr = getattr(self._io, name)
def write_wrapper(*args, **kwargs):
"""Wrap all write calls to the stream object."""
ret_value = io_attr(*args, **kwargs)
if not IS_PY2:
return ret_value
return write_wrapper | Wrap write() to adapt return value for Python 2.
Returns:
    Wrapper which is described below. | Below is the instruction that describes the task:
### Input:
Wrap write() to adapt return value for Python 2.
Returns:
Wrapper which is described below.
### Response:
def _write_wrapper(self, name):
"""Wrap write() to adapt return value for Python 2.
Returns:
Wrapper which is described below.
"""
io_attr = getattr(self._io, name)
def write_wrapper(*args, **kwargs):
"""Wrap all write calls to the stream object."""
ret_value = io_attr(*args, **kwargs)
if not IS_PY2:
return ret_value
return write_wrapper |
def advance_job_status(namespace: str, job: Job, duration: float,
err: Optional[Exception]):
"""Advance the status of a job depending on its execution.
This function is called after a job has been executed. It calculates its
next status and calls the appropriate signals.
"""
duration = human_duration(duration)
if not err:
job.status = JobStatus.SUCCEEDED
logger.info('Finished execution of %s in %s', job, duration)
return
if job.should_retry:
job.status = JobStatus.NOT_SET
job.retries += 1
if isinstance(err, RetryException) and err.at is not None:
job.at = err.at
else:
job.at = (datetime.now(timezone.utc) +
exponential_backoff(job.retries))
signals.job_schedule_retry.send(namespace, job=job, err=err)
log_args = (
job.retries, job.max_retries + 1, job, duration,
human_duration(
(job.at - datetime.now(tz=timezone.utc)).total_seconds()
)
)
if isinstance(err, RetryException):
logger.info('Retry requested during execution %d/%d of %s '
'after %s, retry in %s', *log_args)
else:
logger.warning('Error during execution %d/%d of %s after %s, '
'retry in %s', *log_args)
return
job.status = JobStatus.FAILED
signals.job_failed.send(namespace, job=job, err=err)
logger.error(
'Error during execution %d/%d of %s after %s',
job.max_retries + 1, job.max_retries + 1, job, duration,
exc_info=err
) | Advance the status of a job depending on its execution.
This function is called after a job has been executed. It calculates its
next status and calls the appropriate signals. | Below is the instruction that describes the task:
### Input:
Advance the status of a job depending on its execution.
This function is called after a job has been executed. It calculates its
next status and calls the appropriate signals.
### Response:
def advance_job_status(namespace: str, job: Job, duration: float,
err: Optional[Exception]):
"""Advance the status of a job depending on its execution.
This function is called after a job has been executed. It calculates its
next status and calls the appropriate signals.
"""
duration = human_duration(duration)
if not err:
job.status = JobStatus.SUCCEEDED
logger.info('Finished execution of %s in %s', job, duration)
return
if job.should_retry:
job.status = JobStatus.NOT_SET
job.retries += 1
if isinstance(err, RetryException) and err.at is not None:
job.at = err.at
else:
job.at = (datetime.now(timezone.utc) +
exponential_backoff(job.retries))
signals.job_schedule_retry.send(namespace, job=job, err=err)
log_args = (
job.retries, job.max_retries + 1, job, duration,
human_duration(
(job.at - datetime.now(tz=timezone.utc)).total_seconds()
)
)
if isinstance(err, RetryException):
logger.info('Retry requested during execution %d/%d of %s '
'after %s, retry in %s', *log_args)
else:
logger.warning('Error during execution %d/%d of %s after %s, '
'retry in %s', *log_args)
return
job.status = JobStatus.FAILED
signals.job_failed.send(namespace, job=job, err=err)
logger.error(
'Error during execution %d/%d of %s after %s',
job.max_retries + 1, job.max_retries + 1, job, duration,
exc_info=err
) |
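The `exponential_backoff` helper called above is not shown here. A common shape for such a helper — an assumption, not the project's actual implementation — is capped exponential growth with jitter:

import random
from datetime import timedelta

def exponential_backoff(attempt: int, cap: float = 1200.0) -> timedelta:
    # 2**attempt seconds with full jitter, capped so a retry never waits more than `cap` seconds.
    base = min(cap, 2 ** attempt)
    return timedelta(seconds=random.uniform(0, base))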
def pipes(stream, *transformers):
"""Pipe several transformers end to end."""
for transformer in transformers:
stream = stream.pipe(transformer)
    return stream | Pipe several transformers end to end. | Below is the instruction that describes the task:
### Input:
Pipe several transformers end to end.
### Response:
def pipes(stream, *transformers):
"""Pipe several transformers end to end."""
for transformer in transformers:
stream = stream.pipe(transformer)
return stream |
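A toy end-to-end use of the `pipes` helper above. The ListStream stub and the two transformers are illustrative assumptions standing in for whatever stream objects actually implement `.pipe()`:

class ListStream:
    def __init__(self, items):
        self.items = list(items)

    def pipe(self, transformer):
        # A transformer here is simply a callable taking and returning a stream.
        return transformer(self)

double = lambda s: ListStream(x * 2 for x in s.items)
inc = lambda s: ListStream(x + 1 for x in s.items)

result = pipes(ListStream([1, 2, 3]), double, inc)
print(result.items)   # [3, 5, 7]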
def delete(queue, items):
'''
Delete an item or items from a queue
'''
with _conn(commit=True) as cur:
if isinstance(items, dict):
cmd = str("""DELETE FROM {0} WHERE data = '{1}'""").format( # future lint: disable=blacklisted-function
queue,
salt.utils.json.dumps(items))
log.debug('SQL Query: %s', cmd)
cur.execute(cmd)
return True
if isinstance(items, list):
items = [(salt.utils.json.dumps(el),) for el in items]
cmd = 'DELETE FROM {0} WHERE data = %s'.format(queue)
log.debug('SQL Query: %s', cmd)
cur.executemany(cmd, items)
            return True | Delete an item or items from a queue | Below is the instruction that describes the task:
### Input:
Delete an item or items from a queue
### Response:
def delete(queue, items):
'''
Delete an item or items from a queue
'''
with _conn(commit=True) as cur:
if isinstance(items, dict):
cmd = str("""DELETE FROM {0} WHERE data = '{1}'""").format( # future lint: disable=blacklisted-function
queue,
salt.utils.json.dumps(items))
log.debug('SQL Query: %s', cmd)
cur.execute(cmd)
return True
if isinstance(items, list):
items = [(salt.utils.json.dumps(el),) for el in items]
cmd = 'DELETE FROM {0} WHERE data = %s'.format(queue)
log.debug('SQL Query: %s', cmd)
cur.executemany(cmd, items)
return True |
def aggregate(self, aggregates=None, drilldowns=None, cuts=None,
order=None, page=None, page_size=None, page_max=None):
"""Main aggregation function. This is used to compute a given set of
aggregates, grouped by a given set of drilldown dimensions (i.e.
dividers). The query can also be filtered and sorted. """
def prep(cuts, drilldowns=False, aggregates=False, columns=None):
q = select(columns)
bindings = []
cuts, q, bindings = Cuts(self).apply(q, bindings, cuts)
attributes = None
if drilldowns is not False:
attributes, q, bindings = Drilldowns(self).apply(
q,
bindings,
drilldowns
)
if aggregates is not False:
aggregates, q, bindings = Aggregates(self).apply(
q,
bindings,
aggregates
)
q = self.restrict_joins(q, bindings)
return q, bindings, attributes, aggregates, cuts
# Count
count = count_results(self, prep(cuts,
drilldowns=drilldowns,
columns=[1])[0])
# Summary
summary = first_result(self, prep(cuts,
aggregates=aggregates)[0].limit(1))
# Results
q, bindings, attributes, aggregates, cuts = \
prep(cuts, drilldowns=drilldowns, aggregates=aggregates)
page, q = Pagination(self).apply(q, page, page_size, page_max)
ordering, q, bindings = Ordering(self).apply(q, bindings, order)
q = self.restrict_joins(q, bindings)
cells = list(generate_results(self, q))
return {
'total_cell_count': count,
'cells': cells,
'summary': summary,
'cell': cuts,
'aggregates': aggregates,
'attributes': attributes,
'order': ordering,
'page': page['page'],
'page_size': page['page_size']
} | Main aggregation function. This is used to compute a given set of
aggregates, grouped by a given set of drilldown dimensions (i.e.
dividers). The query can also be filtered and sorted. | Below is the instruction that describes the task:
### Input:
Main aggregation function. This is used to compute a given set of
aggregates, grouped by a given set of drilldown dimensions (i.e.
dividers). The query can also be filtered and sorted.
### Response:
def aggregate(self, aggregates=None, drilldowns=None, cuts=None,
order=None, page=None, page_size=None, page_max=None):
"""Main aggregation function. This is used to compute a given set of
aggregates, grouped by a given set of drilldown dimensions (i.e.
dividers). The query can also be filtered and sorted. """
def prep(cuts, drilldowns=False, aggregates=False, columns=None):
q = select(columns)
bindings = []
cuts, q, bindings = Cuts(self).apply(q, bindings, cuts)
attributes = None
if drilldowns is not False:
attributes, q, bindings = Drilldowns(self).apply(
q,
bindings,
drilldowns
)
if aggregates is not False:
aggregates, q, bindings = Aggregates(self).apply(
q,
bindings,
aggregates
)
q = self.restrict_joins(q, bindings)
return q, bindings, attributes, aggregates, cuts
# Count
count = count_results(self, prep(cuts,
drilldowns=drilldowns,
columns=[1])[0])
# Summary
summary = first_result(self, prep(cuts,
aggregates=aggregates)[0].limit(1))
# Results
q, bindings, attributes, aggregates, cuts = \
prep(cuts, drilldowns=drilldowns, aggregates=aggregates)
page, q = Pagination(self).apply(q, page, page_size, page_max)
ordering, q, bindings = Ordering(self).apply(q, bindings, order)
q = self.restrict_joins(q, bindings)
cells = list(generate_results(self, q))
return {
'total_cell_count': count,
'cells': cells,
'summary': summary,
'cell': cuts,
'aggregates': aggregates,
'attributes': attributes,
'order': ordering,
'page': page['page'],
'page_size': page['page_size']
} |
def add_size_info (self):
"""Get size of URL content from HTTP header."""
if self.headers and "Content-Length" in self.headers and \
"Transfer-Encoding" not in self.headers:
# Note that content-encoding causes size differences since
# the content data is always decoded.
try:
self.size = int(self.getheader("Content-Length"))
except (ValueError, OverflowError):
pass
else:
            self.size = -1 | Get size of URL content from HTTP header. | Below is the instruction that describes the task:
### Input:
Get size of URL content from HTTP header.
### Response:
def add_size_info (self):
"""Get size of URL content from HTTP header."""
if self.headers and "Content-Length" in self.headers and \
"Transfer-Encoding" not in self.headers:
# Note that content-encoding causes size differences since
# the content data is always decoded.
try:
self.size = int(self.getheader("Content-Length"))
except (ValueError, OverflowError):
pass
else:
self.size = -1 |
def get_parameter_dict(self, include_frozen=False):
"""
Get an ordered dictionary of the parameters
Args:
include_frozen (Optional[bool]): Should the frozen parameters be
included in the returned value? (default: ``False``)
"""
return OrderedDict(zip(
self.get_parameter_names(include_frozen=include_frozen),
self.get_parameter_vector(include_frozen=include_frozen),
)) | Get an ordered dictionary of the parameters
Args:
include_frozen (Optional[bool]): Should the frozen parameters be
        included in the returned value? (default: ``False``) | Below is the instruction that describes the task:
### Input:
Get an ordered dictionary of the parameters
Args:
include_frozen (Optional[bool]): Should the frozen parameters be
included in the returned value? (default: ``False``)
### Response:
def get_parameter_dict(self, include_frozen=False):
"""
Get an ordered dictionary of the parameters
Args:
include_frozen (Optional[bool]): Should the frozen parameters be
included in the returned value? (default: ``False``)
"""
return OrderedDict(zip(
self.get_parameter_names(include_frozen=include_frozen),
self.get_parameter_vector(include_frozen=include_frozen),
)) |
def _other_dpss_method(N, NW, Kmax):
"""Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1]
for a given frequency-spacing multiple NW and sequence length N.
    See dpss function that is the official version. This version is independent
of the C code and relies on Scipy function. However, it is slower by a factor 3
Tridiagonal form of DPSS calculation from:
"""
# here we want to set up an optimization problem to find a sequence
# whose energy is maximally concentrated within band [-W,W].
# Thus, the measure lambda(T,W) is the ratio between the energy within
# that band, and the total energy. This leads to the eigen-system
# (A - (l1)I)v = 0, where the eigenvector corresponding to the largest
# eigenvalue is the sequence with maximally concentrated energy. The
# collection of eigenvectors of this system are called Slepian sequences,
# or discrete prolate spheroidal sequences (DPSS). Only the first K,
# K = 2NW/dt orders of DPSS will exhibit good spectral concentration
# [see http://en.wikipedia.org/wiki/Spectral_concentration_problem]
# Here I set up an alternative symmetric tri-diagonal eigenvalue problem
# such that
# (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)
# the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]
# and the first off-diangonal = t(N-t)/2, t=[1,2,...,N-1]
# [see Percival and Walden, 1993]
from scipy import linalg as la
Kmax = int(Kmax)
W = float(NW)/N
ab = np.zeros((2,N), 'd')
nidx = np.arange(N)
ab[0,1:] = nidx[1:]*(N-nidx[1:])/2.
ab[1] = ((N-1-2*nidx)/2.)**2 * np.cos(2*np.pi*W)
# only calculate the highest Kmax-1 eigenvectors
l,v = la.eig_banded(ab, select='i', select_range=(N-Kmax, N-1))
dpss = v.transpose()[::-1]
# By convention (Percival and Walden, 1993 pg 379)
# * symmetric tapers (k=0,2,4,...) should have a positive average.
# * antisymmetric tapers should begin with a positive lobe
fix_symmetric = (dpss[0::2].sum(axis=1) < 0)
for i, f in enumerate(fix_symmetric):
if f:
dpss[2*i] *= -1
fix_skew = (dpss[1::2,1] < 0)
for i, f in enumerate(fix_skew):
if f:
dpss[2*i+1] *= -1
# Now find the eigenvalues of the original
# Use the autocovariance sequence technique from Percival and Walden, 1993
# pg 390
# XXX : why debias false? it's all messed up o.w., even with means
# on the order of 1e-2
acvs = _autocov(dpss, debias=False) * N
r = 4*W*np.sinc(2*W*nidx)
r[0] = 2*W
eigvals = np.dot(acvs, r)
return dpss, eigvals | Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1]
for a given frequency-spacing multiple NW and sequence length N.
See dpss function that is the official version. This version is independent
of the C code and relies on Scipy function. However, it is slower by a factor 3
Tridiagonal form of DPSS calculation from: | Below is the instruction that describes the task:
### Input:
Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1]
for a given frequency-spacing multiple NW and sequence length N.
See dpss function that is the official version. This version is independent
of the C code and relies on Scipy function. However, it is slower by a factor 3
Tridiagonal form of DPSS calculation from:
### Response:
def _other_dpss_method(N, NW, Kmax):
"""Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1]
for a given frequency-spacing multiple NW and sequence length N.
    See dpss function that is the official version. This version is independent
of the C code and relies on Scipy function. However, it is slower by a factor 3
Tridiagonal form of DPSS calculation from:
"""
# here we want to set up an optimization problem to find a sequence
# whose energy is maximally concentrated within band [-W,W].
# Thus, the measure lambda(T,W) is the ratio between the energy within
# that band, and the total energy. This leads to the eigen-system
# (A - (l1)I)v = 0, where the eigenvector corresponding to the largest
# eigenvalue is the sequence with maximally concentrated energy. The
# collection of eigenvectors of this system are called Slepian sequences,
# or discrete prolate spheroidal sequences (DPSS). Only the first K,
# K = 2NW/dt orders of DPSS will exhibit good spectral concentration
# [see http://en.wikipedia.org/wiki/Spectral_concentration_problem]
# Here I set up an alternative symmetric tri-diagonal eigenvalue problem
# such that
# (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)
# the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]
# and the first off-diangonal = t(N-t)/2, t=[1,2,...,N-1]
# [see Percival and Walden, 1993]
from scipy import linalg as la
Kmax = int(Kmax)
W = float(NW)/N
ab = np.zeros((2,N), 'd')
nidx = np.arange(N)
ab[0,1:] = nidx[1:]*(N-nidx[1:])/2.
ab[1] = ((N-1-2*nidx)/2.)**2 * np.cos(2*np.pi*W)
# only calculate the highest Kmax-1 eigenvectors
l,v = la.eig_banded(ab, select='i', select_range=(N-Kmax, N-1))
dpss = v.transpose()[::-1]
# By convention (Percival and Walden, 1993 pg 379)
# * symmetric tapers (k=0,2,4,...) should have a positive average.
# * antisymmetric tapers should begin with a positive lobe
fix_symmetric = (dpss[0::2].sum(axis=1) < 0)
for i, f in enumerate(fix_symmetric):
if f:
dpss[2*i] *= -1
fix_skew = (dpss[1::2,1] < 0)
for i, f in enumerate(fix_skew):
if f:
dpss[2*i+1] *= -1
# Now find the eigenvalues of the original
# Use the autocovariance sequence technique from Percival and Walden, 1993
# pg 390
# XXX : why debias false? it's all messed up o.w., even with means
# on the order of 1e-2
acvs = _autocov(dpss, debias=False) * N
r = 4*W*np.sinc(2*W*nidx)
r[0] = 2*W
eigvals = np.dot(acvs, r)
return dpss, eigvals |
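For comparison, recent SciPy releases expose an equivalent routine, so in practice the tapers computed above can usually be obtained directly; SciPy >= 1.1 is assumed here:

from scipy.signal import windows

# 4 tapers of length 512 with time-bandwidth product NW = 2.5
tapers, ratios = windows.dpss(512, 2.5, Kmax=4, return_ratios=True)
print(tapers.shape)   # (4, 512)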
def attach_http_service(cls, http_service: HTTPService):
""" Attaches a service for hosting
:param http_service: A HTTPService instance
"""
if cls._http_service is None:
cls._http_service = http_service
cls._set_bus(http_service)
else:
warnings.warn('HTTP service is already attached') | Attaches a service for hosting
:param http_service: A HTTPService instance | Below is the instruction that describes the task:
### Input:
Attaches a service for hosting
:param http_service: A HTTPService instance
### Response:
def attach_http_service(cls, http_service: HTTPService):
""" Attaches a service for hosting
:param http_service: A HTTPService instance
"""
if cls._http_service is None:
cls._http_service = http_service
cls._set_bus(http_service)
else:
warnings.warn('HTTP service is already attached') |
def open(self, url):
"""
Open a WSDL at the specified I{url}.
        First, an attempt is made to retrieve the WSDL from
        the I{object cache}. After being unpickled from the cache, the
I{options} attribute is restored.
If not found, it is downloaded and instantiated using the
I{fn} constructor and added to the cache for the next open().
@param url: A WSDL url.
@type url: str.
@return: The WSDL object.
@rtype: I{Definitions}
"""
cache = self.cache()
id = self.mangle(url, 'wsdl')
d = cache.get(id)
if d is None:
d = self.fn(url, self.options)
cache.put(id, d)
else:
d.options = self.options
for imp in d.imports:
imp.imported.options = self.options
return d | Open a WSDL at the specified I{url}.
First, an attempt is made to retrieve the WSDL from
the I{object cache}. After being unpickled from the cache, the
I{options} attribute is restored.
If not found, it is downloaded and instantiated using the
I{fn} constructor and added to the cache for the next open().
@param url: A WSDL url.
@type url: str.
@return: The WSDL object.
@rtype: I{Definitions} | Below is the instruction that describes the task:
### Input:
Open a WSDL at the specified I{url}.
First, an attempt is made to retrieve the WSDL from
the I{object cache}. After being unpickled from the cache, the
I{options} attribute is restored.
If not found, it is downloaded and instantiated using the
I{fn} constructor and added to the cache for the next open().
@param url: A WSDL url.
@type url: str.
@return: The WSDL object.
@rtype: I{Definitions}
### Response:
def open(self, url):
"""
Open a WSDL at the specified I{url}.
        First, an attempt is made to retrieve the WSDL from
        the I{object cache}. After being unpickled from the cache, the
I{options} attribute is restored.
If not found, it is downloaded and instantiated using the
I{fn} constructor and added to the cache for the next open().
@param url: A WSDL url.
@type url: str.
@return: The WSDL object.
@rtype: I{Definitions}
"""
cache = self.cache()
id = self.mangle(url, 'wsdl')
d = cache.get(id)
if d is None:
d = self.fn(url, self.options)
cache.put(id, d)
else:
d.options = self.options
for imp in d.imports:
imp.imported.options = self.options
return d |
def parse_sections(self, offset):
"""Fetch the PE file sections.
The sections will be readily available in the "sections" attribute.
Its attributes will contain all the section information plus "data"
a buffer containing the section's data.
The "Characteristics" member will be processed and attributes
representing the section characteristics (with the 'IMAGE_SCN_'
string trimmed from the constant's names) will be added to the
section instance.
Refer to the SectionStructure class for additional info.
"""
self.sections = []
for i in xrange(self.FILE_HEADER.NumberOfSections):
section = SectionStructure( self.__IMAGE_SECTION_HEADER_format__, pe=self )
if not section:
break
section_offset = offset + section.sizeof() * i
section.set_file_offset(section_offset)
section.__unpack__(self.__data__[section_offset : section_offset + section.sizeof()])
self.__structures__.append(section)
if section.SizeOfRawData > len(self.__data__):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'SizeOfRawData is larger than file.')
if adjust_FileAlignment( section.PointerToRawData,
self.OPTIONAL_HEADER.FileAlignment ) > len(self.__data__):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'PointerToRawData points beyond the end of the file.')
if section.Misc_VirtualSize > 0x10000000:
self.__warnings.append(
('Suspicious value found parsing section %d. ' % i) +
'VirtualSize is extremely large > 256MiB.')
if adjust_SectionAlignment( section.VirtualAddress,
self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) > 0x10000000:
self.__warnings.append(
('Suspicious value found parsing section %d. ' % i) +
'VirtualAddress is beyond 0x10000000.')
#
# Some packer used a non-aligned PointerToRawData in the sections,
# which causes several common tools not to load the section data
# properly as they blindly read from the indicated offset.
# It seems that Windows will round the offset down to the largest
# offset multiple of FileAlignment which is smaller than
# PointerToRawData. The following code will do the same.
#
#alignment = self.OPTIONAL_HEADER.FileAlignment
#self.update_section_data(section)
if ( self.OPTIONAL_HEADER.FileAlignment != 0 and
( section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment) != 0):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'Suspicious value for FileAlignment in the Optional Header. ' +
'Normally the PointerToRawData entry of the sections\' structures ' +
'is a multiple of FileAlignment, this might imply the file ' +
'is trying to confuse tools which parse this incorrectly')
section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
# Set the section's flags according the the Characteristics member
set_flags(section, section.Characteristics, section_flags)
if ( section.__dict__.get('IMAGE_SCN_MEM_WRITE', False) and
section.__dict__.get('IMAGE_SCN_MEM_EXECUTE', False) ):
self.__warnings.append(
('Suspicious flags set for section %d. ' % i) +
'Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set. ' +
'This might indicate a packed executable.')
self.sections.append(section)
if self.FILE_HEADER.NumberOfSections > 0 and self.sections:
return offset + self.sections[0].sizeof()*self.FILE_HEADER.NumberOfSections
else:
return offset | Fetch the PE file sections.
The sections will be readily available in the "sections" attribute.
Its attributes will contain all the section information plus "data"
a buffer containing the section's data.
The "Characteristics" member will be processed and attributes
representing the section characteristics (with the 'IMAGE_SCN_'
string trimmed from the constant's names) will be added to the
section instance.
Refer to the SectionStructure class for additional info. | Below is the instruction that describes the task:
### Input:
Fetch the PE file sections.
The sections will be readily available in the "sections" attribute.
Its attributes will contain all the section information plus "data"
a buffer containing the section's data.
The "Characteristics" member will be processed and attributes
representing the section characteristics (with the 'IMAGE_SCN_'
string trimmed from the constant's names) will be added to the
section instance.
Refer to the SectionStructure class for additional info.
### Response:
def parse_sections(self, offset):
"""Fetch the PE file sections.
The sections will be readily available in the "sections" attribute.
Its attributes will contain all the section information plus "data"
a buffer containing the section's data.
The "Characteristics" member will be processed and attributes
representing the section characteristics (with the 'IMAGE_SCN_'
string trimmed from the constant's names) will be added to the
section instance.
Refer to the SectionStructure class for additional info.
"""
self.sections = []
for i in xrange(self.FILE_HEADER.NumberOfSections):
section = SectionStructure( self.__IMAGE_SECTION_HEADER_format__, pe=self )
if not section:
break
section_offset = offset + section.sizeof() * i
section.set_file_offset(section_offset)
section.__unpack__(self.__data__[section_offset : section_offset + section.sizeof()])
self.__structures__.append(section)
if section.SizeOfRawData > len(self.__data__):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'SizeOfRawData is larger than file.')
if adjust_FileAlignment( section.PointerToRawData,
self.OPTIONAL_HEADER.FileAlignment ) > len(self.__data__):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'PointerToRawData points beyond the end of the file.')
if section.Misc_VirtualSize > 0x10000000:
self.__warnings.append(
('Suspicious value found parsing section %d. ' % i) +
'VirtualSize is extremely large > 256MiB.')
if adjust_SectionAlignment( section.VirtualAddress,
self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) > 0x10000000:
self.__warnings.append(
('Suspicious value found parsing section %d. ' % i) +
'VirtualAddress is beyond 0x10000000.')
#
# Some packer used a non-aligned PointerToRawData in the sections,
# which causes several common tools not to load the section data
# properly as they blindly read from the indicated offset.
# It seems that Windows will round the offset down to the largest
# offset multiple of FileAlignment which is smaller than
# PointerToRawData. The following code will do the same.
#
#alignment = self.OPTIONAL_HEADER.FileAlignment
#self.update_section_data(section)
if ( self.OPTIONAL_HEADER.FileAlignment != 0 and
( section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment) != 0):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'Suspicious value for FileAlignment in the Optional Header. ' +
'Normally the PointerToRawData entry of the sections\' structures ' +
'is a multiple of FileAlignment, this might imply the file ' +
'is trying to confuse tools which parse this incorrectly')
section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
# Set the section's flags according the the Characteristics member
set_flags(section, section.Characteristics, section_flags)
if ( section.__dict__.get('IMAGE_SCN_MEM_WRITE', False) and
section.__dict__.get('IMAGE_SCN_MEM_EXECUTE', False) ):
self.__warnings.append(
('Suspicious flags set for section %d. ' % i) +
'Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set. ' +
'This might indicate a packed executable.')
self.sections.append(section)
if self.FILE_HEADER.NumberOfSections > 0 and self.sections:
return offset + self.sections[0].sizeof()*self.FILE_HEADER.NumberOfSections
else:
return offset |
def visit(self, visitor, predicate=None, **kw):
"""
Apply a function to matching nodes in the (sub)tree rooted at self.
        :param visitor: A callable accepting a Node object as single argument.
        :param predicate: A callable accepting a Node object as single argument and \
            returning a boolean signaling whether Node matches; if `None` all nodes match.
        :param kw: Additional keyword arguments are passed through to self.walk.
"""
predicate = predicate or bool
for n in self.walk(**kw):
if predicate(n):
visitor(n) | Apply a function to matching nodes in the (sub)tree rooted at self.
:param visitor: A callable accepting a Node object as single argument.
:param predicate: A callable accepting a Node object as single argument and \
returning a boolean signaling whether Node matches; if `None` all nodes match.
:param kw: Additional keyword arguments are passed through to self.walk. | Below is the instruction that describes the task:
### Input:
Apply a function to matching nodes in the (sub)tree rooted at self.
:param visitor: A callable accepting a Node object as single argument.
:param predicate: A callable accepting a Node object as single argument and \
returning a boolean signaling whether Node matches; if `None` all nodes match.
:param kw: Additional keyword arguments are passed through to self.walk.
### Response:
def visit(self, visitor, predicate=None, **kw):
"""
Apply a function to matching nodes in the (sub)tree rooted at self.
        :param visitor: A callable accepting a Node object as single argument.
        :param predicate: A callable accepting a Node object as single argument and \
            returning a boolean signaling whether Node matches; if `None` all nodes match.
        :param kw: Additional keyword arguments are passed through to self.walk.
"""
predicate = predicate or bool
for n in self.walk(**kw):
if predicate(n):
visitor(n) |
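A self-contained sketch of the visitor pattern above, with a minimal Node class providing the `walk()` traversal that `visit` relies on; the class itself is an illustrative assumption:

class Node:
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)

    def walk(self):
        # Depth-first traversal over self and all descendants.
        yield self
        for child in self.children:
            for node in child.walk():
                yield node

    def visit(self, visitor, predicate=None, **kw):
        predicate = predicate or bool
        for n in self.walk(**kw):
            if predicate(n):
                visitor(n)

root = Node("root", [Node("a"), Node("b", [Node("c")])])
root.visit(lambda n: print(n.name), predicate=lambda n: not n.children)   # prints the leaves: a, c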
def sample_categorical(prob, rng):
"""Sample from independent categorical distributions
Each batch is an independent categorical distribution.
Parameters
----------
prob : numpy.ndarray
Probability of the categorical distribution. Shape --> (batch_num, category_num)
rng : numpy.random.RandomState
Returns
-------
ret : numpy.ndarray
Sampling result. Shape --> (batch_num,)
"""
ret = numpy.empty(prob.shape[0], dtype=numpy.float32)
for ind in range(prob.shape[0]):
ret[ind] = numpy.searchsorted(numpy.cumsum(prob[ind]), rng.rand()).clip(min=0.0,
max=prob.shape[
1] - 0.5)
return ret | Sample from independent categorical distributions
Each batch is an independent categorical distribution.
Parameters
----------
prob : numpy.ndarray
Probability of the categorical distribution. Shape --> (batch_num, category_num)
rng : numpy.random.RandomState
Returns
-------
ret : numpy.ndarray
    Sampling result. Shape --> (batch_num,) | Below is the instruction that describes the task:
### Input:
Sample from independent categorical distributions
Each batch is an independent categorical distribution.
Parameters
----------
prob : numpy.ndarray
Probability of the categorical distribution. Shape --> (batch_num, category_num)
rng : numpy.random.RandomState
Returns
-------
ret : numpy.ndarray
Sampling result. Shape --> (batch_num,)
### Response:
def sample_categorical(prob, rng):
"""Sample from independent categorical distributions
Each batch is an independent categorical distribution.
Parameters
----------
prob : numpy.ndarray
Probability of the categorical distribution. Shape --> (batch_num, category_num)
rng : numpy.random.RandomState
Returns
-------
ret : numpy.ndarray
Sampling result. Shape --> (batch_num,)
"""
ret = numpy.empty(prob.shape[0], dtype=numpy.float32)
for ind in range(prob.shape[0]):
ret[ind] = numpy.searchsorted(numpy.cumsum(prob[ind]), rng.rand()).clip(min=0.0,
max=prob.shape[
1] - 0.5)
return ret |
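Example call for the sampler above; the probabilities and the seed are arbitrary:

import numpy

prob = numpy.array([[0.2, 0.5, 0.3],
                    [0.1, 0.1, 0.8]])    # two independent categorical distributions
rng = numpy.random.RandomState(0)
samples = sample_categorical(prob, rng)  # one sampled category index per row
print(samples.shape)                     # (2,)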
def get_module_verbosity_flags(*labels):
    """ checks for standard flags for enabling module specific verbosity """
verbose_prefix_list = ['--verbose-', '--verb', '--verb-']
veryverbose_prefix_list = ['--veryverbose-', '--veryverb', '--veryverb-']
verbose_flags = tuple(
[prefix + lbl for prefix, lbl in
itertools.product(verbose_prefix_list, labels)])
veryverbose_flags = tuple(
[prefix + lbl for prefix, lbl in
itertools.product(veryverbose_prefix_list, labels)])
veryverbose_module = get_argflag(veryverbose_flags) or VERYVERBOSE
verbose_module = (get_argflag(verbose_flags) or veryverbose_module or VERBOSE)
if veryverbose_module:
verbose_module = 2
    return verbose_module, veryverbose_module | checks for standard flags for enabling module specific verbosity | Below is the instruction that describes the task:
### Input:
checks for standard flags for enabling module specific verbosity
### Response:
def get_module_verbosity_flags(*labels):
    """ checks for standard flags for enabling module specific verbosity """
verbose_prefix_list = ['--verbose-', '--verb', '--verb-']
veryverbose_prefix_list = ['--veryverbose-', '--veryverb', '--veryverb-']
verbose_flags = tuple(
[prefix + lbl for prefix, lbl in
itertools.product(verbose_prefix_list, labels)])
veryverbose_flags = tuple(
[prefix + lbl for prefix, lbl in
itertools.product(veryverbose_prefix_list, labels)])
veryverbose_module = get_argflag(veryverbose_flags) or VERYVERBOSE
verbose_module = (get_argflag(verbose_flags) or veryverbose_module or VERBOSE)
if veryverbose_module:
verbose_module = 2
return verbose_module, veryverbose_module |
def from_export(cls, endpoint):
# type: (ExportEndpoint) -> EndpointDescription
"""
Converts an ExportEndpoint bean to an EndpointDescription
:param endpoint: An ExportEndpoint bean
:return: An EndpointDescription bean
"""
assert isinstance(endpoint, ExportEndpoint)
# Service properties
properties = endpoint.get_properties()
# Set import keys
properties[pelix.remote.PROP_ENDPOINT_ID] = endpoint.uid
properties[pelix.remote.PROP_IMPORTED_CONFIGS] = endpoint.configurations
properties[
pelix.remote.PROP_EXPORTED_INTERFACES
] = endpoint.specifications
# Remove export keys
for key in (
pelix.remote.PROP_EXPORTED_CONFIGS,
pelix.remote.PROP_EXPORTED_INTERFACES,
pelix.remote.PROP_EXPORTED_INTENTS,
pelix.remote.PROP_EXPORTED_INTENTS_EXTRA,
):
try:
del properties[key]
except KeyError:
pass
# Other information
properties[pelix.remote.PROP_ENDPOINT_NAME] = endpoint.name
properties[
pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID
] = endpoint.framework
return EndpointDescription(None, properties) | Converts an ExportEndpoint bean to an EndpointDescription
:param endpoint: An ExportEndpoint bean
:return: An EndpointDescription bean | Below is the instruction that describes the task:
### Input:
Converts an ExportEndpoint bean to an EndpointDescription
:param endpoint: An ExportEndpoint bean
:return: An EndpointDescription bean
### Response:
def from_export(cls, endpoint):
# type: (ExportEndpoint) -> EndpointDescription
"""
Converts an ExportEndpoint bean to an EndpointDescription
:param endpoint: An ExportEndpoint bean
:return: An EndpointDescription bean
"""
assert isinstance(endpoint, ExportEndpoint)
# Service properties
properties = endpoint.get_properties()
# Set import keys
properties[pelix.remote.PROP_ENDPOINT_ID] = endpoint.uid
properties[pelix.remote.PROP_IMPORTED_CONFIGS] = endpoint.configurations
properties[
pelix.remote.PROP_EXPORTED_INTERFACES
] = endpoint.specifications
# Remove export keys
for key in (
pelix.remote.PROP_EXPORTED_CONFIGS,
pelix.remote.PROP_EXPORTED_INTERFACES,
pelix.remote.PROP_EXPORTED_INTENTS,
pelix.remote.PROP_EXPORTED_INTENTS_EXTRA,
):
try:
del properties[key]
except KeyError:
pass
# Other information
properties[pelix.remote.PROP_ENDPOINT_NAME] = endpoint.name
properties[
pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID
] = endpoint.framework
return EndpointDescription(None, properties) |
def addHydrogens(molecule, usedPyroles=None):
"""(molecule) -> add implicit hydrogens to a molecule.
If the atom has specified valences and the atom must be
charged then a Valence Error is raised"""
for atom in molecule.atoms:
# if the atom has an explicit hcount, we can't set the
# hcount
if atom.has_explicit_hcount:
atom.hcount = atom.explicit_hcount
continue
if atom.valences:
for valence in atom.valences:
hcount = max(0, int(valence - atom.sumBondOrders() + atom.charge))
if hcount >= 0:
break
else:
if usedPyroles and not usedPyroles.has_key(atom.handle):
#print atom.symbol, atom.valences, atom.hcount, atom.charge,\
# atom.sumBondOrders()
#print [x.bondtype for x in atom.bonds]
#print molecule.cansmiles()
raise PinkyError("Valence error in atom %s"%molecule.atoms.index(atom))
pass
#hcount = int(hcount)
atom.hcount = hcount
return molecule | (molecule) -> add implicit hydrogens to a molecule.
If the atom has specified valences and the atom must be
charged then a Valence Error is raised | Below is the instruction that describes the task:
### Input:
(molecule) -> add implicit hydrogens to a molecule.
If the atom has specified valences and the atom must be
charged then a Valence Error is raised
### Response:
def addHydrogens(molecule, usedPyroles=None):
"""(molecule) -> add implicit hydrogens to a molecule.
If the atom has specified valences and the atom must be
charged then a Valence Error is raised"""
for atom in molecule.atoms:
# if the atom has an explicit hcount, we can't set the
# hcount
if atom.has_explicit_hcount:
atom.hcount = atom.explicit_hcount
continue
if atom.valences:
for valence in atom.valences:
hcount = max(0, int(valence - atom.sumBondOrders() + atom.charge))
if hcount >= 0:
break
else:
if usedPyroles and not usedPyroles.has_key(atom.handle):
#print atom.symbol, atom.valences, atom.hcount, atom.charge,\
# atom.sumBondOrders()
#print [x.bondtype for x in atom.bonds]
#print molecule.cansmiles()
raise PinkyError("Valence error in atom %s"%molecule.atoms.index(atom))
pass
#hcount = int(hcount)
atom.hcount = hcount
return molecule |
def _set_load_interval(self, v, load=False):
"""
Setter method for load_interval, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/load_interval (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_load_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_load_interval() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'30..300']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(300), is_leaf=True, yang_name="load-interval", rest_name="load-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Polling interval for MPLS LSP traffic statistics', u'hidden': u'full', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """load_interval must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'30..300']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(300), is_leaf=True, yang_name="load-interval", rest_name="load-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Polling interval for MPLS LSP traffic statistics', u'hidden': u'full', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""",
})
self.__load_interval = t
if hasattr(self, '_set'):
self._set() | Setter method for load_interval, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/load_interval (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_load_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_load_interval() directly. | Below is the instruction that describes the task:
### Input:
Setter method for load_interval, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/load_interval (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_load_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_load_interval() directly.
### Response:
def _set_load_interval(self, v, load=False):
"""
Setter method for load_interval, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/load_interval (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_load_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_load_interval() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'30..300']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(300), is_leaf=True, yang_name="load-interval", rest_name="load-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Polling interval for MPLS LSP traffic statistics', u'hidden': u'full', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """load_interval must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'30..300']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(300), is_leaf=True, yang_name="load-interval", rest_name="load-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Polling interval for MPLS LSP traffic statistics', u'hidden': u'full', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""",
})
self.__load_interval = t
if hasattr(self, '_set'):
self._set() |
def cd_to(path, mkdir=False):
"""make a generator like cd, but use it for function
Usage::
>>> @cd_to("/")
... def say_where():
... print(os.getcwd())
...
>>> say_where()
/
"""
def cd_to_decorator(func):
@functools.wraps(func)
def _cd_and_exec(*args, **kwargs):
with cd(path, mkdir):
return func(*args, **kwargs)
return _cd_and_exec
return cd_to_decorator | make a generator like cd, but use it for function
Usage::
>>> @cd_to("/")
... def say_where():
... print(os.getcwd())
...
>>> say_where()
/ | Below is the instruction that describes the task:
### Input:
make a generator like cd, but use it for function
Usage::
>>> @cd_to("/")
... def say_where():
... print(os.getcwd())
...
>>> say_where()
/
### Response:
def cd_to(path, mkdir=False):
"""make a generator like cd, but use it for function
Usage::
>>> @cd_to("/")
... def say_where():
... print(os.getcwd())
...
>>> say_where()
/
"""
def cd_to_decorator(func):
@functools.wraps(func)
def _cd_and_exec(*args, **kwargs):
with cd(path, mkdir):
return func(*args, **kwargs)
return _cd_and_exec
return cd_to_decorator |
def sparql_query(self, query, flush=None, limit=None):
"""
Run a Sparql query.
:param query: sparql query string
:rtype: list of dictionary
"""
return self.find_statements(query, language='sparql', type='tuples',
flush=flush, limit=limit) | Run a Sparql query.
:param query: sparql query string
:rtype: list of dictionary | Below is the instruction that describes the task:
### Input:
Run a Sparql query.
:param query: sparql query string
:rtype: list of dictionary
### Response:
def sparql_query(self, query, flush=None, limit=None):
"""
Run a Sparql query.
:param query: sparql query string
:rtype: list of dictionary
"""
return self.find_statements(query, language='sparql', type='tuples',
flush=flush, limit=limit) |
def _preprocess(self, struct1, struct2, niggli=True):
"""
Rescales, finds the reduced structures (primitive and niggli),
and finds fu, the supercell size to make struct1 comparable to
s2
"""
struct1 = struct1.copy()
struct2 = struct2.copy()
if niggli:
struct1 = struct1.get_reduced_structure(reduction_algo="niggli")
struct2 = struct2.get_reduced_structure(reduction_algo="niggli")
# primitive cell transformation
if self._primitive_cell:
struct1 = struct1.get_primitive_structure()
struct2 = struct2.get_primitive_structure()
if self._supercell:
fu, s1_supercell = self._get_supercell_size(struct1, struct2)
else:
fu, s1_supercell = 1, True
mult = fu if s1_supercell else 1/fu
# rescale lattice to same volume
if self._scale:
ratio = (struct2.volume / (struct1.volume * mult)) ** (1 / 6)
nl1 = Lattice(struct1.lattice.matrix * ratio)
struct1.lattice = nl1
nl2 = Lattice(struct2.lattice.matrix / ratio)
struct2.lattice = nl2
return struct1, struct2, fu, s1_supercell | Rescales, finds the reduced structures (primitive and niggli),
and finds fu, the supercell size to make struct1 comparable to
s2 | Below is the instruction that describes the task:
### Input:
Rescales, finds the reduced structures (primitive and niggli),
and finds fu, the supercell size to make struct1 comparable to
s2
### Response:
def _preprocess(self, struct1, struct2, niggli=True):
"""
Rescales, finds the reduced structures (primitive and niggli),
and finds fu, the supercell size to make struct1 comparable to
s2
"""
struct1 = struct1.copy()
struct2 = struct2.copy()
if niggli:
struct1 = struct1.get_reduced_structure(reduction_algo="niggli")
struct2 = struct2.get_reduced_structure(reduction_algo="niggli")
# primitive cell transformation
if self._primitive_cell:
struct1 = struct1.get_primitive_structure()
struct2 = struct2.get_primitive_structure()
if self._supercell:
fu, s1_supercell = self._get_supercell_size(struct1, struct2)
else:
fu, s1_supercell = 1, True
mult = fu if s1_supercell else 1/fu
# rescale lattice to same volume
if self._scale:
ratio = (struct2.volume / (struct1.volume * mult)) ** (1 / 6)
nl1 = Lattice(struct1.lattice.matrix * ratio)
struct1.lattice = nl1
nl2 = Lattice(struct2.lattice.matrix / ratio)
struct2.lattice = nl2
return struct1, struct2, fu, s1_supercell |