code | docstring | text
---|---|---|
def calcAbsSegCoords(self):
''' Calculate absolute seg coords by translating the relative seg coords -- used for LFP calc'''
from .. import sim
p3dsoma = self.getSomaPos()
pop = self.tags['pop']
morphSegCoords = sim.net.pops[pop]._morphSegCoords
# rotated coordinates around z axis first then shift relative to the soma
self._segCoords = {}
p3dsoma = p3dsoma[np.newaxis].T # transpose 1d array to enable matrix calculation
self._segCoords['p0'] = p3dsoma + morphSegCoords['p0']
self._segCoords['p1'] = p3dsoma + morphSegCoords['p1'] | Calculate absolute seg coords by translating the relative seg coords -- used for LFP calc | Below is the instruction that describes the task:
### Input:
Calculate absolute seg coords by translating the relative seg coords -- used for LFP calc
### Response:
def calcAbsSegCoords(self):
''' Calculate absolute seg coords by translating the relative seg coords -- used for LFP calc'''
from .. import sim
p3dsoma = self.getSomaPos()
pop = self.tags['pop']
morphSegCoords = sim.net.pops[pop]._morphSegCoords
# rotated coordinates around z axis first then shift relative to the soma
self._segCoords = {}
p3dsoma = p3dsoma[np.newaxis].T # transpose 1d array to enable matrix calculation
self._segCoords['p0'] = p3dsoma + morphSegCoords['p0']
self._segCoords['p1'] = p3dsoma + morphSegCoords['p1'] |
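A minimal NumPy sketch of the broadcasting used above, with made-up coordinates rather than NetPyNE's real data structures: turning the soma position into a column vector lets a single addition translate every relative segment coordinate.

```python
import numpy as np

p3dsoma = np.array([10.0, 50.0, 10.0])            # absolute soma position (x, y, z) -- hypothetical
rel_p0 = np.array([[0.0, 1.0],                    # relative segment start points, shape (3, nseg)
                   [0.0, 2.0],
                   [0.0, 3.0]])

p3dsoma_col = p3dsoma[np.newaxis].T               # shape (3, 1): transpose the 1d array for broadcasting
abs_p0 = p3dsoma_col + rel_p0                     # shape (3, nseg): absolute coordinates
print(abs_p0)
```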
def _set_pwm(self, raw_values):
"""
Set pwm values on the controlled pins.
:param raw_values: Raw values to set (0-255).
"""
for i in range(len(self._pins)):
self._pi.set_PWM_dutycycle(self._pins[i], raw_values[i]) | Set pwm values on the controlled pins.
:param raw_values: Raw values to set (0-255). | Below is the instruction that describes the task:
### Input:
Set pwm values on the controlled pins.
:param raw_values: Raw values to set (0-255).
### Response:
def _set_pwm(self, raw_values):
"""
Set pwm values on the controlled pins.
:param raw_values: Raw values to set (0-255).
"""
for i in range(len(self._pins)):
self._pi.set_PWM_dutycycle(self._pins[i], raw_values[i]) |
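A sketch of the raw pigpio calls this helper wraps; the pin numbers and duty cycles below are arbitrary placeholders, and a running pigpiod daemon is assumed.

```python
import pigpio

pi = pigpio.pi()                      # connect to the local pigpio daemon
pins = [17, 22, 24]                   # hypothetical GPIO pins (e.g. an RGB strip)
raw_values = [255, 128, 0]            # one 0-255 duty cycle per pin

for pin, value in zip(pins, raw_values):
    pi.set_PWM_dutycycle(pin, value)  # same call the method above issues per pin

pi.stop()
```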
def _init_vocab(self, token_generator, add_reserved_tokens=True):
"""Initialize vocabulary with tokens from token_generator."""
self._id_to_token = {}
non_reserved_start_index = 0
if add_reserved_tokens:
self._id_to_token.update(enumerate(RESERVED_TOKENS))
non_reserved_start_index = len(RESERVED_TOKENS)
self._id_to_token.update(
enumerate(token_generator, start=non_reserved_start_index))
# _token_to_id is the reverse of _id_to_token
self._token_to_id = dict((v, k)
for k, v in six.iteritems(self._id_to_token)) | Initialize vocabulary with tokens from token_generator. | Below is the instruction that describes the task:
### Input:
Initialize vocabulary with tokens from token_generator.
### Response:
def _init_vocab(self, token_generator, add_reserved_tokens=True):
"""Initialize vocabulary with tokens from token_generator."""
self._id_to_token = {}
non_reserved_start_index = 0
if add_reserved_tokens:
self._id_to_token.update(enumerate(RESERVED_TOKENS))
non_reserved_start_index = len(RESERVED_TOKENS)
self._id_to_token.update(
enumerate(token_generator, start=non_reserved_start_index))
# _token_to_id is the reverse of _id_to_token
self._token_to_id = dict((v, k)
for k, v in six.iteritems(self._id_to_token)) |
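A self-contained sketch of the id/token mapping built above; `RESERVED_TOKENS` here is a stand-in for the library's actual constant, and the example tokens are invented.

```python
RESERVED_TOKENS = ["<pad>", "<EOS>"]  # stand-in for the real reserved-token list

def build_vocab(tokens, add_reserved_tokens=True):
    id_to_token = {}
    start = 0
    if add_reserved_tokens:
        id_to_token.update(enumerate(RESERVED_TOKENS))
        start = len(RESERVED_TOKENS)
    id_to_token.update(enumerate(tokens, start=start))
    token_to_id = {token: idx for idx, token in id_to_token.items()}  # reverse mapping
    return id_to_token, token_to_id

print(build_vocab(["hello", "world"]))
# ({0: '<pad>', 1: '<EOS>', 2: 'hello', 3: 'world'}, {'<pad>': 0, '<EOS>': 1, 'hello': 2, 'world': 3})
```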
def typewrite(message, interval=0.0, pause=None, _pause=True):
"""Performs a keyboard key press down, followed by a release, for each of
the characters in message.
The message argument can also be list of strings, in which case any valid
keyboard name can be used.
Since this performs a sequence of keyboard presses and does not hold down
keys, it cannot be used to perform keyboard shortcuts. Use the hotkey()
function for that.
Args:
message (str, list): If a string, then the characters to be pressed. If a
list, then the key names of the keys to press in order. The valid names
are listed in KEYBOARD_KEYS.
interval (float, optional): The number of seconds in between each press.
0.0 by default, for no pause in between presses.
Returns:
None
"""
interval = float(interval)
_failSafeCheck()
for c in message:
if len(c) > 1:
c = c.lower()
press(c, _pause=False)
time.sleep(interval)
_failSafeCheck()
_autoPause(pause, _pause) | Performs a keyboard key press down, followed by a release, for each of
the characters in message.
The message argument can also be list of strings, in which case any valid
keyboard name can be used.
Since this performs a sequence of keyboard presses and does not hold down
keys, it cannot be used to perform keyboard shortcuts. Use the hotkey()
function for that.
Args:
message (str, list): If a string, then the characters to be pressed. If a
list, then the key names of the keys to press in order. The valid names
are listed in KEYBOARD_KEYS.
interval (float, optional): The number of seconds in between each press.
0.0 by default, for no pause in between presses.
Returns:
None | Below is the instruction that describes the task:
### Input:
Performs a keyboard key press down, followed by a release, for each of
the characters in message.
The message argument can also be list of strings, in which case any valid
keyboard name can be used.
Since this performs a sequence of keyboard presses and does not hold down
keys, it cannot be used to perform keyboard shortcuts. Use the hotkey()
function for that.
Args:
message (str, list): If a string, then the characters to be pressed. If a
list, then the key names of the keys to press in order. The valid names
are listed in KEYBOARD_KEYS.
interval (float, optional): The number of seconds in between each press.
0.0 by default, for no pause in between presses.
Returns:
None
### Response:
def typewrite(message, interval=0.0, pause=None, _pause=True):
"""Performs a keyboard key press down, followed by a release, for each of
the characters in message.
The message argument can also be list of strings, in which case any valid
keyboard name can be used.
Since this performs a sequence of keyboard presses and does not hold down
keys, it cannot be used to perform keyboard shortcuts. Use the hotkey()
function for that.
Args:
message (str, list): If a string, then the characters to be pressed. If a
list, then the key names of the keys to press in order. The valid names
are listed in KEYBOARD_KEYS.
interval (float, optional): The number of seconds in between each press.
0.0 by default, for no pause in between presses.
Returns:
None
"""
interval = float(interval)
_failSafeCheck()
for c in message:
if len(c) > 1:
c = c.lower()
press(c, _pause=False)
time.sleep(interval)
_failSafeCheck()
_autoPause(pause, _pause) |
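Assuming the function above is PyAutoGUI's `typewrite`, a usage sketch might look like this; the one-second pause to focus a target window is just a convention for the example.

```python
import time
import pyautogui  # assumes the function above is pyautogui.typewrite

time.sleep(1)  # give yourself a moment to focus the window that should receive the keystrokes
pyautogui.typewrite("Hello world!", interval=0.05)                       # type a plain string
pyautogui.typewrite(["a", "b", "left", "left", "X", "Y"], interval=0.1)  # key names from KEYBOARD_KEYS
```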
def plotConvergenceByDistantConnectionChance(results, featureRange, columnRange, longDistanceConnectionsRange, numTrials):
"""
Plots the convergence graph: iterations vs number of columns.
Each curve shows the convergence for a given number of unique features.
"""
########################################################################
#
# Accumulate all the results per column in a convergence array.
#
# Convergence[f, c, t] = how long it took it to converge with f unique
# features, c columns and topology t.
convergence = numpy.zeros((len(featureRange), len(longDistanceConnectionsRange), len(columnRange)))
for r in results:
print longDistanceConnectionsRange.index(r["longDistanceConnections"])
print columnRange.index(r["numColumns"])
convergence[featureRange.index(r["numFeatures"]),
longDistanceConnectionsRange.index(r["longDistanceConnections"]),
columnRange.index(r["numColumns"])] += r["convergencePoint"]
convergence /= numTrials
# For each column, print convergence as fct of number of unique features
for i, c in enumerate(columnRange):
for j, r in enumerate(longDistanceConnectionsRange):
print c, r, convergence[:, j, i]
# Print everything anyway for debugging
print "Average convergence array=", convergence
########################################################################
#
# Create the plot. x-axis=
plt.figure(figsize=(8, 6), dpi=80)
plotPath = os.path.join("plots", "convergence_by_random_connection_chance.pdf")
# Plot each curve
legendList = []
colormap = plt.get_cmap("jet")
colorList = [colormap(x) for x in numpy.linspace(0., 1.,
len(featureRange)*len(longDistanceConnectionsRange))]
for i, r in enumerate(longDistanceConnectionsRange):
for j, f in enumerate(featureRange):
currentColor = i*len(featureRange) + j
print columnRange
print convergence[j, i, :]
legendList.append('Connection_prob = {}, num features = {}'.format(r, f))
plt.plot(columnRange, convergence[j, i, :], color=colorList[currentColor])
# format
plt.legend(legendList, loc = "lower left")
plt.xlabel("Number of columns")
plt.xticks(columnRange)
plt.yticks(range(0,int(convergence.max())+1))
plt.ylabel("Average number of touches")
plt.title("Number of touches to recognize one object (multiple columns)")
# save
plt.show()
plt.savefig(plotPath)
plt.close() | Plots the convergence graph: iterations vs number of columns.
Each curve shows the convergence for a given number of unique features. | Below is the instruction that describes the task:
### Input:
Plots the convergence graph: iterations vs number of columns.
Each curve shows the convergence for a given number of unique features.
### Response:
def plotConvergenceByDistantConnectionChance(results, featureRange, columnRange, longDistanceConnectionsRange, numTrials):
"""
Plots the convergence graph: iterations vs number of columns.
Each curve shows the convergence for a given number of unique features.
"""
########################################################################
#
# Accumulate all the results per column in a convergence array.
#
# Convergence[f, c, t] = how long it took it to converge with f unique
# features, c columns and topology t.
convergence = numpy.zeros((len(featureRange), len(longDistanceConnectionsRange), len(columnRange)))
for r in results:
print longDistanceConnectionsRange.index(r["longDistanceConnections"])
print columnRange.index(r["numColumns"])
convergence[featureRange.index(r["numFeatures"]),
longDistanceConnectionsRange.index(r["longDistanceConnections"]),
columnRange.index(r["numColumns"])] += r["convergencePoint"]
convergence /= numTrials
# For each column, print convergence as fct of number of unique features
for i, c in enumerate(columnRange):
for j, r in enumerate(longDistanceConnectionsRange):
print c, r, convergence[:, j, i]
# Print everything anyway for debugging
print "Average convergence array=", convergence
########################################################################
#
# Create the plot. x-axis=
plt.figure(figsize=(8, 6), dpi=80)
plotPath = os.path.join("plots", "convergence_by_random_connection_chance.pdf")
# Plot each curve
legendList = []
colormap = plt.get_cmap("jet")
colorList = [colormap(x) for x in numpy.linspace(0., 1.,
len(featureRange)*len(longDistanceConnectionsRange))]
for i, r in enumerate(longDistanceConnectionsRange):
for j, f in enumerate(featureRange):
currentColor = i*len(featureRange) + j
print columnRange
print convergence[j, i, :]
legendList.append('Connection_prob = {}, num features = {}'.format(r, f))
plt.plot(columnRange, convergence[j, i, :], color=colorList[currentColor])
# format
plt.legend(legendList, loc = "lower left")
plt.xlabel("Number of columns")
plt.xticks(columnRange)
plt.yticks(range(0,int(convergence.max())+1))
plt.ylabel("Average number of touches")
plt.title("Number of touches to recognize one object (multiple columns)")
# save
plt.show()
plt.savefig(plotPath)
plt.close() |
def divide_separate_words(string_matrix: List[List[str]]) -> List[List[str]]:
"""
As part of processing, some words obviously need to be separated.
:param string_matrix: a data matrix: a list wrapping a list of strings, with each sublist being a sentence.
:return:
>>> divide_separate_words([['ita vero'], ['quid', 'est', 'veritas']])
[['ita', 'vero'], ['quid', 'est', 'veritas']]
"""
new_X = []
for sentence in string_matrix:
data_row = [] # type: List[str]
for word in sentence:
if ' ' in word:
data_row += word.split()
else:
data_row.append(word)
new_X.append(data_row)
return new_X | As part of processing, some words obviously need to be separated.
:param string_matrix: a data matrix: a list wrapping a list of strings, with each sublist being a sentence.
:return:
>>> divide_separate_words([['ita vero'], ['quid', 'est', 'veritas']])
[['ita', 'vero'], ['quid', 'est', 'veritas']] | Below is the instruction that describes the task:
### Input:
As part of processing, some words obviously need to be separated.
:param string_matrix: a data matrix: a list wrapping a list of strings, with each sublist being a sentence.
:return:
>>> divide_separate_words([['ita vero'], ['quid', 'est', 'veritas']])
[['ita', 'vero'], ['quid', 'est', 'veritas']]
### Response:
def divide_separate_words(string_matrix: List[List[str]]) -> List[List[str]]:
"""
As part of processing, some words obviously need to be separated.
:param string_matrix: a data matrix: a list wrapping a list of strings, with each sublist being a sentence.
:return:
>>> divide_separate_words([['ita vero'], ['quid', 'est', 'veritas']])
[['ita', 'vero'], ['quid', 'est', 'veritas']]
"""
new_X = []
for sentence in string_matrix:
data_row = [] # type: List[str]
for word in sentence:
if ' ' in word:
data_row += word.split()
else:
data_row.append(word)
new_X.append(data_row)
return new_X |
def write_skills_data(self, data=None):
""" Write skills data hash if it has been modified. """
data = data or self.skills_data
if skills_data_hash(data) != self.skills_data_hash:
write_skills_data(data)
self.skills_data_hash = skills_data_hash(data) | Write skills data hash if it has been modified. | Below is the instruction that describes the task:
### Input:
Write skills data hash if it has been modified.
### Response:
def write_skills_data(self, data=None):
""" Write skills data hash if it has been modified. """
data = data or self.skills_data
if skills_data_hash(data) != self.skills_data_hash:
write_skills_data(data)
self.skills_data_hash = skills_data_hash(data) |
def _get_expanded_active_specs(specs):
"""
This function removes any unnecessary bundles, apps, libs, and services that aren't needed by
the activated_bundles. It also expands inside specs.apps.depends.libs all libs that are needed
indirectly by each app
"""
_filter_active(constants.CONFIG_BUNDLES_KEY, specs)
_filter_active('apps', specs)
_expand_libs_in_apps(specs)
_filter_active('libs', specs)
_filter_active('services', specs)
_add_active_assets(specs) | This function removes any unnecessary bundles, apps, libs, and services that aren't needed by
the activated_bundles. It also expands inside specs.apps.depends.libs all libs that are needed
indirectly by each app | Below is the instruction that describes the task:
### Input:
This function removes any unnecessary bundles, apps, libs, and services that aren't needed by
the activated_bundles. It also expands inside specs.apps.depends.libs all libs that are needed
indirectly by each app
### Response:
def _get_expanded_active_specs(specs):
"""
This function removes any unnecessary bundles, apps, libs, and services that aren't needed by
the activated_bundles. It also expands inside specs.apps.depends.libs all libs that are needed
indirectly by each app
"""
_filter_active(constants.CONFIG_BUNDLES_KEY, specs)
_filter_active('apps', specs)
_expand_libs_in_apps(specs)
_filter_active('libs', specs)
_filter_active('services', specs)
_add_active_assets(specs) |
def destroy(self, uuid):
"""
Destroy a kvm domain by uuid
:param uuid: uuid of the kvm container (same as the one used in create)
:return:
"""
args = {
'uuid': uuid,
}
self._domain_action_chk.check(args)
self._client.sync('kvm.destroy', args) | Destroy a kvm domain by uuid
:param uuid: uuid of the kvm container (same as the one used in create)
:return: | Below is the instruction that describes the task:
### Input:
Destroy a kvm domain by uuid
:param uuid: uuid of the kvm container (same as the one used in create)
:return:
### Response:
def destroy(self, uuid):
"""
Destroy a kvm domain by uuid
:param uuid: uuid of the kvm container (same as the one used in create)
:return:
"""
args = {
'uuid': uuid,
}
self._domain_action_chk.check(args)
self._client.sync('kvm.destroy', args) |
def remote_pdb_handler(signum, frame):
""" Handler to drop us into a remote debugger upon receiving SIGUSR1 """
try:
from remote_pdb import RemotePdb
rdb = RemotePdb(host="127.0.0.1", port=0)
rdb.set_trace(frame=frame)
except ImportError:
log.warning(
"remote_pdb unavailable. Please install remote_pdb to "
"allow remote debugging."
)
# Restore signal handler for later
signal.signal(signum, remote_pdb_handler) | Handler to drop us into a remote debugger upon receiving SIGUSR1 | Below is the instruction that describes the task:
### Input:
Handler to drop us into a remote debugger upon receiving SIGUSR1
### Response:
def remote_pdb_handler(signum, frame):
""" Handler to drop us into a remote debugger upon receiving SIGUSR1 """
try:
from remote_pdb import RemotePdb
rdb = RemotePdb(host="127.0.0.1", port=0)
rdb.set_trace(frame=frame)
except ImportError:
log.warning(
"remote_pdb unavailable. Please install remote_pdb to "
"allow remote debugging."
)
# Restore signal handler for later
signal.signal(signum, remote_pdb_handler) |
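A small sketch of how the handler above is typically registered at startup (it assumes `remote_pdb_handler` and a configured `log` are available in the running process):

```python
import signal

# After this call, `kill -USR1 <pid>` drops the process into a RemotePdb session.
signal.signal(signal.SIGUSR1, remote_pdb_handler)
```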
def get_job_collection(self, cloud_service_id, job_collection_id):
'''
The Get Job Collection operation gets the details of a job collection
cloud_service_id:
The cloud service id
job_collection_id:
Name of the hosted service.
'''
_validate_not_none('cloud_service_id', cloud_service_id)
_validate_not_none('job_collection_id', job_collection_id)
path = self._get_job_collection_path(
cloud_service_id, job_collection_id)
return self._perform_get(path, Resource) | The Get Job Collection operation gets the details of a job collection
cloud_service_id:
The cloud service id
job_collection_id:
Name of the hosted service. | Below is the instruction that describes the task:
### Input:
The Get Job Collection operation gets the details of a job collection
cloud_service_id:
The cloud service id
job_collection_id:
Name of the hosted service.
### Response:
def get_job_collection(self, cloud_service_id, job_collection_id):
'''
The Get Job Collection operation gets the details of a job collection
cloud_service_id:
The cloud service id
job_collection_id:
Name of the hosted service.
'''
_validate_not_none('cloud_service_id', cloud_service_id)
_validate_not_none('job_collection_id', job_collection_id)
path = self._get_job_collection_path(
cloud_service_id, job_collection_id)
return self._perform_get(path, Resource) |
def bpopmax(self, timeout=0):
"""
Atomically remove the highest-scoring item from the set, blocking until
an item becomes available or timeout is reached (0 for no timeout,
default).
Returns a 2-tuple of (item, score).
"""
res = self.database.bzpopmax(self.key, timeout)
if res is not None:
return (res[1], res[2]) | Atomically remove the highest-scoring item from the set, blocking until
an item becomes available or timeout is reached (0 for no timeout,
default).
Returns a 2-tuple of (item, score). | Below is the instruction that describes the task:
### Input:
Atomically remove the highest-scoring item from the set, blocking until
an item becomes available or timeout is reached (0 for no timeout,
default).
Returns a 2-tuple of (item, score).
### Response:
def bpopmax(self, timeout=0):
"""
Atomically remove the highest-scoring item from the set, blocking until
an item becomes available or timeout is reached (0 for no timeout,
default).
Returns a 2-tuple of (item, score).
"""
res = self.database.bzpopmax(self.key, timeout)
if res is not None:
return (res[1], res[2]) |
def from_http_status(status_code, message, **kwargs):
"""Create a :class:`GoogleAPICallError` from an HTTP status code.
Args:
status_code (int): The HTTP status code.
message (str): The exception message.
kwargs: Additional arguments passed to the :class:`GoogleAPICallError`
constructor.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`.
"""
error_class = exception_class_for_http_status(status_code)
error = error_class(message, **kwargs)
if error.code is None:
error.code = status_code
return error | Create a :class:`GoogleAPICallError` from an HTTP status code.
Args:
status_code (int): The HTTP status code.
message (str): The exception message.
kwargs: Additional arguments passed to the :class:`GoogleAPICallError`
constructor.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`. | Below is the instruction that describes the task:
### Input:
Create a :class:`GoogleAPICallError` from an HTTP status code.
Args:
status_code (int): The HTTP status code.
message (str): The exception message.
kwargs: Additional arguments passed to the :class:`GoogleAPICallError`
constructor.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`.
### Response:
def from_http_status(status_code, message, **kwargs):
"""Create a :class:`GoogleAPICallError` from an HTTP status code.
Args:
status_code (int): The HTTP status code.
message (str): The exception message.
kwargs: Additional arguments passed to the :class:`GoogleAPICallError`
constructor.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`.
"""
error_class = exception_class_for_http_status(status_code)
error = error_class(message, **kwargs)
if error.code is None:
error.code = status_code
return error |
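This matches the signature of `google.api_core.exceptions.from_http_status`; if that is indeed the function shown, a usage sketch looks like:

```python
from google.api_core import exceptions  # assumption: the function above is from this module

err = exceptions.from_http_status(404, "Resource not found")
print(type(err).__name__)  # expected: NotFound
print(err.code)            # expected: 404
```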
def sync_status(self):
"""Synchronize DOI status DataCite MDS.
:returns: `True` if is sync successfully.
"""
status = None
try:
try:
self.api.doi_get(self.pid.pid_value)
status = PIDStatus.REGISTERED
except DataCiteGoneError:
status = PIDStatus.DELETED
except DataCiteNoContentError:
status = PIDStatus.REGISTERED
except DataCiteNotFoundError:
pass
if status is None:
try:
self.api.metadata_get(self.pid.pid_value)
status = PIDStatus.RESERVED
except DataCiteGoneError:
status = PIDStatus.DELETED
except DataCiteNoContentError:
status = PIDStatus.REGISTERED
except DataCiteNotFoundError:
pass
except (DataCiteError, HttpError):
logger.exception("Failed to sync status from DataCite",
extra=dict(pid=self.pid))
raise
if status is None:
status = PIDStatus.NEW
self.pid.sync_status(status)
logger.info("Successfully synced status from DataCite",
extra=dict(pid=self.pid))
return True | Synchronize DOI status with DataCite MDS.
:returns: `True` if synced successfully. | Below is the instruction that describes the task:
### Input:
Synchronize DOI status with DataCite MDS.
:returns: `True` if synced successfully.
### Response:
def sync_status(self):
"""Synchronize DOI status DataCite MDS.
:returns: `True` if is sync successfully.
"""
status = None
try:
try:
self.api.doi_get(self.pid.pid_value)
status = PIDStatus.REGISTERED
except DataCiteGoneError:
status = PIDStatus.DELETED
except DataCiteNoContentError:
status = PIDStatus.REGISTERED
except DataCiteNotFoundError:
pass
if status is None:
try:
self.api.metadata_get(self.pid.pid_value)
status = PIDStatus.RESERVED
except DataCiteGoneError:
status = PIDStatus.DELETED
except DataCiteNoContentError:
status = PIDStatus.REGISTERED
except DataCiteNotFoundError:
pass
except (DataCiteError, HttpError):
logger.exception("Failed to sync status from DataCite",
extra=dict(pid=self.pid))
raise
if status is None:
status = PIDStatus.NEW
self.pid.sync_status(status)
logger.info("Successfully synced status from DataCite",
extra=dict(pid=self.pid))
return True |
def setActions( self, actions ):
"""
Sets the list of actions that will be used for this shortcut dialog \
when editing.
:param actions | [<QAction>, ..]
"""
self.uiActionTREE.blockSignals(True)
self.uiActionTREE.setUpdatesEnabled(False)
self.uiActionTREE.clear()
for action in actions:
self.uiActionTREE.addTopLevelItem(ActionItem(action))
self.uiActionTREE.sortByColumn(0, Qt.AscendingOrder)
self.uiActionTREE.blockSignals(False)
self.uiActionTREE.setUpdatesEnabled(True) | Sets the list of actions that will be used for this shortcut dialog \
when editing.
:param actions | [<QAction>, ..] | Below is the instruction that describes the task:
### Input:
Sets the list of actions that will be used for this shortcut dialog \
when editing.
:param actions | [<QAction>, ..]
### Response:
def setActions( self, actions ):
"""
Sets the list of actions that will be used for this shortcut dialog \
when editing.
:param actions | [<QAction>, ..]
"""
self.uiActionTREE.blockSignals(True)
self.uiActionTREE.setUpdatesEnabled(False)
self.uiActionTREE.clear()
for action in actions:
self.uiActionTREE.addTopLevelItem(ActionItem(action))
self.uiActionTREE.sortByColumn(0, Qt.AscendingOrder)
self.uiActionTREE.blockSignals(False)
self.uiActionTREE.setUpdatesEnabled(True) |
def set_fallback_resolution(self, x_pixels_per_inch, y_pixels_per_inch):
"""
Set the horizontal and vertical resolution for image fallbacks.
When certain operations aren't supported natively by a backend,
cairo will fallback by rendering operations to an image
and then overlaying that image onto the output.
For backends that are natively vector-oriented,
this method can be used to set the resolution
used for these image fallbacks,
(larger values will result in more detailed images,
but also larger file sizes).
Some examples of natively vector-oriented backends are
the ps, pdf, and svg backends.
For backends that are natively raster-oriented,
image fallbacks are still possible,
but they are always performed at the native device resolution.
So this method has no effect on those backends.
.. note::
The fallback resolution only takes effect
at the time of completing a page
(with :meth:`show_page` or :meth:`copy_page`)
so there is currently no way to have
more than one fallback resolution in effect on a single page.
The default fallback resolution is
300 pixels per inch in both dimensions.
:param x_pixels_per_inch: horizontal resolution in pixels per inch
:type x_pixels_per_inch: float
:param y_pixels_per_inch: vertical resolution in pixels per inch
:type y_pixels_per_inch: float
"""
cairo.cairo_surface_set_fallback_resolution(
self._pointer, x_pixels_per_inch, y_pixels_per_inch)
self._check_status() | Set the horizontal and vertical resolution for image fallbacks.
When certain operations aren't supported natively by a backend,
cairo will fallback by rendering operations to an image
and then overlaying that image onto the output.
For backends that are natively vector-oriented,
this method can be used to set the resolution
used for these image fallbacks,
(larger values will result in more detailed images,
but also larger file sizes).
Some examples of natively vector-oriented backends are
the ps, pdf, and svg backends.
For backends that are natively raster-oriented,
image fallbacks are still possible,
but they are always performed at the native device resolution.
So this method has no effect on those backends.
.. note::
The fallback resolution only takes effect
at the time of completing a page
(with :meth:`show_page` or :meth:`copy_page`)
so there is currently no way to have
more than one fallback resolution in effect on a single page.
The default fallback resolution is
300 pixels per inch in both dimensions.
:param x_pixels_per_inch: horizontal resolution in pixels per inch
:type x_pixels_per_inch: float
:param y_pixels_per_inch: vertical resolution in pixels per inch
:type y_pixels_per_inch: float | Below is the instruction that describes the task:
### Input:
Set the horizontal and vertical resolution for image fallbacks.
When certain operations aren't supported natively by a backend,
cairo will fallback by rendering operations to an image
and then overlaying that image onto the output.
For backends that are natively vector-oriented,
this method can be used to set the resolution
used for these image fallbacks,
(larger values will result in more detailed images,
but also larger file sizes).
Some examples of natively vector-oriented backends are
the ps, pdf, and svg backends.
For backends that are natively raster-oriented,
image fallbacks are still possible,
but they are always performed at the native device resolution.
So this method has no effect on those backends.
.. note::
The fallback resolution only takes effect
at the time of completing a page
(with :meth:`show_page` or :meth:`copy_page`)
so there is currently no way to have
more than one fallback resolution in effect on a single page.
The default fallback resolution is
300 pixels per inch in both dimensions.
:param x_pixels_per_inch: horizontal resolution in pixels per inch
:type x_pixels_per_inch: float
:param y_pixels_per_inch: vertical resolution in pixels per inch
:type y_pixels_per_inch: float
### Response:
def set_fallback_resolution(self, x_pixels_per_inch, y_pixels_per_inch):
"""
Set the horizontal and vertical resolution for image fallbacks.
When certain operations aren't supported natively by a backend,
cairo will fallback by rendering operations to an image
and then overlaying that image onto the output.
For backends that are natively vector-oriented,
this method can be used to set the resolution
used for these image fallbacks,
(larger values will result in more detailed images,
but also larger file sizes).
Some examples of natively vector-oriented backends are
the ps, pdf, and svg backends.
For backends that are natively raster-oriented,
image fallbacks are still possible,
but they are always performed at the native device resolution.
So this method has no effect on those backends.
.. note::
The fallback resolution only takes effect
at the time of completing a page
(with :meth:`show_page` or :meth:`copy_page`)
so there is currently no way to have
more than one fallback resolution in effect on a single page.
The default fallback resolution is
300 pixels per inch in both dimensions.
:param x_pixels_per_inch: horizontal resolution in pixels per inch
:type x_pixels_per_inch: float
:param y_pixels_per_inch: vertical resolution in pixels per inch
:type y_pixels_per_inch: float
"""
cairo.cairo_surface_set_fallback_resolution(
self._pointer, x_pixels_per_inch, y_pixels_per_inch)
self._check_status() |
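The method reads like cairocffi's `Surface.set_fallback_resolution`; a sketch of calling it on a vector surface follows, with an arbitrary file name and page size.

```python
import cairocffi as cairo  # assumption: the method above belongs to cairocffi surfaces

surface = cairo.PDFSurface("out.pdf", 595, 842)   # page size in points (roughly A4)
surface.set_fallback_resolution(150, 150)         # coarser image fallbacks -> smaller file
ctx = cairo.Context(surface)
ctx.set_source_rgb(0.2, 0.4, 0.8)
ctx.paint_with_alpha(0.5)    # operations like this may be rasterized as a fallback on some backends
surface.show_page()          # the fallback resolution applies when the page is completed
surface.finish()
```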
def sync_request(self, command, payload, retry=2):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.request(command, payload, retry))
return loop.run_until_complete(task) | Request data. | Below is the instruction that describes the task:
### Input:
Request data.
### Response:
def sync_request(self, command, payload, retry=2):
"""Request data."""
loop = asyncio.get_event_loop()
task = loop.create_task(self.request(command, payload, retry))
return loop.run_until_complete(task) |
def _compute_vline_scores(self):
"""Does the hard work to prepare ``vline_score``.
"""
M, N, L = self.M, self.N, self.L
vline_score = {}
for x in range(M):
laststart = [0 if (x, 0, 1, k) in self else None for k in range(L)]
for y in range(N):
block = [0] * (y + 1)
for k in range(L):
if (x, y, 1, k) not in self:
laststart[k] = None
elif laststart[k] is None:
laststart[k] = y
block[y] += 1
elif y and (x, y, 1, k) not in self[x, y - 1, 1, k]:
laststart[k] = y
else:
for y1 in range(laststart[k], y + 1):
block[y1] += 1
for y1 in range(y + 1):
vline_score[x, y1, y] = block[y1]
self._vline_score = vline_score | Does the hard work to prepare ``vline_score``. | Below is the instruction that describes the task:
### Input:
Does the hard work to prepare ``vline_score``.
### Response:
def _compute_vline_scores(self):
"""Does the hard work to prepare ``vline_score``.
"""
M, N, L = self.M, self.N, self.L
vline_score = {}
for x in range(M):
laststart = [0 if (x, 0, 1, k) in self else None for k in range(L)]
for y in range(N):
block = [0] * (y + 1)
for k in range(L):
if (x, y, 1, k) not in self:
laststart[k] = None
elif laststart[k] is None:
laststart[k] = y
block[y] += 1
elif y and (x, y, 1, k) not in self[x, y - 1, 1, k]:
laststart[k] = y
else:
for y1 in range(laststart[k], y + 1):
block[y1] += 1
for y1 in range(y + 1):
vline_score[x, y1, y] = block[y1]
self._vline_score = vline_score |
def put(self, filename, handle):
"""
Upload a distribution archive to the configured Amazon S3 bucket.
If the :attr:`~.Config.s3_cache_readonly` configuration option is
enabled this method does nothing.
:param filename: The filename of the distribution archive (a string).
:param handle: A file-like object that provides access to the
distribution archive.
:raises: :exc:`.CacheBackendError` when any underlying method fails.
"""
if self.config.s3_cache_readonly:
logger.info('Skipping upload to S3 bucket (using S3 in read only mode).')
else:
timer = Timer()
self.check_prerequisites()
with PatchedBotoConfig():
from boto.s3.key import Key
raw_key = self.get_cache_key(filename)
logger.info("Uploading distribution archive to S3 bucket: %s", raw_key)
key = Key(self.s3_bucket)
key.key = raw_key
try:
key.set_contents_from_file(handle)
except Exception as e:
logger.info("Encountered error writing to S3 bucket, "
"falling back to read only mode (exception: %s)", e)
self.config.s3_cache_readonly = True
else:
logger.info("Finished uploading distribution archive to S3 bucket in %s.", timer) | Upload a distribution archive to the configured Amazon S3 bucket.
If the :attr:`~.Config.s3_cache_readonly` configuration option is
enabled this method does nothing.
:param filename: The filename of the distribution archive (a string).
:param handle: A file-like object that provides access to the
distribution archive.
:raises: :exc:`.CacheBackendError` when any underlying method fails. | Below is the instruction that describes the task:
### Input:
Upload a distribution archive to the configured Amazon S3 bucket.
If the :attr:`~.Config.s3_cache_readonly` configuration option is
enabled this method does nothing.
:param filename: The filename of the distribution archive (a string).
:param handle: A file-like object that provides access to the
distribution archive.
:raises: :exc:`.CacheBackendError` when any underlying method fails.
### Response:
def put(self, filename, handle):
"""
Upload a distribution archive to the configured Amazon S3 bucket.
If the :attr:`~.Config.s3_cache_readonly` configuration option is
enabled this method does nothing.
:param filename: The filename of the distribution archive (a string).
:param handle: A file-like object that provides access to the
distribution archive.
:raises: :exc:`.CacheBackendError` when any underlying method fails.
"""
if self.config.s3_cache_readonly:
logger.info('Skipping upload to S3 bucket (using S3 in read only mode).')
else:
timer = Timer()
self.check_prerequisites()
with PatchedBotoConfig():
from boto.s3.key import Key
raw_key = self.get_cache_key(filename)
logger.info("Uploading distribution archive to S3 bucket: %s", raw_key)
key = Key(self.s3_bucket)
key.key = raw_key
try:
key.set_contents_from_file(handle)
except Exception as e:
logger.info("Encountered error writing to S3 bucket, "
"falling back to read only mode (exception: %s)", e)
self.config.s3_cache_readonly = True
else:
logger.info("Finished uploading distribution archive to S3 bucket in %s.", timer) |
async def _send_sleep(self, request: Request, stack: Stack):
"""
Sleep for the amount of time specified in the Sleep layer
"""
duration = stack.get_layer(lyr.Sleep).duration
await sleep(duration) | Sleep for the amount of time specified in the Sleep layer | Below is the instruction that describes the task:
### Input:
Sleep for the amount of time specified in the Sleep layer
### Response:
async def _send_sleep(self, request: Request, stack: Stack):
"""
Sleep for the amount of time specified in the Sleep layer
"""
duration = stack.get_layer(lyr.Sleep).duration
await sleep(duration) |
def load(self, shapefile=None):
"""Opens a shapefile from a filename or file-like
object. Normally this method would be called by the
constructor with the file name as an argument."""
if shapefile:
(shapeName, ext) = os.path.splitext(shapefile)
self.shapeName = shapeName
self.load_shp(shapeName)
self.load_shx(shapeName)
self.load_dbf(shapeName)
if not (self.shp or self.dbf):
raise ShapefileException("Unable to open %s.dbf or %s.shp." % (shapeName, shapeName))
if self.shp:
self.__shpHeader()
if self.dbf:
self.__dbfHeader() | Opens a shapefile from a filename or file-like
object. Normally this method would be called by the
constructor with the file name as an argument. | Below is the instruction that describes the task:
### Input:
Opens a shapefile from a filename or file-like
object. Normally this method would be called by the
constructor with the file name as an argument.
### Response:
def load(self, shapefile=None):
"""Opens a shapefile from a filename or file-like
object. Normally this method would be called by the
constructor with the file name as an argument."""
if shapefile:
(shapeName, ext) = os.path.splitext(shapefile)
self.shapeName = shapeName
self.load_shp(shapeName)
self.load_shx(shapeName)
self.load_dbf(shapeName)
if not (self.shp or self.dbf):
raise ShapefileException("Unable to open %s.dbf or %s.shp." % (shapeName, shapeName))
if self.shp:
self.__shpHeader()
if self.dbf:
self.__dbfHeader() |
def get_call_signature(fn: FunctionType,
args: ArgsType,
kwargs: KwargsType,
debug_cache: bool = False) -> str:
"""
Takes a function and its args/kwargs, and produces a string description
of the function call (the call signature) suitable for use indirectly as a
cache key. The string is a JSON representation. See ``make_cache_key`` for
a more suitable actual cache key.
"""
# Note that the function won't have the __self__ argument (as in
# fn.__self__), at this point, even if it's a member function.
try:
call_sig = json_encode((fn.__qualname__, args, kwargs))
except TypeError:
log.critical(
"\nTo decorate using @django_cache_function without specifying "
"cache_key, the decorated function's owning class and its "
"parameters must be JSON-serializable (see jsonfunc.py, "
"django_cache_fn.py).\n")
raise
if debug_cache:
log.debug("Making call signature {!r}", call_sig)
return call_sig | Takes a function and its args/kwargs, and produces a string description
of the function call (the call signature) suitable for use indirectly as a
cache key. The string is a JSON representation. See ``make_cache_key`` for
a more suitable actual cache key. | Below is the instruction that describes the task:
### Input:
Takes a function and its args/kwargs, and produces a string description
of the function call (the call signature) suitable for use indirectly as a
cache key. The string is a JSON representation. See ``make_cache_key`` for
a more suitable actual cache key.
### Response:
def get_call_signature(fn: FunctionType,
args: ArgsType,
kwargs: KwargsType,
debug_cache: bool = False) -> str:
"""
Takes a function and its args/kwargs, and produces a string description
of the function call (the call signature) suitable for use indirectly as a
cache key. The string is a JSON representation. See ``make_cache_key`` for
a more suitable actual cache key.
"""
# Note that the function won't have the __self__ argument (as in
# fn.__self__), at this point, even if it's a member function.
try:
call_sig = json_encode((fn.__qualname__, args, kwargs))
except TypeError:
log.critical(
"\nTo decorate using @django_cache_function without specifying "
"cache_key, the decorated function's owning class and its "
"parameters must be JSON-serializable (see jsonfunc.py, "
"django_cache_fn.py).\n")
raise
if debug_cache:
log.debug("Making call signature {!r}", call_sig)
return call_sig |
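A hedged sketch of how a call signature like the one above is usually reduced to an actual cache key; the docstring's `make_cache_key` is not shown in this row, so plain `json` and `hashlib` act as stand-ins.

```python
import hashlib
import json

def call_signature(fn, args, kwargs):
    # JSON-encode the qualified name plus arguments; everything must be serializable.
    return json.dumps([fn.__qualname__, list(args), kwargs], sort_keys=True)

def make_cache_key(sig: str) -> str:
    # Hash the (possibly long) signature down to a fixed-length, cache-safe key.
    return hashlib.sha256(sig.encode("utf-8")).hexdigest()

def add(a, b=0):
    return a + b

print(make_cache_key(call_signature(add, (1,), {"b": 2})))  # stable hex digest
```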
def query(query, use_sudo=True, **kwargs):
"""
Run a MySQL query.
"""
func = use_sudo and run_as_root or run
user = kwargs.get('mysql_user') or env.get('mysql_user')
password = kwargs.get('mysql_password') or env.get('mysql_password')
options = [
'--batch',
'--raw',
'--skip-column-names',
]
if user:
options.append('--user=%s' % quote(user))
if password:
options.append('--password=%s' % quote(password))
options = ' '.join(options)
return func('mysql %(options)s --execute=%(query)s' % {
'options': options,
'query': quote(query),
}) | Run a MySQL query. | Below is the instruction that describes the task:
### Input:
Run a MySQL query.
### Response:
def query(query, use_sudo=True, **kwargs):
"""
Run a MySQL query.
"""
func = use_sudo and run_as_root or run
user = kwargs.get('mysql_user') or env.get('mysql_user')
password = kwargs.get('mysql_password') or env.get('mysql_password')
options = [
'--batch',
'--raw',
'--skip-column-names',
]
if user:
options.append('--user=%s' % quote(user))
if password:
options.append('--password=%s' % quote(password))
options = ' '.join(options)
return func('mysql %(options)s --execute=%(query)s' % {
'options': options,
'query': quote(query),
}) |
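If this is fabtools' `mysql.query` helper (the `run_as_root`/`env` idioms suggest Fabric 1.x), calling it from a task might look like the sketch below; host setup is omitted and the credentials are placeholders.

```python
from fabric.api import env, task       # Fabric 1.x API, assumed by the code above
from fabtools import mysql             # assumption: the helper above is fabtools.mysql.query

env.mysql_user = "root"                # placeholder credentials
env.mysql_password = "secret"

@task
def list_databases():
    output = mysql.query("SHOW DATABASES;")
    print(output.splitlines())
```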
def generation_time(self):
"""
The generation time as set by Yamcs.
:type: :class:`~datetime.datetime`
"""
entry = self._proto.commandQueueEntry
if entry.HasField('generationTimeUTC'):
return parse_isostring(entry.generationTimeUTC)
return None | The generation time as set by Yamcs.
:type: :class:`~datetime.datetime` | Below is the instruction that describes the task:
### Input:
The generation time as set by Yamcs.
:type: :class:`~datetime.datetime`
### Response:
def generation_time(self):
"""
The generation time as set by Yamcs.
:type: :class:`~datetime.datetime`
"""
entry = self._proto.commandQueueEntry
if entry.HasField('generationTimeUTC'):
return parse_isostring(entry.generationTimeUTC)
return None |
def get_include_path():
""" Default include path using a tricky sys
calls.
"""
f1 = os.path.basename(sys.argv[0]).lower() # script filename
f2 = os.path.basename(sys.executable).lower() # Executable filename
# If executable filename and script name are the same, we are
if f1 == f2 or f2 == f1 + '.exe': # under a "compiled" python binary
result = os.path.dirname(os.path.realpath(sys.executable))
else:
result = os.path.dirname(os.path.realpath(__file__))
return result | Default include path using a tricky sys
calls. | Below is the instruction that describes the task:
### Input:
Default include path using a tricky sys
calls.
### Response:
def get_include_path():
""" Default include path using a tricky sys
calls.
"""
f1 = os.path.basename(sys.argv[0]).lower() # script filename
f2 = os.path.basename(sys.executable).lower() # Executable filename
# If executable filename and script name are the same, we are
if f1 == f2 or f2 == f1 + '.exe': # under a "compiled" python binary
result = os.path.dirname(os.path.realpath(sys.executable))
else:
result = os.path.dirname(os.path.realpath(__file__))
return result |
async def post_data(self, path, data=None, headers=None, timeout=None):
"""Perform a POST request."""
url = self.base_url + path
_LOGGER.debug('POST URL: %s', url)
self._log_data(data, False)
resp = None
try:
resp = await self._session.post(
url, headers=headers, data=data,
timeout=DEFAULT_TIMEOUT if timeout is None else timeout)
if resp.content_length is not None:
resp_data = await resp.read()
else:
resp_data = None
self._log_data(resp_data, True)
return resp_data, resp.status
except Exception as ex:
if resp is not None:
resp.close()
raise ex
finally:
if resp is not None:
await resp.release() | Perform a POST request. | Below is the instruction that describes the task:
### Input:
Perform a POST request.
### Response:
async def post_data(self, path, data=None, headers=None, timeout=None):
"""Perform a POST request."""
url = self.base_url + path
_LOGGER.debug('POST URL: %s', url)
self._log_data(data, False)
resp = None
try:
resp = await self._session.post(
url, headers=headers, data=data,
timeout=DEFAULT_TIMEOUT if timeout is None else timeout)
if resp.content_length is not None:
resp_data = await resp.read()
else:
resp_data = None
self._log_data(resp_data, True)
return resp_data, resp.status
except Exception as ex:
if resp is not None:
resp.close()
raise ex
finally:
if resp is not None:
await resp.release() |
def fetch_replace_restriction(self, ):
"""Fetch whether unloading is restricted
:returns: True, if unloading is restricted
:rtype: :class:`bool`
:raises: None
"""
inter = self.get_refobjinter()
restricted = self.status() is None
return restricted or inter.fetch_action_restriction(self, 'replace') | Fetch whether unloading is restricted
:returns: True, if unloading is restricted
:rtype: :class:`bool`
:raises: None | Below is the instruction that describes the task:
### Input:
Fetch whether unloading is restricted
:returns: True, if unloading is restricted
:rtype: :class:`bool`
:raises: None
### Response:
def fetch_replace_restriction(self, ):
"""Fetch whether unloading is restricted
:returns: True, if unloading is restricted
:rtype: :class:`bool`
:raises: None
"""
inter = self.get_refobjinter()
restricted = self.status() is None
return restricted or inter.fetch_action_restriction(self, 'replace') |
def timestring(self, pattern="%Y-%m-%d %H:%M:%S", timezone=None):
"""Returns a time string.
:param pattern = "%Y-%m-%d %H:%M:%S"
The format used. By default, an ISO-type format is used. The
syntax here is identical to the one used by time.strftime() and
time.strptime().
:param timezone = self.timezone
The timezone (in seconds west of UTC) to return the value in. By
default, the timezone used when constructing the class is used
(local one by default). To use UTC, use timezone = 0. To use the
local tz, use timezone = chronyk.LOCALTZ.
"""
if timezone is None:
timezone = self.timezone
timestamp = self.__timestamp__ - timezone
timestamp -= LOCALTZ
return _strftime(pattern, _gmtime(timestamp)) | Returns a time string.
:param pattern = "%Y-%m-%d %H:%M:%S"
The format used. By default, an ISO-type format is used. The
syntax here is identical to the one used by time.strftime() and
time.strptime().
:param timezone = self.timezone
The timezone (in seconds west of UTC) to return the value in. By
default, the timezone used when constructing the class is used
(local one by default). To use UTC, use timezone = 0. To use the
local tz, use timezone = chronyk.LOCALTZ. | Below is the instruction that describes the task:
### Input:
Returns a time string.
:param pattern = "%Y-%m-%d %H:%M:%S"
The format used. By default, an ISO-type format is used. The
syntax here is identical to the one used by time.strftime() and
time.strptime().
:param timezone = self.timezone
The timezone (in seconds west of UTC) to return the value in. By
default, the timezone used when constructing the class is used
(local one by default). To use UTC, use timezone = 0. To use the
local tz, use timezone = chronyk.LOCALTZ.
### Response:
def timestring(self, pattern="%Y-%m-%d %H:%M:%S", timezone=None):
"""Returns a time string.
:param pattern = "%Y-%m-%d %H:%M:%S"
The format used. By default, an ISO-type format is used. The
syntax here is identical to the one used by time.strftime() and
time.strptime().
:param timezone = self.timezone
The timezone (in seconds west of UTC) to return the value in. By
default, the timezone used when constructing the class is used
(local one by default). To use UTC, use timezone = 0. To use the
local tz, use timezone = chronyk.LOCALTZ.
"""
if timezone is None:
timezone = self.timezone
timestamp = self.__timestamp__ - timezone
timestamp -= LOCALTZ
return _strftime(pattern, _gmtime(timestamp)) |
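Assuming this is the `Chronyk` class from the chronyk package, usage might look like the following sketch.

```python
from chronyk import Chronyk  # assumption: the method above is Chronyk.timestring

now = Chronyk()                                       # no argument: current local time
print(now.timestring())                               # default "%Y-%m-%d %H:%M:%S" in the local timezone
print(now.timestring("%Y-%m-%d %H:%M", timezone=0))   # same instant rendered as UTC
```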
def from_html_one(html_code, **kwargs):
"""
Generates a PrettyTables from a string of HTML code which contains only a
single <table>
"""
tables = from_html(html_code, **kwargs)
try:
assert len(tables) == 1
except AssertionError:
raise Exception("More than one <table> in provided HTML code! Use from_html instead.")
return tables[0] | Generates a PrettyTables from a string of HTML code which contains only a
single <table> | Below is the instruction that describes the task:
### Input:
Generates a PrettyTables from a string of HTML code which contains only a
single <table>
### Response:
def from_html_one(html_code, **kwargs):
"""
Generates a PrettyTables from a string of HTML code which contains only a
single <table>
"""
tables = from_html(html_code, **kwargs)
try:
assert len(tables) == 1
except AssertionError:
raise Exception("More than one <table> in provided HTML code! Use from_html instead.")
return tables[0] |
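Assuming this is prettytable's `from_html_one`, a quick sketch; the HTML snippet is invented.

```python
from prettytable import from_html_one  # assumption: the function above ships with prettytable

html = ("<table><tr><th>City</th><th>Population</th></tr>"
        "<tr><td>Oslo</td><td>0.7M</td></tr></table>")
table = from_html_one(html)
print(table)  # renders the single parsed table; two <table> elements would raise as shown above
```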
def get_corrections_dict(self, entry):
"""
Returns the corrections applied to a particular entry.
Args:
entry: A ComputedEntry object.
Returns:
({correction_name: value})
"""
corrections = {}
for c in self.corrections:
val = c.get_correction(entry)
if val != 0:
corrections[str(c)] = val
return corrections | Returns the corrections applied to a particular entry.
Args:
entry: A ComputedEntry object.
Returns:
({correction_name: value}) | Below is the instruction that describes the task:
### Input:
Returns the corrections applied to a particular entry.
Args:
entry: A ComputedEntry object.
Returns:
({correction_name: value})
### Response:
def get_corrections_dict(self, entry):
"""
Returns the corrections applied to a particular entry.
Args:
entry: A ComputedEntry object.
Returns:
({correction_name: value})
"""
corrections = {}
for c in self.corrections:
val = c.get_correction(entry)
if val != 0:
corrections[str(c)] = val
return corrections |
def change_owner(self, new_owner):
"""
Changes ownership of an organization.
"""
old_owner = self.owner.organization_user
self.owner.organization_user = new_owner
self.owner.save()
# Owner changed signal
owner_changed.send(sender=self, old=old_owner, new=new_owner) | Changes ownership of an organization. | Below is the instruction that describes the task:
### Input:
Changes ownership of an organization.
### Response:
def change_owner(self, new_owner):
"""
Changes ownership of an organization.
"""
old_owner = self.owner.organization_user
self.owner.organization_user = new_owner
self.owner.save()
# Owner changed signal
owner_changed.send(sender=self, old=old_owner, new=new_owner) |
def add(self, pattern_txt):
"""Add a pattern to the list.
Args:
pattern_txt (str list): the pattern, as a list of lines.
"""
self.patterns[len(pattern_txt)] = pattern_txt
low = 0
high = len(pattern_txt) - 1
while not pattern_txt[low]:
low += 1
while not pattern_txt[high]:
high -= 1
min_pattern = pattern_txt[low:high + 1]
self.min_patterns[len(min_pattern)] = min_pattern | Add a pattern to the list.
Args:
pattern_txt (str list): the pattern, as a list of lines. | Below is the instruction that describes the task:
### Input:
Add a pattern to the list.
Args:
pattern_txt (str list): the pattern, as a list of lines.
### Response:
def add(self, pattern_txt):
"""Add a pattern to the list.
Args:
pattern_txt (str list): the pattern, as a list of lines.
"""
self.patterns[len(pattern_txt)] = pattern_txt
low = 0
high = len(pattern_txt) - 1
while not pattern_txt[low]:
low += 1
while not pattern_txt[high]:
high -= 1
min_pattern = pattern_txt[low:high + 1]
self.min_patterns[len(min_pattern)] = min_pattern |
def gt(self, other, axis="columns", level=None):
"""Checks element-wise that this is greater than other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the gt over.
level: The Multilevel index level to apply gt over.
Returns:
A new DataFrame filled with Booleans.
"""
return self._binary_op("gt", other, axis=axis, level=level) | Checks element-wise that this is greater than other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the gt over.
level: The Multilevel index level to apply gt over.
Returns:
A new DataFrame filled with Booleans. | Below is the instruction that describes the task:
### Input:
Checks element-wise that this is greater than other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the gt over.
level: The Multilevel index level to apply gt over.
Returns:
A new DataFrame filled with Booleans.
### Response:
def gt(self, other, axis="columns", level=None):
"""Checks element-wise that this is greater than other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the gt over.
level: The Multilevel index level to apply gt over.
Returns:
A new DataFrame filled with Booleans.
"""
return self._binary_op("gt", other, axis=axis, level=level) |
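The method mirrors the pandas `DataFrame.gt` API; plain pandas shows the expected element-wise behaviour.

```python
import pandas as pd  # pandas used here as a stand-in; the method above mirrors its semantics

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
print(df.gt(2))                        # compare every element against a scalar
print(df.gt(df["a"], axis="index"))    # compare each column against a Series, aligned on the index
```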
def save_figure(self,event=None, transparent=False, dpi=600):
""" save figure image to file"""
if self.panel is not None:
self.panel.save_figure(event=event,
transparent=transparent, dpi=dpi) | save figure image to file | Below is the instruction that describes the task:
### Input:
save figure image to file
### Response:
def save_figure(self,event=None, transparent=False, dpi=600):
""" save figure image to file"""
if self.panel is not None:
self.panel.save_figure(event=event,
transparent=transparent, dpi=dpi) |
def get_account(config, environment, stage=None):
"""Find environment name in config object and return AWS account."""
if environment is None and stage:
environment = get_environment(config, stage)
account = None
for env in config.get('environments', []):
if env.get('name') == environment:
account = env.get('account')
role = env.get('role')
username = os.environ.get(env.get('rs_username_var')) \
if env.get('rs_username_var') else None
apikey = os.environ.get(env.get('rs_apikey_var')) \
if env.get('rs_apikey_var') else None
if not account:
sys.exit(ACCT_NOT_FOUND_ERROR.format(environment))
return account, role, username, apikey | Find environment name in config object and return AWS account. | Below is the instruction that describes the task:
### Input:
Find environment name in config object and return AWS account.
### Response:
def get_account(config, environment, stage=None):
"""Find environment name in config object and return AWS account."""
if environment is None and stage:
environment = get_environment(config, stage)
account = None
for env in config.get('environments', []):
if env.get('name') == environment:
account = env.get('account')
role = env.get('role')
username = os.environ.get(env.get('rs_username_var')) \
if env.get('rs_username_var') else None
apikey = os.environ.get(env.get('rs_apikey_var')) \
if env.get('rs_apikey_var') else None
if not account:
sys.exit(ACCT_NOT_FOUND_ERROR.format(environment))
return account, role, username, apikey |
def load_amazon():
"""Amazon product co-purchasing network and ground-truth communities.
Network was collected by crawling Amazon website. It is based on Customers Who Bought
This Item Also Bought feature of the Amazon website. If a product i is frequently
co-purchased with product j, the graph contains an undirected edge from i to j.
Each product category provided by Amazon defines each ground-truth community.
"""
dataset_path = _load('amazon')
X = _load_csv(dataset_path, 'data')
y = X.pop('label').values
graph = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph.gml')))
return Dataset(load_amazon.__doc__, X, y, normalized_mutual_info_score, graph=graph) | Amazon product co-purchasing network and ground-truth communities.
Network was collected by crawling Amazon website. It is based on Customers Who Bought
This Item Also Bought feature of the Amazon website. If a product i is frequently
co-purchased with product j, the graph contains an undirected edge from i to j.
Each product category provided by Amazon defines each ground-truth community. | Below is the instruction that describes the task:
### Input:
Amazon product co-purchasing network and ground-truth communities.
Network was collected by crawling Amazon website. It is based on Customers Who Bought
This Item Also Bought feature of the Amazon website. If a product i is frequently
co-purchased with product j, the graph contains an undirected edge from i to j.
Each product category provided by Amazon defines each ground-truth community.
### Response:
def load_amazon():
"""Amazon product co-purchasing network and ground-truth communities.
Network was collected by crawling Amazon website. It is based on Customers Who Bought
This Item Also Bought feature of the Amazon website. If a product i is frequently
co-purchased with product j, the graph contains an undirected edge from i to j.
Each product category provided by Amazon defines each ground-truth community.
"""
dataset_path = _load('amazon')
X = _load_csv(dataset_path, 'data')
y = X.pop('label').values
graph = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph.gml')))
return Dataset(load_amazon.__doc__, X, y, normalized_mutual_info_score, graph=graph) |
def cofold(self, strand1, strand2, temp=37.0, dangles=2, nolp=False,
nogu=False, noclosinggu=False, constraints=None,
canonicalbponly=False, partition=-1, pfscale=None, gquad=False):
'''Run the RNAcofold command and retrieve the result in a dictionary.
:param strand1: Strand 1 for running RNAcofold.
:type strand1: coral.DNA or coral.RNA
:param strand2: Strand 2 for running RNAcofold.
:type strand2: coral.DNA or coral.RNA
:param temp: Temperature at which to run the calculations.
:type temp: float
:param dangles: How to treat dangling end energies. Set to 0 to ignore
dangling ends. Set to 1 to limit unpaired bases to
at most one dangling end (default for MFE calc). Set to
2 (the default) to remove the limit in 1. Set to 3 to
allow coaxial stacking of adjacent helices in
.multi-loops
:type dangles: int
:param nolp: Produce structures without lonely pairs (isolated single
base pairs).
:type nolp: bool
:param nogu: Do not allow GU pairs.
:type nogu: bool
:param noclosinggu: Do not allow GU pairs at the end of helices.
:type noclosinggu: bool
:param constraints: Any structural constraints to use. Format is
defined at
http://www.tbi.univie.ac.at/RNA/RNAfold.1.html
:type constraints: str
:param canonicalbponly: Remove non-canonical base pairs from the
structure constraint (if applicable).
:type canonicalbponly: bool
:param partition: Calculates the partition function for the sequence.
:type partition: int
:param pfscale: Scaling factor for the partition function.
:type pfScale: float
:param gquad: Incorporate G-Quadruplex formation into the structure
prediction.
:type gquad: bool
:returns: Dictionary of calculated values, defaulting to values of MFE
('mfe': float) and dotbracket structure ('dotbracket': str).
More keys are added depending on keyword arguments.
:rtype: dict
'''
cmd_args = []
cmd_kwargs = {'--temp=': str(temp)}
cmd_kwargs['--dangles='] = dangles
if nolp:
cmd_args.append('--noLP')
if nogu:
cmd_args.append('--noGU')
if noclosinggu:
cmd_args.append('--noClosingGU')
if constraints is not None:
cmd_args.append('--constraint')
if canonicalbponly:
cmd_args.append('--canonicalBPonly')
if partition:
cmd_args.append('--partfunc')
if pfscale is not None:
cmd_kwargs['pfScale'] = float(pfscale)
if gquad:
cmd_args.append('--gquad')
inputs = ['>strands\n{}&{}'.format(str(strand1), str(strand2))]
if constraints is not None:
inputs.append(constraints)
rnafold_output = self._run('RNAcofold', inputs, cmd_args, cmd_kwargs)
# Process the output
output = {}
lines = rnafold_output.splitlines()
# Line 1 is the name of the sequence input, line 2 is the sequence
lines.pop(0)
lines.pop(0)
# Line 3 is the dotbracket + mfe for strand1
line3 = lines.pop(0)
output['dotbracket'] = self._lparse(line3, '^(.*) \(')
output['mfe'] = float(self._lparse(line3, ' \((.*)\)$'))
# Optional outputs
if partition:
# Line 4 is 'a coarse representation of the pair probabilities' and
# the ensemble free energy
line4 = lines.pop(0)
output['coarse'] = self._lparse(line4, '^(.*) \[')
output['ensemble'] = float(self._lparse(line4, ' \[(.*)\]$'))
# Line 5 is the centroid structure, its free energy, and distance
# to the ensemble
line5 = lines.pop(0)
output['frequency'] = float(self._lparse(line5, 'ensemble (.*),'))
output['deltaG'] = float(self._lparse(line5, 'binding=(.*)$'))
# Parse the postscript file (the only place the probability matrix
# is)
with open(os.path.join(self._tempdir, 'strands_dp.ps')) as f:
pattern = 'start of base pair probability data\n(.*)\nshowpage'
dotplot_file = f.read()
dotplot_data = re.search(pattern, dotplot_file,
flags=re.DOTALL).group(1).split('\n')
# Dimension of the dotplot - compares seq1, seq2 to self and
# to each other (concatenation of seq1 and seq2 = axis)
dim = len(strand1) + len(strand2)
ensemble_probs = np.zeros((dim, dim))
optimal_probs = np.zeros((dim, dim))
for point in dotplot_data:
point_split = point.split(' ')
# Use zero indexing
i = int(point_split[0]) - 1
j = int(point_split[1]) - 1
sqprob = float(point_split[2])
probtype = point_split[3]
if probtype == 'ubox':
ensemble_probs[i][j] = sqprob**2
else:
optimal_probs[i][j] = sqprob**2
output['ensemble_matrix'] = ensemble_probs
output['optimal_matrix'] = optimal_probs
return output | Run the RNAcofold command and retrieve the result in a dictionary.
:param strand1: Strand 1 for running RNAcofold.
:type strand1: coral.DNA or coral.RNA
    :param strand2: Strand 2 for running RNAcofold.
:type strand2: coral.DNA or coral.RNA
:param temp: Temperature at which to run the calculations.
:type temp: float
:param dangles: How to treat dangling end energies. Set to 0 to ignore
dangling ends. Set to 1 to limit unpaired bases to
at most one dangling end (default for MFE calc). Set to
2 (the default) to remove the limit in 1. Set to 3 to
allow coaxial stacking of adjacent helices in
.multi-loops
:type dangles: int
:param nolp: Produce structures without lonely pairs (isolated single
base pairs).
:type nolp: bool
:param nogu: Do not allow GU pairs.
:type nogu: bool
:param noclosinggu: Do not allow GU pairs at the end of helices.
:type noclosinggu: bool
:param constraints: Any structural constraints to use. Format is
defined at
http://www.tbi.univie.ac.at/RNA/RNAfold.1.html
:type constraints: str
:param canonicalbponly: Remove non-canonical base pairs from the
structure constraint (if applicable).
:type canonicalbponly: bool
:param partition: Calculates the partition function for the sequence.
:type partition: int
:param pfscale: Scaling factor for the partition function.
:type pfScale: float
:param gquad: Incorporate G-Quadruplex formation into the structure
prediction.
:type gquad: bool
:returns: Dictionary of calculated values, defaulting to values of MFE
('mfe': float) and dotbracket structure ('dotbracket': str).
More keys are added depending on keyword arguments.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Run the RNAcofold command and retrieve the result in a dictionary.
:param strand1: Strand 1 for running RNAcofold.
:type strand1: coral.DNA or coral.RNA
    :param strand2: Strand 2 for running RNAcofold.
:type strand2: coral.DNA or coral.RNA
:param temp: Temperature at which to run the calculations.
:type temp: float
:param dangles: How to treat dangling end energies. Set to 0 to ignore
dangling ends. Set to 1 to limit unpaired bases to
at most one dangling end (default for MFE calc). Set to
2 (the default) to remove the limit in 1. Set to 3 to
allow coaxial stacking of adjacent helices in
.multi-loops
:type dangles: int
:param nolp: Produce structures without lonely pairs (isolated single
base pairs).
:type nolp: bool
:param nogu: Do not allow GU pairs.
:type nogu: bool
:param noclosinggu: Do not allow GU pairs at the end of helices.
:type noclosinggu: bool
:param constraints: Any structural constraints to use. Format is
defined at
http://www.tbi.univie.ac.at/RNA/RNAfold.1.html
:type constraints: str
:param canonicalbponly: Remove non-canonical base pairs from the
structure constraint (if applicable).
:type canonicalbponly: bool
:param partition: Calculates the partition function for the sequence.
:type partition: int
:param pfscale: Scaling factor for the partition function.
:type pfScale: float
:param gquad: Incorporate G-Quadruplex formation into the structure
prediction.
:type gquad: bool
:returns: Dictionary of calculated values, defaulting to values of MFE
('mfe': float) and dotbracket structure ('dotbracket': str).
More keys are added depending on keyword arguments.
:rtype: dict
### Response:
def cofold(self, strand1, strand2, temp=37.0, dangles=2, nolp=False,
nogu=False, noclosinggu=False, constraints=None,
canonicalbponly=False, partition=-1, pfscale=None, gquad=False):
'''Run the RNAcofold command and retrieve the result in a dictionary.
:param strand1: Strand 1 for running RNAcofold.
:type strand1: coral.DNA or coral.RNA
    :param strand2: Strand 2 for running RNAcofold.
:type strand2: coral.DNA or coral.RNA
:param temp: Temperature at which to run the calculations.
:type temp: float
:param dangles: How to treat dangling end energies. Set to 0 to ignore
dangling ends. Set to 1 to limit unpaired bases to
at most one dangling end (default for MFE calc). Set to
2 (the default) to remove the limit in 1. Set to 3 to
allow coaxial stacking of adjacent helices in
.multi-loops
:type dangles: int
:param nolp: Produce structures without lonely pairs (isolated single
base pairs).
:type nolp: bool
:param nogu: Do not allow GU pairs.
:type nogu: bool
:param noclosinggu: Do not allow GU pairs at the end of helices.
:type noclosinggu: bool
:param constraints: Any structural constraints to use. Format is
defined at
http://www.tbi.univie.ac.at/RNA/RNAfold.1.html
:type constraints: str
:param canonicalbponly: Remove non-canonical base pairs from the
structure constraint (if applicable).
:type canonicalbponly: bool
:param partition: Calculates the partition function for the sequence.
:type partition: int
:param pfscale: Scaling factor for the partition function.
:type pfScale: float
:param gquad: Incorporate G-Quadruplex formation into the structure
prediction.
:type gquad: bool
:returns: Dictionary of calculated values, defaulting to values of MFE
('mfe': float) and dotbracket structure ('dotbracket': str).
More keys are added depending on keyword arguments.
:rtype: dict
'''
cmd_args = []
cmd_kwargs = {'--temp=': str(temp)}
cmd_kwargs['--dangles='] = dangles
if nolp:
cmd_args.append('--noLP')
if nogu:
cmd_args.append('--noGU')
if noclosinggu:
cmd_args.append('--noClosingGU')
if constraints is not None:
cmd_args.append('--constraint')
if canonicalbponly:
cmd_args.append('--canonicalBPonly')
if partition:
cmd_args.append('--partfunc')
if pfscale is not None:
cmd_kwargs['pfScale'] = float(pfscale)
if gquad:
cmd_args.append('--gquad')
inputs = ['>strands\n{}&{}'.format(str(strand1), str(strand2))]
if constraints is not None:
inputs.append(constraints)
rnafold_output = self._run('RNAcofold', inputs, cmd_args, cmd_kwargs)
# Process the output
output = {}
lines = rnafold_output.splitlines()
# Line 1 is the name of the sequence input, line 2 is the sequence
lines.pop(0)
lines.pop(0)
# Line 3 is the dotbracket + mfe for strand1
line3 = lines.pop(0)
output['dotbracket'] = self._lparse(line3, '^(.*) \(')
output['mfe'] = float(self._lparse(line3, ' \((.*)\)$'))
# Optional outputs
if partition:
# Line 4 is 'a coarse representation of the pair probabilities' and
# the ensemble free energy
line4 = lines.pop(0)
output['coarse'] = self._lparse(line4, '^(.*) \[')
output['ensemble'] = float(self._lparse(line4, ' \[(.*)\]$'))
# Line 5 is the centroid structure, its free energy, and distance
# to the ensemble
line5 = lines.pop(0)
output['frequency'] = float(self._lparse(line5, 'ensemble (.*),'))
output['deltaG'] = float(self._lparse(line5, 'binding=(.*)$'))
# Parse the postscript file (the only place the probability matrix
# is)
with open(os.path.join(self._tempdir, 'strands_dp.ps')) as f:
pattern = 'start of base pair probability data\n(.*)\nshowpage'
dotplot_file = f.read()
dotplot_data = re.search(pattern, dotplot_file,
flags=re.DOTALL).group(1).split('\n')
# Dimension of the dotplot - compares seq1, seq2 to self and
# to each other (concatenation of seq1 and seq2 = axis)
dim = len(strand1) + len(strand2)
ensemble_probs = np.zeros((dim, dim))
optimal_probs = np.zeros((dim, dim))
for point in dotplot_data:
point_split = point.split(' ')
# Use zero indexing
i = int(point_split[0]) - 1
j = int(point_split[1]) - 1
sqprob = float(point_split[2])
probtype = point_split[3]
if probtype == 'ubox':
ensemble_probs[i][j] = sqprob**2
else:
optimal_probs[i][j] = sqprob**2
output['ensemble_matrix'] = ensemble_probs
output['optimal_matrix'] = optimal_probs
return output |
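A hedged usage sketch: `vrna` stands for an instance of the wrapper class that defines cofold, and the sequences are illustrative:
import coral
seq_a = coral.DNA('ATGCATGCATGC')
seq_b = coral.DNA('GCATGCATGCAT')
result = vrna.cofold(seq_a, seq_b, temp=37.0)  # vrna: assumed wrapper instance
print(result['mfe'], result['dotbracket'])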
def add(self):
"""
Save the current :code.`PyFunceble.CONFIGURATION['to_test']`
into the current timestamp.
"""
if PyFunceble.CONFIGURATION["inactive_database"]:
# The database subsystem is activated.
# We get the timestamp to use as index.
timestamp = str(self._timestamp())
if (
"inactive_db" in PyFunceble.INTERN
and PyFunceble.INTERN["file_to_test"]
in PyFunceble.INTERN["inactive_db"]
):
            # * The file path is already in the database.
if (
timestamp
in PyFunceble.INTERN["inactive_db"][
PyFunceble.INTERN["file_to_test"]
]
):
                # The timestamp is already in the database related to the file we
# are testing.
if (
PyFunceble.INTERN["to_test"]
not in PyFunceble.INTERN["inactive_db"][
PyFunceble.INTERN["file_to_test"]
][timestamp]
):
# The currently tested element is not into the database related
# to the file we are testing.
# We append the currently tested element into the database.
PyFunceble.INTERN["inactive_db"][
PyFunceble.INTERN["file_to_test"]
][timestamp].append(PyFunceble.INTERN["to_test"])
else:
                # The timestamp is not in the database related to the file we
# are testing.
                # We append the index and the database element into the database
# related to the file we are testing.
PyFunceble.INTERN["inactive_db"][
PyFunceble.INTERN["file_to_test"]
].update({timestamp: [PyFunceble.INTERN["to_test"]]})
if (
"to_test"
in PyFunceble.INTERN["inactive_db"][
PyFunceble.INTERN["file_to_test"]
]
and PyFunceble.INTERN["to_test"]
in PyFunceble.INTERN["inactive_db"][
PyFunceble.INTERN["file_to_test"]
]["to_test"]
):
# * The `to_test` index is into the database related to the file we
# are testing.
# and
# * The element we are testing is into the `to_test` index related to
# the file we are testing.
# We remove the element from the list of element to test.
PyFunceble.INTERN["inactive_db"][PyFunceble.INTERN["file_to_test"]][
"to_test"
].remove(PyFunceble.INTERN["to_test"])
else:
# The file path is not into the database.
# We initiate the file path and its content into the database.
PyFunceble.INTERN["inactive_db"] = {
PyFunceble.INTERN["file_to_test"]: {
timestamp: [PyFunceble.INTERN["to_test"]]
}
}
# And we save the data into the database.
self._backup() | Save the current :code.`PyFunceble.CONFIGURATION['to_test']`
into the current timestamp. | Below is the the instruction that describes the task:
### Input:
Save the current :code.`PyFunceble.CONFIGURATION['to_test']`
into the current timestamp.
### Response:
def add(self):
"""
Save the current :code.`PyFunceble.CONFIGURATION['to_test']`
into the current timestamp.
"""
if PyFunceble.CONFIGURATION["inactive_database"]:
# The database subsystem is activated.
# We get the timestamp to use as index.
timestamp = str(self._timestamp())
if (
"inactive_db" in PyFunceble.INTERN
and PyFunceble.INTERN["file_to_test"]
in PyFunceble.INTERN["inactive_db"]
):
            # * The file path is already in the database.
if (
timestamp
in PyFunceble.INTERN["inactive_db"][
PyFunceble.INTERN["file_to_test"]
]
):
                # The timestamp is already in the database related to the file we
# are testing.
if (
PyFunceble.INTERN["to_test"]
not in PyFunceble.INTERN["inactive_db"][
PyFunceble.INTERN["file_to_test"]
][timestamp]
):
# The currently tested element is not into the database related
# to the file we are testing.
# We append the currently tested element into the database.
PyFunceble.INTERN["inactive_db"][
PyFunceble.INTERN["file_to_test"]
][timestamp].append(PyFunceble.INTERN["to_test"])
else:
                # The timestamp is not in the database related to the file we
# are testing.
                # We append the index and the database element into the database
# related to the file we are testing.
PyFunceble.INTERN["inactive_db"][
PyFunceble.INTERN["file_to_test"]
].update({timestamp: [PyFunceble.INTERN["to_test"]]})
if (
"to_test"
in PyFunceble.INTERN["inactive_db"][
PyFunceble.INTERN["file_to_test"]
]
and PyFunceble.INTERN["to_test"]
in PyFunceble.INTERN["inactive_db"][
PyFunceble.INTERN["file_to_test"]
]["to_test"]
):
# * The `to_test` index is into the database related to the file we
# are testing.
# and
# * The element we are testing is into the `to_test` index related to
# the file we are testing.
# We remove the element from the list of element to test.
PyFunceble.INTERN["inactive_db"][PyFunceble.INTERN["file_to_test"]][
"to_test"
].remove(PyFunceble.INTERN["to_test"])
else:
# The file path is not into the database.
# We initiate the file path and its content into the database.
PyFunceble.INTERN["inactive_db"] = {
PyFunceble.INTERN["file_to_test"]: {
timestamp: [PyFunceble.INTERN["to_test"]]
}
}
# And we save the data into the database.
self._backup() |
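For reference, a sketch of the structure this method maintains (keys and values are illustrative, derived from the assignments above):
# PyFunceble.INTERN["inactive_db"] = {
#     "/path/to/tested_file": {
#         "1565887803": ["example.org", "example.net"],  # timestamp -> subjects saved for retest
#         "to_test": [],                                  # subjects queued for retesting
#     }
# }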
def _AbortJoin(self, timeout=None):
"""Aborts all registered processes by joining with the parent process.
Args:
timeout (int): number of seconds to wait for processes to join, where
None represents no timeout.
"""
for pid, process in iter(self._processes_per_pid.items()):
logger.debug('Waiting for process: {0:s} (PID: {1:d}).'.format(
process.name, pid))
process.join(timeout=timeout)
if not process.is_alive():
logger.debug('Process {0:s} (PID: {1:d}) stopped.'.format(
process.name, pid)) | Aborts all registered processes by joining with the parent process.
Args:
timeout (int): number of seconds to wait for processes to join, where
None represents no timeout. | Below is the the instruction that describes the task:
### Input:
Aborts all registered processes by joining with the parent process.
Args:
timeout (int): number of seconds to wait for processes to join, where
None represents no timeout.
### Response:
def _AbortJoin(self, timeout=None):
"""Aborts all registered processes by joining with the parent process.
Args:
timeout (int): number of seconds to wait for processes to join, where
None represents no timeout.
"""
for pid, process in iter(self._processes_per_pid.items()):
logger.debug('Waiting for process: {0:s} (PID: {1:d}).'.format(
process.name, pid))
process.join(timeout=timeout)
if not process.is_alive():
logger.debug('Process {0:s} (PID: {1:d}) stopped.'.format(
process.name, pid)) |
def calc_inbag(n_samples, forest):
"""
Derive samples used to create trees in scikit-learn RandomForest objects.
Recovers the samples in each tree from the random state of that tree using
:func:`forest._generate_sample_indices`.
Parameters
----------
n_samples : int
The number of samples used to fit the scikit-learn RandomForest object.
forest : RandomForest
Regressor or Classifier object that is already fit by scikit-learn.
Returns
-------
Array that records how many times a data point was placed in a tree.
Columns are individual trees. Rows are the number of times a sample was
used in a tree.
"""
if not forest.bootstrap:
e_s = "Cannot calculate the inbag from a forest that has "
e_s = " bootstrap=False"
raise ValueError(e_s)
n_trees = forest.n_estimators
inbag = np.zeros((n_samples, n_trees))
sample_idx = []
for t_idx in range(n_trees):
sample_idx.append(
_generate_sample_indices(forest.estimators_[t_idx].random_state,
n_samples))
inbag[:, t_idx] = np.bincount(sample_idx[-1], minlength=n_samples)
return inbag | Derive samples used to create trees in scikit-learn RandomForest objects.
Recovers the samples in each tree from the random state of that tree using
:func:`forest._generate_sample_indices`.
Parameters
----------
n_samples : int
The number of samples used to fit the scikit-learn RandomForest object.
forest : RandomForest
Regressor or Classifier object that is already fit by scikit-learn.
Returns
-------
Array that records how many times a data point was placed in a tree.
Columns are individual trees. Rows are the number of times a sample was
used in a tree. | Below is the the instruction that describes the task:
### Input:
Derive samples used to create trees in scikit-learn RandomForest objects.
Recovers the samples in each tree from the random state of that tree using
:func:`forest._generate_sample_indices`.
Parameters
----------
n_samples : int
The number of samples used to fit the scikit-learn RandomForest object.
forest : RandomForest
Regressor or Classifier object that is already fit by scikit-learn.
Returns
-------
Array that records how many times a data point was placed in a tree.
Columns are individual trees. Rows are the number of times a sample was
used in a tree.
### Response:
def calc_inbag(n_samples, forest):
"""
Derive samples used to create trees in scikit-learn RandomForest objects.
Recovers the samples in each tree from the random state of that tree using
:func:`forest._generate_sample_indices`.
Parameters
----------
n_samples : int
The number of samples used to fit the scikit-learn RandomForest object.
forest : RandomForest
Regressor or Classifier object that is already fit by scikit-learn.
Returns
-------
Array that records how many times a data point was placed in a tree.
Columns are individual trees. Rows are the number of times a sample was
used in a tree.
"""
if not forest.bootstrap:
e_s = "Cannot calculate the inbag from a forest that has "
e_s = " bootstrap=False"
raise ValueError(e_s)
n_trees = forest.n_estimators
inbag = np.zeros((n_samples, n_trees))
sample_idx = []
for t_idx in range(n_trees):
sample_idx.append(
_generate_sample_indices(forest.estimators_[t_idx].random_state,
n_samples))
inbag[:, t_idx] = np.bincount(sample_idx[-1], minlength=n_samples)
return inbag |
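A usage sketch with a scikit-learn forest (the random data is illustrative; the forest must be fit with bootstrap=True):
import numpy as np
from sklearn.ensemble import RandomForestRegressor
X, y = np.random.rand(100, 4), np.random.rand(100)
forest = RandomForestRegressor(n_estimators=10, bootstrap=True).fit(X, y)
inbag = calc_inbag(X.shape[0], forest)  # inbag[i, t]: times sample i was drawn for tree t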
def _get_ID2position_mapper(self, position_mapper):
'''
Defines a position parser that is used
to map between sample IDs and positions.
Parameters
--------------
{_bases_position_mapper}
TODO: Fix the name to work with more than 26 letters
of the alphabet.
'''
def num_parser(x, order):
i, j = unravel_index(int(x - 1), self.shape, order=order)
return (self.row_labels[i], self.col_labels[j])
if hasattr(position_mapper, '__call__'):
mapper = position_mapper
elif isinstance(position_mapper, collections.Mapping):
mapper = lambda x: position_mapper[x]
elif position_mapper == 'name':
mapper = lambda x: (x[0], int(x[1:]))
elif position_mapper in ('row_first_enumerator', 'number'):
mapper = lambda x: num_parser(x, 'F')
elif position_mapper == 'col_first_enumerator':
mapper = lambda x: num_parser(x, 'C')
else:
msg = '"{}" is not a known key_to_position_parser.'.format(position_mapper)
raise ValueError(msg)
return mapper | Defines a position parser that is used
to map between sample IDs and positions.
Parameters
--------------
{_bases_position_mapper}
TODO: Fix the name to work with more than 26 letters
of the alphabet. | Below is the the instruction that describes the task:
### Input:
Defines a position parser that is used
to map between sample IDs and positions.
Parameters
--------------
{_bases_position_mapper}
TODO: Fix the name to work with more than 26 letters
of the alphabet.
### Response:
def _get_ID2position_mapper(self, position_mapper):
'''
Defines a position parser that is used
to map between sample IDs and positions.
Parameters
--------------
{_bases_position_mapper}
TODO: Fix the name to work with more than 26 letters
of the alphabet.
'''
def num_parser(x, order):
i, j = unravel_index(int(x - 1), self.shape, order=order)
return (self.row_labels[i], self.col_labels[j])
if hasattr(position_mapper, '__call__'):
mapper = position_mapper
elif isinstance(position_mapper, collections.Mapping):
mapper = lambda x: position_mapper[x]
elif position_mapper == 'name':
mapper = lambda x: (x[0], int(x[1:]))
elif position_mapper in ('row_first_enumerator', 'number'):
mapper = lambda x: num_parser(x, 'F')
elif position_mapper == 'col_first_enumerator':
mapper = lambda x: num_parser(x, 'C')
else:
msg = '"{}" is not a known key_to_position_parser.'.format(position_mapper)
raise ValueError(msg)
return mapper |
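A worked example of the built-in 'name' mapper (the plate object is hypothetical; the parsing follows the lambda above):
mapper = plate._get_ID2position_mapper('name')  # plate: assumed instance
mapper('B12')                                   # -> ('B', 12)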
def delete_snapshot(self, path, snapshotname, **kwargs):
"""Delete a snapshot of a directory"""
response = self._delete(path, 'DELETESNAPSHOT', snapshotname=snapshotname, **kwargs)
assert not response.content | Delete a snapshot of a directory | Below is the the instruction that describes the task:
### Input:
Delete a snapshot of a directory
### Response:
def delete_snapshot(self, path, snapshotname, **kwargs):
"""Delete a snapshot of a directory"""
response = self._delete(path, 'DELETESNAPSHOT', snapshotname=snapshotname, **kwargs)
assert not response.content |
def list_containers(active=True, defined=True,
as_object=False, config_path=None):
"""
List the containers on the system.
"""
if config_path:
if not os.path.exists(config_path):
return tuple()
try:
entries = _lxc.list_containers(active=active, defined=defined,
config_path=config_path)
except ValueError:
return tuple()
else:
try:
entries = _lxc.list_containers(active=active, defined=defined)
except ValueError:
return tuple()
if as_object:
return tuple([Container(name, config_path) for name in entries])
else:
return entries | List the containers on the system. | Below is the the instruction that describes the task:
### Input:
List the containers on the system.
### Response:
def list_containers(active=True, defined=True,
as_object=False, config_path=None):
"""
List the containers on the system.
"""
if config_path:
if not os.path.exists(config_path):
return tuple()
try:
entries = _lxc.list_containers(active=active, defined=defined,
config_path=config_path)
except ValueError:
return tuple()
else:
try:
entries = _lxc.list_containers(active=active, defined=defined)
except ValueError:
return tuple()
if as_object:
return tuple([Container(name, config_path) for name in entries])
else:
return entries |
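A hedged usage sketch, assuming this helper lives in the python3-lxc binding module:
import lxc
names = lxc.list_containers()                     # tuple of container names
containers = lxc.list_containers(as_object=True)  # tuple of Container objects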
def transform_non_affine(self, x, mask_out_of_range=True):
"""
Transform a Nx1 numpy array.
Parameters
----------
x : array
Data to be transformed.
mask_out_of_range : bool, optional
Whether to mask input values out of range.
Return
------
array or masked array
Transformed data.
"""
# Mask out-of-range values
if mask_out_of_range:
x_masked = np.ma.masked_where((x < self._xmin) | (x > self._xmax),
x)
else:
x_masked = x
# Calculate s and return
return np.interp(x_masked, self._x_range, self._s_range) | Transform a Nx1 numpy array.
Parameters
----------
x : array
Data to be transformed.
mask_out_of_range : bool, optional
Whether to mask input values out of range.
Return
------
array or masked array
Transformed data. | Below is the the instruction that describes the task:
### Input:
Transform a Nx1 numpy array.
Parameters
----------
x : array
Data to be transformed.
mask_out_of_range : bool, optional
Whether to mask input values out of range.
Return
------
array or masked array
Transformed data.
### Response:
def transform_non_affine(self, x, mask_out_of_range=True):
"""
Transform a Nx1 numpy array.
Parameters
----------
x : array
Data to be transformed.
mask_out_of_range : bool, optional
Whether to mask input values out of range.
Return
------
array or masked array
Transformed data.
"""
# Mask out-of-range values
if mask_out_of_range:
x_masked = np.ma.masked_where((x < self._xmin) | (x > self._xmax),
x)
else:
x_masked = x
# Calculate s and return
return np.interp(x_masked, self._x_range, self._s_range) |
def search(self, template: str, first: bool = False) -> _Result:
"""Search the :class:`Element <Element>` for the given parse
template.
:param template: The Parse template to use.
"""
elements = [r for r in findall(template, self.xml)]
return _get_first_or_list(elements, first) | Search the :class:`Element <Element>` for the given parse
template.
:param template: The Parse template to use. | Below is the the instruction that describes the task:
### Input:
Search the :class:`Element <Element>` for the given parse
template.
:param template: The Parse template to use.
### Response:
def search(self, template: str, first: bool = False) -> _Result:
"""Search the :class:`Element <Element>` for the given parse
template.
:param template: The Parse template to use.
"""
elements = [r for r in findall(template, self.xml)]
return _get_first_or_list(elements, first) |
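An illustrative call, assuming `el` is an Element whose xml attribute holds markup:
link = el.search('href="{}"', first=True)  # el: assumed Element instance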
def retrieve(self, thing, thing_type=None):
"""
Retrieve a report from VirusTotal based on a hash, IP, domain, file or URL or ScanID. NOTE: URLs must include the scheme
(e.g. http://)\n
:param thing: a file name on the local system, a URL or list of URLs,
an IP or list of IPs, a domain or list of domains, a hash or list of hashes
:param thing_type: Optional, a hint to the function as to what you are sending it
:return: Returns a a dictionary with thing as key and the API json response as the value
If thing was a list of things to query the results will be a dictionary with every thing in the list
as a key
:raises TypeError: if it gets something other than a URL, IP domain, hash or ScanID
:raises TypeError: if VirusTotal returns something we can't parse.
"""
# trust the user-supplied type over the automatic identification
thing_id = self._whatis(thing)
if thing_type is None:
thing_type = thing_id
query_parameters = {}
# Query API for URL(s)
if thing_type == API_Constants.URL: # Get the scan results for a given URL or list of URLs.
query = API_Constants.CONST_API_URL + API_Constants.API_ACTION_GET_URL_REPORT
if not isinstance(thing, list):
thing = [thing]
grouped_urls = self._grouped(thing, self._urls_per_retrieve) # break list of URLS down to API limits
results = {}
for group in grouped_urls:
query_parameters = {"resource": "\n".join([url for url in group])}
self._limit_call_handler()
try:
response = self._post_query(query, query_parameters)
except:
raise TypeError
# If we get a list of URLs that has N urls and N mod '_url_per_retrieve' is 1
# for example [url, url, url], when limit is 2, the last query will not return a list
if not isinstance(response, list):
response = [response]
for index, scanid in enumerate(group):
results[scanid] = response[index]
result = results
# Query API for domain(s)
elif thing_type == API_Constants.DOMAIN:
query = API_Constants.CONST_API_URL + API_Constants.API_ACTION_GET_DOMAIN_REPORT
if not isinstance(thing, list):
thing = [thing]
results = {}
for domain in thing:
query_parameters["domain"] = domain
self._limit_call_handler()
response = self._get_query(query, query_parameters)
results[domain] = response
result = results
# Query API for IP(s)
elif thing_type == API_Constants.IP:
query = API_Constants.CONST_API_URL + API_Constants.API_ACTION_GET_IP_REPORT
if not isinstance(thing, list):
thing = [thing]
results = {}
for ip in thing:
query_parameters["ip"] = ip
self._limit_call_handler()
try:
response = self._get_query(query, query_parameters)
except:
raise TypeError
results[ip] = response
result = results
# Query API for HASH, bulk HASH queries not possible
elif thing_type == API_Constants.HASH:
query = API_Constants.CONST_API_URL + API_Constants.API_ACTION_GET_FILE_REPORT
results = {}
if not isinstance(thing, list):
thing = [thing]
query_parameters["resource"] = ", ".join(thing)
self._limit_call_handler()
response = self._get_query(query, query_parameters)
if not isinstance(response, list):
response = [response]
for index, hash in enumerate(thing):
results[hash] = response[index]
result = results
elif thing_type == "scanid":
query = API_Constants.CONST_API_URL + API_Constants.API_ACTION_GET_URL_REPORT
if not isinstance(thing, list):
thing = [thing]
results = {}
for scanid in thing:
query_parameters["resource"] = scanid
self._limit_call_handler()
try:
response = self._post_query(query, query_parameters)
except:
raise TypeError
results[scanid] = response
result = results
else:
raise TypeError("Unimplemented '%s'." % thing_type)
return result | Retrieve a report from VirusTotal based on a hash, IP, domain, file or URL or ScanID. NOTE: URLs must include the scheme
(e.g. http://)\n
:param thing: a file name on the local system, a URL or list of URLs,
an IP or list of IPs, a domain or list of domains, a hash or list of hashes
:param thing_type: Optional, a hint to the function as to what you are sending it
:return: Returns a a dictionary with thing as key and the API json response as the value
If thing was a list of things to query the results will be a dictionary with every thing in the list
as a key
:raises TypeError: if it gets something other than a URL, IP domain, hash or ScanID
:raises TypeError: if VirusTotal returns something we can't parse. | Below is the the instruction that describes the task:
### Input:
Retrieve a report from VirusTotal based on a hash, IP, domain, file or URL or ScanID. NOTE: URLs must include the scheme
(e.g. http://)\n
:param thing: a file name on the local system, a URL or list of URLs,
an IP or list of IPs, a domain or list of domains, a hash or list of hashes
:param thing_type: Optional, a hint to the function as to what you are sending it
:return: Returns a a dictionary with thing as key and the API json response as the value
If thing was a list of things to query the results will be a dictionary with every thing in the list
as a key
:raises TypeError: if it gets something other than a URL, IP domain, hash or ScanID
:raises TypeError: if VirusTotal returns something we can't parse.
### Response:
def retrieve(self, thing, thing_type=None):
"""
Retrieve a report from VirusTotal based on a hash, IP, domain, file or URL or ScanID. NOTE: URLs must include the scheme
(e.g. http://)\n
:param thing: a file name on the local system, a URL or list of URLs,
an IP or list of IPs, a domain or list of domains, a hash or list of hashes
:param thing_type: Optional, a hint to the function as to what you are sending it
:return: Returns a a dictionary with thing as key and the API json response as the value
If thing was a list of things to query the results will be a dictionary with every thing in the list
as a key
:raises TypeError: if it gets something other than a URL, IP domain, hash or ScanID
:raises TypeError: if VirusTotal returns something we can't parse.
"""
# trust the user-supplied type over the automatic identification
thing_id = self._whatis(thing)
if thing_type is None:
thing_type = thing_id
query_parameters = {}
# Query API for URL(s)
if thing_type == API_Constants.URL: # Get the scan results for a given URL or list of URLs.
query = API_Constants.CONST_API_URL + API_Constants.API_ACTION_GET_URL_REPORT
if not isinstance(thing, list):
thing = [thing]
grouped_urls = self._grouped(thing, self._urls_per_retrieve) # break list of URLS down to API limits
results = {}
for group in grouped_urls:
query_parameters = {"resource": "\n".join([url for url in group])}
self._limit_call_handler()
try:
response = self._post_query(query, query_parameters)
except:
raise TypeError
# If we get a list of URLs that has N urls and N mod '_url_per_retrieve' is 1
# for example [url, url, url], when limit is 2, the last query will not return a list
if not isinstance(response, list):
response = [response]
for index, scanid in enumerate(group):
results[scanid] = response[index]
result = results
# Query API for domain(s)
elif thing_type == API_Constants.DOMAIN:
query = API_Constants.CONST_API_URL + API_Constants.API_ACTION_GET_DOMAIN_REPORT
if not isinstance(thing, list):
thing = [thing]
results = {}
for domain in thing:
query_parameters["domain"] = domain
self._limit_call_handler()
response = self._get_query(query, query_parameters)
results[domain] = response
result = results
# Query API for IP(s)
elif thing_type == API_Constants.IP:
query = API_Constants.CONST_API_URL + API_Constants.API_ACTION_GET_IP_REPORT
if not isinstance(thing, list):
thing = [thing]
results = {}
for ip in thing:
query_parameters["ip"] = ip
self._limit_call_handler()
try:
response = self._get_query(query, query_parameters)
except:
raise TypeError
results[ip] = response
result = results
# Query API for HASH, bulk HASH queries not possible
elif thing_type == API_Constants.HASH:
query = API_Constants.CONST_API_URL + API_Constants.API_ACTION_GET_FILE_REPORT
results = {}
if not isinstance(thing, list):
thing = [thing]
query_parameters["resource"] = ", ".join(thing)
self._limit_call_handler()
response = self._get_query(query, query_parameters)
if not isinstance(response, list):
response = [response]
for index, hash in enumerate(thing):
results[hash] = response[index]
result = results
elif thing_type == "scanid":
query = API_Constants.CONST_API_URL + API_Constants.API_ACTION_GET_URL_REPORT
if not isinstance(thing, list):
thing = [thing]
results = {}
for scanid in thing:
query_parameters["resource"] = scanid
self._limit_call_handler()
try:
response = self._post_query(query, query_parameters)
except:
raise TypeError
results[scanid] = response
result = results
else:
raise TypeError("Unimplemented '%s'." % thing_type)
return result |
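A hedged usage sketch: `vt` stands for an instance of the API wrapper, and the resources are illustrative:
url_report = vt.retrieve('http://example.com/')                # type auto-detected as URL
hash_report = vt.retrieve('44d88612fea8a8f36de82e1278abb02f')  # illustrative MD5 hash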
def __allocate_clusters(self):
"""!
@brief Performs cluster allocation and builds ordering diagram that is based on reachability-distances.
"""
self.__initialize(self.__sample_pointer)
for optic_object in self.__optics_objects:
if optic_object.processed is False:
self.__expand_cluster_order(optic_object)
self.__extract_clusters() | !
@brief Performs cluster allocation and builds ordering diagram that is based on reachability-distances. | Below is the the instruction that describes the task:
### Input:
!
@brief Performs cluster allocation and builds ordering diagram that is based on reachability-distances.
### Response:
def __allocate_clusters(self):
"""!
@brief Performs cluster allocation and builds ordering diagram that is based on reachability-distances.
"""
self.__initialize(self.__sample_pointer)
for optic_object in self.__optics_objects:
if optic_object.processed is False:
self.__expand_cluster_order(optic_object)
self.__extract_clusters() |
def _setup_images(directory, brightness, saturation, hue, preserve_transparency):
"""
Apply modifiers to the images of a theme
Modifies the images using the PIL.ImageEnhance module. Using
this function, theme images are modified to given them a
unique look and feel. Works best with PNG-based images.
"""
for file_name in os.listdir(directory):
with open(os.path.join(directory, file_name), "rb") as fi:
image = Image.open(fi).convert("RGBA")
# Only perform required operations
if brightness != 1.0:
enhancer = ImageEnhance.Brightness(image)
image = enhancer.enhance(brightness)
if saturation != 1.0:
enhancer = ImageEnhance.Color(image)
image = enhancer.enhance(saturation)
if hue != 1.0:
image = imgops.shift_hue(image, hue)
if preserve_transparency is True:
image = imgops.make_transparent(image)
# Save the new image
image.save(os.path.join(directory, file_name.replace("gif", "png")))
image.close()
for file_name in (item for item in os.listdir(directory) if item.endswith(".gif")):
os.remove(os.path.join(directory, file_name)) | Apply modifiers to the images of a theme
Modifies the images using the PIL.ImageEnhance module. Using
this function, theme images are modified to given them a
unique look and feel. Works best with PNG-based images. | Below is the the instruction that describes the task:
### Input:
Apply modifiers to the images of a theme
Modifies the images using the PIL.ImageEnhance module. Using
this function, theme images are modified to given them a
unique look and feel. Works best with PNG-based images.
### Response:
def _setup_images(directory, brightness, saturation, hue, preserve_transparency):
"""
Apply modifiers to the images of a theme
Modifies the images using the PIL.ImageEnhance module. Using
this function, theme images are modified to given them a
unique look and feel. Works best with PNG-based images.
"""
for file_name in os.listdir(directory):
with open(os.path.join(directory, file_name), "rb") as fi:
image = Image.open(fi).convert("RGBA")
# Only perform required operations
if brightness != 1.0:
enhancer = ImageEnhance.Brightness(image)
image = enhancer.enhance(brightness)
if saturation != 1.0:
enhancer = ImageEnhance.Color(image)
image = enhancer.enhance(saturation)
if hue != 1.0:
image = imgops.shift_hue(image, hue)
if preserve_transparency is True:
image = imgops.make_transparent(image)
# Save the new image
image.save(os.path.join(directory, file_name.replace("gif", "png")))
image.close()
for file_name in (item for item in os.listdir(directory) if item.endswith(".gif")):
os.remove(os.path.join(directory, file_name)) |
def get_codon(seq, codon_no, start_offset):
"""
    This function takes a sequence and a codon number and returns the codon
found in the sequence at that position
"""
seq = seq.replace("-","")
codon_start_pos = int(codon_no - 1)*3 - start_offset
codon = seq[codon_start_pos:codon_start_pos + 3]
    return codon | This function takes a sequence and a codon number and returns the codon
found in the sequence at that position | Below is the the instruction that describes the task:
### Input:
    This function takes a sequence and a codon number and returns the codon
found in the sequence at that position
### Response:
def get_codon(seq, codon_no, start_offset):
"""
    This function takes a sequence and a codon number and returns the codon
found in the sequence at that position
"""
seq = seq.replace("-","")
codon_start_pos = int(codon_no - 1)*3 - start_offset
codon = seq[codon_start_pos:codon_start_pos + 3]
return codon |
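A worked example: codon 3 of an ungapped frame at offset 0 starts at index (3 - 1) * 3 = 6:
get_codon("ATGAAACCCGGG", codon_no=3, start_offset=0)  # -> "CCC"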
def actionAngleTorus_hessian_c(pot,jr,jphi,jz,
tol=0.003,dJ=0.001):
"""
NAME:
actionAngleTorus_hessian_c
PURPOSE:
compute dO/dJ on a single torus
INPUT:
pot - Potential object or list thereof
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
tol= (0.003) goal for |dJ|/|J| along the torus
dJ= (0.001) action difference when computing derivatives (Hessian or Jacobian)
OUTPUT:
(dO/dJ,Omegar,Omegaphi,Omegaz,Autofit error flag)
Note: dO/dJ is *not* symmetrized here
HISTORY:
2016-07-15 - Written - Bovy (UofT)
"""
#Parse the potential
from galpy.orbit.integrateFullOrbit import _parse_pot
npot, pot_type, pot_args= _parse_pot(pot,potfortorus=True)
#Set up result
dOdJT= numpy.empty(9)
Omegar= numpy.empty(1)
Omegaphi= numpy.empty(1)
Omegaz= numpy.empty(1)
flag= ctypes.c_int(0)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
actionAngleTorus_HessFunc= _lib.actionAngleTorus_hessianFreqs
actionAngleTorus_HessFunc.argtypes=\
[ctypes.c_double,
ctypes.c_double,
ctypes.c_double,
ctypes.c_int,
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_double,
ctypes.c_double,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int)]
#Array requirements
dOdJT= numpy.require(dOdJT,dtype=numpy.float64,requirements=['C','W'])
Omegar= numpy.require(Omegar,dtype=numpy.float64,requirements=['C','W'])
Omegaphi= numpy.require(Omegaphi,dtype=numpy.float64,requirements=['C','W'])
Omegaz= numpy.require(Omegaz,dtype=numpy.float64,requirements=['C','W'])
#Run the C code
actionAngleTorus_HessFunc(ctypes.c_double(jr),
ctypes.c_double(jphi),
ctypes.c_double(jz),
ctypes.c_int(npot),
pot_type,
pot_args,
ctypes.c_double(tol),
ctypes.c_double(dJ),
dOdJT,
Omegar,Omegaphi,Omegaz,
ctypes.byref(flag))
return (dOdJT.reshape((3,3)).T,Omegar[0],Omegaphi[0],Omegaz[0],flag.value) | NAME:
actionAngleTorus_hessian_c
PURPOSE:
compute dO/dJ on a single torus
INPUT:
pot - Potential object or list thereof
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
tol= (0.003) goal for |dJ|/|J| along the torus
dJ= (0.001) action difference when computing derivatives (Hessian or Jacobian)
OUTPUT:
(dO/dJ,Omegar,Omegaphi,Omegaz,Autofit error flag)
Note: dO/dJ is *not* symmetrized here
HISTORY:
2016-07-15 - Written - Bovy (UofT) | Below is the the instruction that describes the task:
### Input:
NAME:
actionAngleTorus_hessian_c
PURPOSE:
compute dO/dJ on a single torus
INPUT:
pot - Potential object or list thereof
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
tol= (0.003) goal for |dJ|/|J| along the torus
dJ= (0.001) action difference when computing derivatives (Hessian or Jacobian)
OUTPUT:
(dO/dJ,Omegar,Omegaphi,Omegaz,Autofit error flag)
Note: dO/dJ is *not* symmetrized here
HISTORY:
2016-07-15 - Written - Bovy (UofT)
### Response:
def actionAngleTorus_hessian_c(pot,jr,jphi,jz,
tol=0.003,dJ=0.001):
"""
NAME:
actionAngleTorus_hessian_c
PURPOSE:
compute dO/dJ on a single torus
INPUT:
pot - Potential object or list thereof
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
tol= (0.003) goal for |dJ|/|J| along the torus
dJ= (0.001) action difference when computing derivatives (Hessian or Jacobian)
OUTPUT:
(dO/dJ,Omegar,Omegaphi,Omegaz,Autofit error flag)
Note: dO/dJ is *not* symmetrized here
HISTORY:
2016-07-15 - Written - Bovy (UofT)
"""
#Parse the potential
from galpy.orbit.integrateFullOrbit import _parse_pot
npot, pot_type, pot_args= _parse_pot(pot,potfortorus=True)
#Set up result
dOdJT= numpy.empty(9)
Omegar= numpy.empty(1)
Omegaphi= numpy.empty(1)
Omegaz= numpy.empty(1)
flag= ctypes.c_int(0)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
actionAngleTorus_HessFunc= _lib.actionAngleTorus_hessianFreqs
actionAngleTorus_HessFunc.argtypes=\
[ctypes.c_double,
ctypes.c_double,
ctypes.c_double,
ctypes.c_int,
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_double,
ctypes.c_double,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int)]
#Array requirements
dOdJT= numpy.require(dOdJT,dtype=numpy.float64,requirements=['C','W'])
Omegar= numpy.require(Omegar,dtype=numpy.float64,requirements=['C','W'])
Omegaphi= numpy.require(Omegaphi,dtype=numpy.float64,requirements=['C','W'])
Omegaz= numpy.require(Omegaz,dtype=numpy.float64,requirements=['C','W'])
#Run the C code
actionAngleTorus_HessFunc(ctypes.c_double(jr),
ctypes.c_double(jphi),
ctypes.c_double(jz),
ctypes.c_int(npot),
pot_type,
pot_args,
ctypes.c_double(tol),
ctypes.c_double(dJ),
dOdJT,
Omegar,Omegaphi,Omegaz,
ctypes.byref(flag))
return (dOdJT.reshape((3,3)).T,Omegar[0],Omegaphi[0],Omegaz[0],flag.value) |
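A hedged usage sketch with a standard galpy potential (the action values are illustrative and the compiled galpy extension is assumed to be available):
from galpy.potential import MWPotential2014
dOdJ, Om_r, Om_phi, Om_z, flag = actionAngleTorus_hessian_c(
    MWPotential2014, jr=0.1, jphi=1.1, jz=0.05)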
def contains(self, stimtype):
"""Returns whether the specified stimlus type is a component in this stimulus
:param stimtype: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>` subclass class name to test for membership in the components of this stimulus
:type stimtype: str
:returns: bool -- if the stimtype is in the model
"""
for track in self._segments:
for component in track:
if component.__class__.__name__ == stimtype:
return True
        return False | Returns whether the specified stimulus type is a component in this stimulus
:param stimtype: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>` subclass class name to test for membership in the components of this stimulus
:type stimtype: str
:returns: bool -- if the stimtype is in the model | Below is the the instruction that describes the task:
### Input:
    Returns whether the specified stimulus type is a component in this stimulus
:param stimtype: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>` subclass class name to test for membership in the components of this stimulus
:type stimtype: str
:returns: bool -- if the stimtype is in the model
### Response:
def contains(self, stimtype):
"""Returns whether the specified stimlus type is a component in this stimulus
:param stimtype: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>` subclass class name to test for membership in the components of this stimulus
:type stimtype: str
:returns: bool -- if the stimtype is in the model
"""
for track in self._segments:
for component in track:
if component.__class__.__name__ == stimtype:
return True
return False |
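An illustrative check (the stimulus object and the component class name are assumptions):
if stim.contains('PureTone'):  # stim: assumed stimulus model instance
    print('model already holds a pure tone component')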
def iter_last_tour(tourfile, clm):
"""
Extract last tour from tourfile. The clm instance is also passed in to see
if any contig is covered in the clm.
"""
row = open(tourfile).readlines()[-1]
_tour, _tour_o = separate_tour_and_o(row)
tour = []
tour_o = []
for tc, to in zip(_tour, _tour_o):
if tc not in clm.contigs:
logging.debug("Contig `{}` in file `{}` not found in `{}`"
.format(tc, tourfile, clm.idsfile))
continue
tour.append(tc)
tour_o.append(to)
return tour, tour_o | Extract last tour from tourfile. The clm instance is also passed in to see
if any contig is covered in the clm. | Below is the the instruction that describes the task:
### Input:
Extract last tour from tourfile. The clm instance is also passed in to see
if any contig is covered in the clm.
### Response:
def iter_last_tour(tourfile, clm):
"""
Extract last tour from tourfile. The clm instance is also passed in to see
if any contig is covered in the clm.
"""
row = open(tourfile).readlines()[-1]
_tour, _tour_o = separate_tour_and_o(row)
tour = []
tour_o = []
for tc, to in zip(_tour, _tour_o):
if tc not in clm.contigs:
logging.debug("Contig `{}` in file `{}` not found in `{}`"
.format(tc, tourfile, clm.idsfile))
continue
tour.append(tc)
tour_o.append(to)
return tour, tour_o |
def stats(self):
"""
Get the stats for the current :class:`Milestone`
"""
response = self.requester.get(
'/{endpoint}/{id}/stats',
endpoint=self.endpoint, id=self.id
)
return response.json() | Get the stats for the current :class:`Milestone` | Below is the the instruction that describes the task:
### Input:
Get the stats for the current :class:`Milestone`
### Response:
def stats(self):
"""
Get the stats for the current :class:`Milestone`
"""
response = self.requester.get(
'/{endpoint}/{id}/stats',
endpoint=self.endpoint, id=self.id
)
return response.json() |
def retrieve(self, order_id, id) :
"""
Retrieve a single line item
Returns a single line item of an order, according to the unique line item ID provided
:calls: ``get /orders/{order_id}/line_items/{id}``
:param int order_id: Unique identifier of a Order.
:param int id: Unique identifier of a LineItem.
        :return: Dictionary that supports attribute-style access and represents a LineItem resource.
:rtype: dict
"""
_, _, line_item = self.http_client.get("/orders/{order_id}/line_items/{id}".format(order_id=order_id, id=id))
return line_item | Retrieve a single line item
Returns a single line item of an order, according to the unique line item ID provided
:calls: ``get /orders/{order_id}/line_items/{id}``
:param int order_id: Unique identifier of a Order.
:param int id: Unique identifier of a LineItem.
        :return: Dictionary that supports attribute-style access and represents a LineItem resource.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Retrieve a single line item
Returns a single line item of an order, according to the unique line item ID provided
:calls: ``get /orders/{order_id}/line_items/{id}``
:param int order_id: Unique identifier of a Order.
:param int id: Unique identifier of a LineItem.
        :return: Dictionary that supports attribute-style access and represents a LineItem resource.
:rtype: dict
### Response:
def retrieve(self, order_id, id) :
"""
Retrieve a single line item
Returns a single line item of an order, according to the unique line item ID provided
:calls: ``get /orders/{order_id}/line_items/{id}``
:param int order_id: Unique identifier of a Order.
:param int id: Unique identifier of a LineItem.
        :return: Dictionary that supports attribute-style access and represents a LineItem resource.
:rtype: dict
"""
_, _, line_item = self.http_client.get("/orders/{order_id}/line_items/{id}".format(order_id=order_id, id=id))
return line_item |
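A hedged usage sketch, assuming `client.line_items` exposes this service object:
line_item = client.line_items.retrieve(order_id=123, id=456)  # IDs are illustrative
print(line_item)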
def last_valid_index(self):
"""Returns index of last non-NaN/NULL value.
Return:
Scalar of index name.
"""
def last_valid_index_builder(df):
df.index = pandas.RangeIndex(len(df.index))
return df.apply(lambda df: df.last_valid_index())
func = self._build_mapreduce_func(last_valid_index_builder)
# We get the maximum from each column, then take the max of that to get
# last_valid_index. The `to_pandas()` here is just for a single value and
# `squeeze` will convert it to a scalar.
first_result = self._full_axis_reduce(0, func).max(axis=1).to_pandas().squeeze()
return self.index[first_result] | Returns index of last non-NaN/NULL value.
Return:
Scalar of index name. | Below is the the instruction that describes the task:
### Input:
Returns index of last non-NaN/NULL value.
Return:
Scalar of index name.
### Response:
def last_valid_index(self):
"""Returns index of last non-NaN/NULL value.
Return:
Scalar of index name.
"""
def last_valid_index_builder(df):
df.index = pandas.RangeIndex(len(df.index))
return df.apply(lambda df: df.last_valid_index())
func = self._build_mapreduce_func(last_valid_index_builder)
# We get the maximum from each column, then take the max of that to get
# last_valid_index. The `to_pandas()` here is just for a single value and
# `squeeze` will convert it to a scalar.
first_result = self._full_axis_reduce(0, func).max(axis=1).to_pandas().squeeze()
return self.index[first_result] |
def set_light_state_raw(self, hue, saturation, brightness, kelvin,
bulb=ALL_BULBS, timeout=None):
"""
Sets the (low-level) light state of one or more bulbs.
"""
with _blocking(self.lock, self.light_state, self.light_state_event,
timeout):
self.send(REQ_SET_LIGHT_STATE, bulb, 'xHHHHI',
hue, saturation, brightness, kelvin, 0)
self.send(REQ_GET_LIGHT_STATE, ALL_BULBS, '')
return self.light_state | Sets the (low-level) light state of one or more bulbs. | Below is the the instruction that describes the task:
### Input:
Sets the (low-level) light state of one or more bulbs.
### Response:
def set_light_state_raw(self, hue, saturation, brightness, kelvin,
bulb=ALL_BULBS, timeout=None):
"""
Sets the (low-level) light state of one or more bulbs.
"""
with _blocking(self.lock, self.light_state, self.light_state_event,
timeout):
self.send(REQ_SET_LIGHT_STATE, bulb, 'xHHHHI',
hue, saturation, brightness, kelvin, 0)
self.send(REQ_GET_LIGHT_STATE, ALL_BULBS, '')
return self.light_state |
def score(self, X, y=None, **kwargs):
"""
The score function is the hook for visual interaction. Pass in test
data and the visualizer will create predictions on the data and
evaluate them with respect to the test values. The evaluation will
then be passed to draw() and the result of the estimator score will
be returned.
Parameters
----------
X : array-like
X (also X_test) are the dependent variables of test set to predict
y : array-like
y (also y_test) is the independent actual variables to score against
Returns
-------
score : float
"""
self.score_ = self.estimator.score(X, y, **kwargs)
y_pred = self.predict(X)
self.draw(y, y_pred)
return self.score_ | The score function is the hook for visual interaction. Pass in test
data and the visualizer will create predictions on the data and
evaluate them with respect to the test values. The evaluation will
then be passed to draw() and the result of the estimator score will
be returned.
Parameters
----------
X : array-like
X (also X_test) are the dependent variables of test set to predict
y : array-like
y (also y_test) is the independent actual variables to score against
Returns
-------
score : float | Below is the the instruction that describes the task:
### Input:
The score function is the hook for visual interaction. Pass in test
data and the visualizer will create predictions on the data and
evaluate them with respect to the test values. The evaluation will
then be passed to draw() and the result of the estimator score will
be returned.
Parameters
----------
X : array-like
X (also X_test) are the dependent variables of test set to predict
y : array-like
y (also y_test) is the independent actual variables to score against
Returns
-------
score : float
### Response:
def score(self, X, y=None, **kwargs):
"""
The score function is the hook for visual interaction. Pass in test
data and the visualizer will create predictions on the data and
evaluate them with respect to the test values. The evaluation will
then be passed to draw() and the result of the estimator score will
be returned.
Parameters
----------
X : array-like
X (also X_test) are the dependent variables of test set to predict
y : array-like
y (also y_test) is the independent actual variables to score against
Returns
-------
score : float
"""
self.score_ = self.estimator.score(X, y, **kwargs)
y_pred = self.predict(X)
self.draw(y, y_pred)
return self.score_ |
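A hedged usage sketch, assuming `viz` is such a visualizer wrapping a fitted estimator (variable names are illustrative):
viz.fit(X_train, y_train)
r2 = viz.score(X_test, y_test)  # draws predictions vs. actuals and returns the estimator score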
def replace_random_tokens_bow(self,
n_samples, # type: int
replacement='', # type: str
random_state=None,
min_replace=1, # type: Union[int, float]
max_replace=1.0, # type: Union[int, float]
):
# type: (...) -> List[Tuple[str, int, np.ndarray]]
"""
Return a list of ``(text, replaced_words_count, mask)`` tuples with
n_samples versions of text with some words replaced.
If a word is replaced, all duplicate words are also replaced
from the text. By default words are replaced with '', i.e. removed.
"""
if not self.vocab:
nomask = np.array([], dtype=int)
return [('', 0, nomask)] * n_samples
min_replace, max_replace = self._get_min_max(min_replace, max_replace,
len(self.vocab))
rng = check_random_state(random_state)
replace_sizes = rng.randint(low=min_replace, high=max_replace + 1,
size=n_samples)
res = []
for num_to_replace in replace_sizes:
tokens_to_replace = set(rng.choice(self.vocab, num_to_replace,
replace=False))
idx_to_replace = [idx for idx, token in enumerate(self.tokens)
if token in tokens_to_replace]
mask = indices_to_bool_mask(idx_to_replace, len(self.tokens))
s = self.split.masked(idx_to_replace, replacement)
res.append((s.text, num_to_replace, mask))
return res | Return a list of ``(text, replaced_words_count, mask)`` tuples with
n_samples versions of text with some words replaced.
If a word is replaced, all duplicate words are also replaced
from the text. By default words are replaced with '', i.e. removed. | Below is the the instruction that describes the task:
### Input:
Return a list of ``(text, replaced_words_count, mask)`` tuples with
n_samples versions of text with some words replaced.
If a word is replaced, all duplicate words are also replaced
from the text. By default words are replaced with '', i.e. removed.
### Response:
def replace_random_tokens_bow(self,
n_samples, # type: int
replacement='', # type: str
random_state=None,
min_replace=1, # type: Union[int, float]
max_replace=1.0, # type: Union[int, float]
):
# type: (...) -> List[Tuple[str, int, np.ndarray]]
"""
Return a list of ``(text, replaced_words_count, mask)`` tuples with
n_samples versions of text with some words replaced.
If a word is replaced, all duplicate words are also replaced
from the text. By default words are replaced with '', i.e. removed.
"""
if not self.vocab:
nomask = np.array([], dtype=int)
return [('', 0, nomask)] * n_samples
min_replace, max_replace = self._get_min_max(min_replace, max_replace,
len(self.vocab))
rng = check_random_state(random_state)
replace_sizes = rng.randint(low=min_replace, high=max_replace + 1,
size=n_samples)
res = []
for num_to_replace in replace_sizes:
tokens_to_replace = set(rng.choice(self.vocab, num_to_replace,
replace=False))
idx_to_replace = [idx for idx, token in enumerate(self.tokens)
if token in tokens_to_replace]
mask = indices_to_bool_mask(idx_to_replace, len(self.tokens))
s = self.split.masked(idx_to_replace, replacement)
res.append((s.text, num_to_replace, mask))
return res |
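
The method above depends on sampler internals (self.vocab, self.tokens, self.split) that are not shown in this record, so the following is a self-contained sketch of the same bag-of-words masking idea; the names and sentence are illustrative, not the original class.

import numpy as np

def replace_random_tokens_bow_demo(tokens, n_samples, replacement='', seed=0):
    # Pick a random number of distinct vocabulary words per sample, blank out
    # every occurrence of each picked word, and record a boolean mask.
    rng = np.random.RandomState(seed)
    vocab = sorted(set(tokens))
    samples = []
    for _ in range(n_samples):
        num_to_replace = rng.randint(1, len(vocab) + 1)
        chosen = set(rng.choice(vocab, num_to_replace, replace=False))
        mask = np.array([tok in chosen for tok in tokens])
        text = ' '.join(replacement if hit else tok for tok, hit in zip(tokens, mask))
        samples.append((text, num_to_replace, mask))
    return samples

for text, n, mask in replace_random_tokens_bow_demo('the cat sat on the mat'.split(), 2):
    print(repr(text), n, mask)
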
def write_data(self, data, response_required=None, timeout=5.0, raw=False):
"""Write data on the asyncio Protocol"""
if self._transport is None:
return
if self._paused:
return
if self._waiting_for_response:
LOG.debug("queueing write %s", data)
self._queued_writes.append((data, response_required, timeout))
return
if response_required:
self._waiting_for_response = response_required
if timeout > 0:
self._timeout_task = self.loop.call_later(
timeout, self._response_required_timeout)
if not raw:
cksum = 256 - reduce(lambda x, y: x+y, map(ord, data)) % 256
data = data + '{:02X}'.format(cksum)
if int(data[0:2], 16) != len(data)-2:
LOG.debug("message length wrong: %s", data)
LOG.debug("write_data '%s'", data)
self._transport.write((data + '\r\n').encode()) | Write data on the asyncio Protocol | Below is the instruction that describes the task:
### Input:
Write data on the asyncio Protocol
### Response:
def write_data(self, data, response_required=None, timeout=5.0, raw=False):
"""Write data on the asyncio Protocol"""
if self._transport is None:
return
if self._paused:
return
if self._waiting_for_response:
LOG.debug("queueing write %s", data)
self._queued_writes.append((data, response_required, timeout))
return
if response_required:
self._waiting_for_response = response_required
if timeout > 0:
self._timeout_task = self.loop.call_later(
timeout, self._response_required_timeout)
if not raw:
cksum = 256 - reduce(lambda x, y: x+y, map(ord, data)) % 256
data = data + '{:02X}'.format(cksum)
if int(data[0:2], 16) != len(data)-2:
LOG.debug("message length wrong: %s", data)
LOG.debug("write_data '%s'", data)
self._transport.write((data + '\r\n').encode()) |
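
The checksum and length conventions above are easiest to see in isolation. This standalone sketch reuses the exact checksum expression from write_data (note that % binds tighter than -, so it computes 256 minus the character-code sum modulo 256) on a hypothetical message whose two leading hex digits encode its length.

from functools import reduce

def append_checksum(data):
    # Same expression as in write_data above.
    cksum = 256 - reduce(lambda x, y: x + y, map(ord, data)) % 256
    return data + '{:02X}'.format(cksum)

msg = '16' + 'A' * 20          # hypothetical body; 0x16 == 22 == len(msg)
framed = append_checksum(msg)
print(framed)
print(int(framed[0:2], 16) == len(framed) - 2)   # the length check above passes
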
def pemp(stat, stat0):
""" Computes empirical values identically to bioconductor/qvalue empPvals """
assert len(stat0) > 0
assert len(stat) > 0
stat = np.array(stat)
stat0 = np.array(stat0)
m = len(stat)
m0 = len(stat0)
statc = np.concatenate((stat, stat0))
v = np.array([True] * m + [False] * m0)
perm = np.argsort(-statc, kind="mergesort") # reversed sort, mergesort is stable
v = v[perm]
u = np.where(v)[0]
p = (u - np.arange(m)) / float(m0)
# ranks can be fractional, we round down to the next integer, ranking returns values starting
# with 1, not 0:
ranks = np.floor(scipy.stats.rankdata(-stat)).astype(int) - 1
p = p[ranks]
p[p <= 1.0 / m0] = 1.0 / m0
return p | Computes empirical values identically to bioconductor/qvalue empPvals | Below is the instruction that describes the task:
### Input:
Computes empirical values identically to bioconductor/qvalue empPvals
### Response:
def pemp(stat, stat0):
""" Computes empirical values identically to bioconductor/qvalue empPvals """
assert len(stat0) > 0
assert len(stat) > 0
stat = np.array(stat)
stat0 = np.array(stat0)
m = len(stat)
m0 = len(stat0)
statc = np.concatenate((stat, stat0))
v = np.array([True] * m + [False] * m0)
perm = np.argsort(-statc, kind="mergesort") # reversed sort, mergesort is stable
v = v[perm]
u = np.where(v)[0]
p = (u - np.arange(m)) / float(m0)
# ranks can be fractional, we round down to the next integer, ranking returns values starting
# with 1, not 0:
ranks = np.floor(scipy.stats.rankdata(-stat)).astype(int) - 1
p = p[ranks]
p[p <= 1.0 / m0] = 1.0 / m0
return p |
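
Since pemp is a plain module-level function, it can be exercised directly with small arrays (the values below are made up); it relies on the numpy and scipy imports of its module.

import numpy as np
import scipy.stats  # pemp uses scipy.stats.rankdata

observed = np.array([3.2, 1.1, 0.4])          # hypothetical target statistics
decoys = np.array([0.1, 0.5, 0.9, 1.3, 2.0])  # hypothetical null statistics
print(pemp(observed, decoys))
# -> [0.2 0.4 0.8]: for each observed value, the fraction of null statistics
#    that are at least as large, floored at 1/len(decoys) so no p-value is 0.
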
def diff_result_to_cell(item):
'''diff.diff returns a dictionary with all the information we need,
but we want to extract the cell and change its metadata.'''
state = item['state']
if state == 'modified':
new_cell = item['modifiedvalue'].data
old_cell = item['originalvalue'].data
new_cell['metadata']['state'] = state
new_cell['metadata']['original'] = old_cell
cell = new_cell
else:
cell = item['value'].data
cell['metadata']['state'] = state
return cell | diff.diff returns a dictionary with all the information we need,
but we want to extract the cell and change its metadata. | Below is the instruction that describes the task:
### Input:
diff.diff returns a dictionary with all the information we need,
but we want to extract the cell and change its metadata.
### Response:
def diff_result_to_cell(item):
'''diff.diff returns a dictionary with all the information we need,
but we want to extract the cell and change its metadata.'''
state = item['state']
if state == 'modified':
new_cell = item['modifiedvalue'].data
old_cell = item['originalvalue'].data
new_cell['metadata']['state'] = state
new_cell['metadata']['original'] = old_cell
cell = new_cell
else:
cell = item['value'].data
cell['metadata']['state'] = state
return cell |
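
The wrapper objects inside a diff item only need a .data attribute, so the function can be demonstrated with simple stand-ins; the notebook-cell dictionaries below are hypothetical.

from types import SimpleNamespace

added = {'state': 'added',
         'value': SimpleNamespace(data={'cell_type': 'code', 'source': 'print(1)',
                                        'metadata': {}})}
modified = {'state': 'modified',
            'originalvalue': SimpleNamespace(data={'cell_type': 'code', 'source': 'x = 1',
                                                   'metadata': {}}),
            'modifiedvalue': SimpleNamespace(data={'cell_type': 'code', 'source': 'x = 2',
                                                   'metadata': {}})}

for item in (added, modified):
    cell = diff_result_to_cell(item)
    print(cell['metadata']['state'], 'original' in cell['metadata'])
# -> added False / modified True: modified cells carry the pre-change cell
#    under metadata['original'].
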
def ligolw_add(xmldoc, urls, non_lsc_tables_ok = False, verbose = False, contenthandler = DefaultContentHandler):
"""
An implementation of the LIGO LW add algorithm. urls is a list of
URLs (or filenames) to load, xmldoc is the XML document tree to
which they should be added.
"""
# Input
for n, url in enumerate(urls):
if verbose:
print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)),
utils.load_url(url, verbose = verbose, xmldoc = xmldoc, contenthandler = contenthandler)
# ID reassignment
if not non_lsc_tables_ok and lsctables.HasNonLSCTables(xmldoc):
raise ValueError("non-LSC tables found. Use --non-lsc-tables-ok to force")
reassign_ids(xmldoc, verbose = verbose)
# Document merge
if verbose:
print >>sys.stderr, "merging elements ..."
merge_ligolws(xmldoc)
merge_compatible_tables(xmldoc)
return xmldoc | An implementation of the LIGO LW add algorithm. urls is a list of
URLs (or filenames) to load, xmldoc is the XML document tree to
which they should be added. | Below is the instruction that describes the task:
### Input:
An implementation of the LIGO LW add algorithm. urls is a list of
URLs (or filenames) to load, xmldoc is the XML document tree to
which they should be added.
### Response:
def ligolw_add(xmldoc, urls, non_lsc_tables_ok = False, verbose = False, contenthandler = DefaultContentHandler):
"""
An implementation of the LIGO LW add algorithm. urls is a list of
URLs (or filenames) to load, xmldoc is the XML document tree to
which they should be added.
"""
# Input
for n, url in enumerate(urls):
if verbose:
print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)),
utils.load_url(url, verbose = verbose, xmldoc = xmldoc, contenthandler = contenthandler)
# ID reassignment
if not non_lsc_tables_ok and lsctables.HasNonLSCTables(xmldoc):
raise ValueError("non-LSC tables found. Use --non-lsc-tables-ok to force")
reassign_ids(xmldoc, verbose = verbose)
# Document merge
if verbose:
print >>sys.stderr, "merging elements ..."
merge_ligolws(xmldoc)
merge_compatible_tables(xmldoc)
return xmldoc |
def present(name, pipeline_objects=None,
pipeline_objects_from_pillars='boto_datapipeline_pipeline_objects',
parameter_objects=None,
parameter_objects_from_pillars='boto_datapipeline_parameter_objects',
parameter_values=None,
parameter_values_from_pillars='boto_datapipeline_parameter_values',
region=None,
key=None, keyid=None, profile=None):
'''
Ensure the data pipeline exists with matching definition.
name
Name of the service to ensure a data pipeline exists for.
pipeline_objects
Pipeline objects to use. Will override objects read from pillars.
pipeline_objects_from_pillars
The pillar key to use for lookup.
parameter_objects
Parameter objects to use. Will override objects read from pillars.
parameter_objects_from_pillars
The pillar key to use for lookup.
parameter_values
Parameter values to use. Will override values read from pillars.
parameter_values_from_pillars
The pillar key to use for lookup.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
pipeline_objects = pipeline_objects or {}
parameter_objects = parameter_objects or {}
parameter_values = parameter_values or {}
present, old_pipeline_definition = _pipeline_present_with_definition(
name,
_pipeline_objects(pipeline_objects_from_pillars, pipeline_objects),
_parameter_objects(parameter_objects_from_pillars, parameter_objects),
_parameter_values(parameter_values_from_pillars, parameter_values),
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if present:
ret['comment'] = 'AWS data pipeline {0} present'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Data pipeline {0} is set to be created or updated'.format(name)
ret['result'] = None
return ret
result_create_pipeline = __salt__['boto_datapipeline.create_pipeline'](
name,
name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if 'error' in result_create_pipeline:
ret['result'] = False
ret['comment'] = 'Failed to create data pipeline {0}: {1}'.format(
name, result_create_pipeline['error'])
return ret
pipeline_id = result_create_pipeline['result']
result_pipeline_definition = __salt__['boto_datapipeline.put_pipeline_definition'](
pipeline_id,
_pipeline_objects(pipeline_objects_from_pillars, pipeline_objects),
parameter_objects=_parameter_objects(parameter_objects_from_pillars, parameter_objects),
parameter_values=_parameter_values(parameter_values_from_pillars, parameter_values),
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if 'error' in result_pipeline_definition:
if _immutable_fields_error(result_pipeline_definition):
# If update not possible, delete and retry
result_delete_pipeline = __salt__['boto_datapipeline.delete_pipeline'](
pipeline_id,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if 'error' in result_delete_pipeline:
ret['result'] = False
ret['comment'] = 'Failed to delete data pipeline {0}: {1}'.format(
pipeline_id, result_delete_pipeline['error'])
return ret
result_create_pipeline = __salt__['boto_datapipeline.create_pipeline'](
name,
name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if 'error' in result_create_pipeline:
ret['result'] = False
ret['comment'] = 'Failed to create data pipeline {0}: {1}'.format(
name, result_create_pipeline['error'])
return ret
pipeline_id = result_create_pipeline['result']
result_pipeline_definition = __salt__['boto_datapipeline.put_pipeline_definition'](
pipeline_id,
_pipeline_objects(pipeline_objects_from_pillars, pipeline_objects),
parameter_objects=_parameter_objects(parameter_objects_from_pillars, parameter_objects),
parameter_values=_parameter_values(parameter_values_from_pillars, parameter_values),
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if 'error' in result_pipeline_definition:
# Still erroring after possible retry
ret['result'] = False
ret['comment'] = 'Failed to create data pipeline {0}: {1}'.format(
name, result_pipeline_definition['error'])
return ret
result_activate_pipeline = __salt__['boto_datapipeline.activate_pipeline'](
pipeline_id,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if 'error' in result_activate_pipeline:
ret['result'] = False
ret['comment'] = 'Failed to create data pipeline {0}: {1}'.format(
name, result_pipeline_definition['error'])
return ret
pipeline_definition_result = __salt__['boto_datapipeline.get_pipeline_definition'](
pipeline_id,
version='active',
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if 'error' in pipeline_definition_result:
new_pipeline_definition = {}
else:
new_pipeline_definition = _standardize(pipeline_definition_result['result'])
if not old_pipeline_definition:
ret['changes']['new'] = 'Pipeline created.'
ret['comment'] = 'Data pipeline {0} created'.format(name)
else:
ret['changes']['diff'] = _diff(old_pipeline_definition, new_pipeline_definition)
ret['comment'] = 'Data pipeline {0} updated'.format(name)
return ret | Ensure the data pipeline exists with matching definition.
name
Name of the service to ensure a data pipeline exists for.
pipeline_objects
Pipeline objects to use. Will override objects read from pillars.
pipeline_objects_from_pillars
The pillar key to use for lookup.
parameter_objects
Parameter objects to use. Will override objects read from pillars.
parameter_objects_from_pillars
The pillar key to use for lookup.
parameter_values
Parameter values to use. Will override values read from pillars.
parameter_values_from_pillars
The pillar key to use for lookup.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid. | Below is the instruction that describes the task:
### Input:
Ensure the data pipeline exists with matching definition.
name
Name of the service to ensure a data pipeline exists for.
pipeline_objects
Pipeline objects to use. Will override objects read from pillars.
pipeline_objects_from_pillars
The pillar key to use for lookup.
parameter_objects
Parameter objects to use. Will override objects read from pillars.
parameter_objects_from_pillars
The pillar key to use for lookup.
parameter_values
Parameter values to use. Will override values read from pillars.
parameter_values_from_pillars
The pillar key to use for lookup.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
### Response:
def present(name, pipeline_objects=None,
pipeline_objects_from_pillars='boto_datapipeline_pipeline_objects',
parameter_objects=None,
parameter_objects_from_pillars='boto_datapipeline_parameter_objects',
parameter_values=None,
parameter_values_from_pillars='boto_datapipeline_parameter_values',
region=None,
key=None, keyid=None, profile=None):
'''
Ensure the data pipeline exists with matching definition.
name
Name of the service to ensure a data pipeline exists for.
pipeline_objects
Pipeline objects to use. Will override objects read from pillars.
pipeline_objects_from_pillars
The pillar key to use for lookup.
parameter_objects
Parameter objects to use. Will override objects read from pillars.
parameter_objects_from_pillars
The pillar key to use for lookup.
parameter_values
Parameter values to use. Will override values read from pillars.
parameter_values_from_pillars
The pillar key to use for lookup.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
pipeline_objects = pipeline_objects or {}
parameter_objects = parameter_objects or {}
parameter_values = parameter_values or {}
present, old_pipeline_definition = _pipeline_present_with_definition(
name,
_pipeline_objects(pipeline_objects_from_pillars, pipeline_objects),
_parameter_objects(parameter_objects_from_pillars, parameter_objects),
_parameter_values(parameter_values_from_pillars, parameter_values),
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if present:
ret['comment'] = 'AWS data pipeline {0} present'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Data pipeline {0} is set to be created or updated'.format(name)
ret['result'] = None
return ret
result_create_pipeline = __salt__['boto_datapipeline.create_pipeline'](
name,
name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if 'error' in result_create_pipeline:
ret['result'] = False
ret['comment'] = 'Failed to create data pipeline {0}: {1}'.format(
name, result_create_pipeline['error'])
return ret
pipeline_id = result_create_pipeline['result']
result_pipeline_definition = __salt__['boto_datapipeline.put_pipeline_definition'](
pipeline_id,
_pipeline_objects(pipeline_objects_from_pillars, pipeline_objects),
parameter_objects=_parameter_objects(parameter_objects_from_pillars, parameter_objects),
parameter_values=_parameter_values(parameter_values_from_pillars, parameter_values),
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if 'error' in result_pipeline_definition:
if _immutable_fields_error(result_pipeline_definition):
# If update not possible, delete and retry
result_delete_pipeline = __salt__['boto_datapipeline.delete_pipeline'](
pipeline_id,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if 'error' in result_delete_pipeline:
ret['result'] = False
ret['comment'] = 'Failed to delete data pipeline {0}: {1}'.format(
pipeline_id, result_delete_pipeline['error'])
return ret
result_create_pipeline = __salt__['boto_datapipeline.create_pipeline'](
name,
name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if 'error' in result_create_pipeline:
ret['result'] = False
ret['comment'] = 'Failed to create data pipeline {0}: {1}'.format(
name, result_create_pipeline['error'])
return ret
pipeline_id = result_create_pipeline['result']
result_pipeline_definition = __salt__['boto_datapipeline.put_pipeline_definition'](
pipeline_id,
_pipeline_objects(pipeline_objects_from_pillars, pipeline_objects),
parameter_objects=_parameter_objects(parameter_objects_from_pillars, parameter_objects),
parameter_values=_parameter_values(parameter_values_from_pillars, parameter_values),
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if 'error' in result_pipeline_definition:
# Still erroring after possible retry
ret['result'] = False
ret['comment'] = 'Failed to create data pipeline {0}: {1}'.format(
name, result_pipeline_definition['error'])
return ret
result_activate_pipeline = __salt__['boto_datapipeline.activate_pipeline'](
pipeline_id,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if 'error' in result_activate_pipeline:
ret['result'] = False
ret['comment'] = 'Failed to create data pipeline {0}: {1}'.format(
name, result_pipeline_definition['error'])
return ret
pipeline_definition_result = __salt__['boto_datapipeline.get_pipeline_definition'](
pipeline_id,
version='active',
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if 'error' in pipeline_definition_result:
new_pipeline_definition = {}
else:
new_pipeline_definition = _standardize(pipeline_definition_result['result'])
if not old_pipeline_definition:
ret['changes']['new'] = 'Pipeline created.'
ret['comment'] = 'Data pipeline {0} created'.format(name)
else:
ret['changes']['diff'] = _diff(old_pipeline_definition, new_pipeline_definition)
ret['comment'] = 'Data pipeline {0} updated'.format(name)
return ret |
def parse_swf (url_data):
"""Parse a SWF file for URLs."""
linkfinder = linkparse.swf_url_re.finditer
for mo in linkfinder(url_data.get_content()):
url = mo.group()
url_data.add_url(url) | Parse a SWF file for URLs. | Below is the instruction that describes the task:
### Input:
Parse a SWF file for URLs.
### Response:
def parse_swf (url_data):
"""Parse a SWF file for URLs."""
linkfinder = linkparse.swf_url_re.finditer
for mo in linkfinder(url_data.get_content()):
url = mo.group()
url_data.add_url(url) |
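
The work here is delegated to a precompiled regular expression scanned over the raw SWF bytes. A rough standalone equivalent is sketched below with a hypothetical pattern; linkparse.swf_url_re itself is not shown in this record.

import re

swf_url_re = re.compile(rb'https?://[\x21-\x7e]+')  # illustrative pattern only

def find_swf_urls(content):
    return [m.group().decode('ascii', 'replace') for m in swf_url_re.finditer(content)]

print(find_swf_urls(b'CWS\x07binary...http://example.com/page.html\x00more'))
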
def fetch(self, bank, key):
'''
Fetch data using the specified module
:param bank:
The name of the location inside the cache which will hold the key
and its associated data.
:param key:
The name of the key (or file inside a directory) which will hold
the data. File extensions should not be provided, as they will be
added by the driver itself.
:return:
Return a python object fetched from the cache or an empty dict if
the given path or key not found.
:raises SaltCacheError:
Raises an exception if cache driver detected an error accessing data
in the cache backend (auth, permissions, etc).
'''
fun = '{0}.fetch'.format(self.driver)
return self.modules[fun](bank, key, **self._kwargs) | Fetch data using the specified module
:param bank:
The name of the location inside the cache which will hold the key
and its associated data.
:param key:
The name of the key (or file inside a directory) which will hold
the data. File extensions should not be provided, as they will be
added by the driver itself.
:return:
Return a python object fetched from the cache or an empty dict if
the given path or key not found.
:raises SaltCacheError:
Raises an exception if cache driver detected an error accessing data
in the cache backend (auth, permissions, etc). | Below is the instruction that describes the task:
### Input:
Fetch data using the specified module
:param bank:
The name of the location inside the cache which will hold the key
and its associated data.
:param key:
The name of the key (or file inside a directory) which will hold
the data. File extensions should not be provided, as they will be
added by the driver itself.
:return:
Return a python object fetched from the cache or an empty dict if
the given path or key not found.
:raises SaltCacheError:
Raises an exception if cache driver detected an error accessing data
in the cache backend (auth, permissions, etc).
### Response:
def fetch(self, bank, key):
'''
Fetch data using the specified module
:param bank:
The name of the location inside the cache which will hold the key
and its associated data.
:param key:
The name of the key (or file inside a directory) which will hold
the data. File extensions should not be provided, as they will be
added by the driver itself.
:return:
Return a python object fetched from the cache or an empty dict if
the given path or key not found.
:raises SaltCacheError:
Raises an exception if cache driver detected an error accessing data
in the cache backend (auth, permissions, etc).
'''
fun = '{0}.fetch'.format(self.driver)
return self.modules[fun](bank, key, **self._kwargs) |
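
The interesting part here is the driver dispatch: the facade only builds a '<driver>.fetch' key and forwards the call to whatever callable is registered under it. A self-contained sketch of that pattern follows; the registry and driver names are illustrative, not Salt's real loader.

class MiniCache:
    def __init__(self, driver, modules, **kwargs):
        self.driver, self.modules, self._kwargs = driver, modules, kwargs

    def fetch(self, bank, key):
        fun = '{0}.fetch'.format(self.driver)
        return self.modules[fun](bank, key, **self._kwargs)

store = {('minions', 'web01'): {'ip': '10.0.0.5'}}
modules = {'localfs.fetch': lambda bank, key, **kw: store.get((bank, key), {})}

cache = MiniCache('localfs', modules)
print(cache.fetch('minions', 'web01'))    # -> {'ip': '10.0.0.5'}
print(cache.fetch('minions', 'missing'))  # -> {} (empty dict when not found)
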
def process_raw_data(cls, raw_data):
"""Create a new model using raw API response."""
properties = raw_data.get("properties", {})
raw_content = properties.get("ipSecConfiguration", None)
if raw_content is not None:
ip_sec = IPSecConfiguration.from_raw_data(raw_content)
properties["ipSecConfiguration"] = ip_sec
ip_addresses = []
for raw_content in properties.get("ipAddresses", []):
ip_addresses.append(IPAddress.from_raw_data(raw_content))
properties["ipAddresses"] = ip_addresses
routes = []
for raw_content in properties.get("routes", []):
routes.append(NetworkInterfaceRoute.from_raw_data(raw_content))
properties["routes"] = routes
raw_content = properties.get("statistics", None)
if raw_content is not None:
statistics = NetworkInterfaceStatistics.from_raw_data(
raw_content)
properties["statistics"] = statistics
raw_content = properties.get("greConfiguration", None)
if raw_content is not None:
gre_configuration = GREConfiguration.from_raw_data(raw_content)
properties["greConfiguration"] = gre_configuration
raw_content = properties.get("l3Configuration", None)
if raw_content is not None:
l3_configuration = L3Configuration.from_raw_data(raw_content)
properties["l3Configuration"] = l3_configuration
raw_content = properties.get("gateway", None)
if raw_content is not None:
gateway = Resource.from_raw_data(raw_content)
properties["gateway"] = gateway
return super(NetworkConnections, cls).process_raw_data(raw_data) | Create a new model using raw API response. | Below is the instruction that describes the task:
### Input:
Create a new model using raw API response.
### Response:
def process_raw_data(cls, raw_data):
"""Create a new model using raw API response."""
properties = raw_data.get("properties", {})
raw_content = properties.get("ipSecConfiguration", None)
if raw_content is not None:
ip_sec = IPSecConfiguration.from_raw_data(raw_content)
properties["ipSecConfiguration"] = ip_sec
ip_addresses = []
for raw_content in properties.get("ipAddresses", []):
ip_addresses.append(IPAddress.from_raw_data(raw_content))
properties["ipAddresses"] = ip_addresses
routes = []
for raw_content in properties.get("routes", []):
routes.append(NetworkInterfaceRoute.from_raw_data(raw_content))
properties["routes"] = routes
raw_content = properties.get("statistics", None)
if raw_content is not None:
statistics = NetworkInterfaceStatistics.from_raw_data(
raw_content)
properties["statistics"] = statistics
raw_content = properties.get("greConfiguration", None)
if raw_content is not None:
gre_configuration = GREConfiguration.from_raw_data(raw_content)
properties["greConfiguration"] = gre_configuration
raw_content = properties.get("l3Configuration", None)
if raw_content is not None:
l3_configuration = L3Configuration.from_raw_data(raw_content)
properties["l3Configuration"] = l3_configuration
raw_content = properties.get("gateway", None)
if raw_content is not None:
gateway = Resource.from_raw_data(raw_content)
properties["gateway"] = gateway
return super(NetworkConnections, cls).process_raw_data(raw_data) |
def record_service_agreement(storage_path, service_agreement_id, did, service_definition_id, price,
files, start_time,
status='pending'):
"""
Records the given pending service agreement.
:param storage_path: storage path for the internal db, str
:param service_agreement_id:
:param did: DID, str
:param service_definition_id: identifier of the service inside the asset DDO, str
:param price: Asset price, int
:param files:
:param start_time:
:param status:
:return:
"""
conn = sqlite3.connect(storage_path)
try:
cursor = conn.cursor()
cursor.execute(
'''CREATE TABLE IF NOT EXISTS service_agreements
(id VARCHAR PRIMARY KEY, did VARCHAR, service_definition_id INTEGER,
price INTEGER, files VARCHAR, start_time INTEGER, status VARCHAR(10));'''
)
cursor.execute(
'INSERT OR REPLACE INTO service_agreements VALUES (?,?,?,?,?,?,?)',
[service_agreement_id, did, service_definition_id, price, files, start_time,
status],
)
conn.commit()
finally:
conn.close() | Records the given pending service agreement.
:param storage_path: storage path for the internal db, str
:param service_agreement_id:
:param did: DID, str
:param service_definition_id: identifier of the service inside the asset DDO, str
:param price: Asset price, int
:param files:
:param start_time:
:param status:
:return: | Below is the instruction that describes the task:
### Input:
Records the given pending service agreement.
:param storage_path: storage path for the internal db, str
:param service_agreement_id:
:param did: DID, str
:param service_definition_id: identifier of the service inside the asset DDO, str
:param price: Asset price, int
:param files:
:param start_time:
:param status:
:return:
### Response:
def record_service_agreement(storage_path, service_agreement_id, did, service_definition_id, price,
files, start_time,
status='pending'):
"""
Records the given pending service agreement.
:param storage_path: storage path for the internal db, str
:param service_agreement_id:
:param did: DID, str
:param service_definition_id: identifier of the service inside the asset DDO, str
:param price: Asset price, int
:param files:
:param start_time:
:param status:
:return:
"""
conn = sqlite3.connect(storage_path)
try:
cursor = conn.cursor()
cursor.execute(
'''CREATE TABLE IF NOT EXISTS service_agreements
(id VARCHAR PRIMARY KEY, did VARCHAR, service_definition_id INTEGER,
price INTEGER, files VARCHAR, start_time INTEGER, status VARCHAR(10));'''
)
cursor.execute(
'INSERT OR REPLACE INTO service_agreements VALUES (?,?,?,?,?,?,?)',
[service_agreement_id, did, service_definition_id, price, files, start_time,
status],
)
conn.commit()
finally:
conn.close() |
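
Because this is a plain module-level helper, it can be tried against a throwaway SQLite file; every value below is hypothetical.

import os, sqlite3, tempfile, time

db_path = os.path.join(tempfile.mkdtemp(), 'agreements.db')
record_service_agreement(db_path, 'agreement-001', 'did:op:1234',
                         service_definition_id=1, price=10, files='[]',
                         start_time=int(time.time()))

conn = sqlite3.connect(db_path)
print(conn.execute('SELECT id, did, status FROM service_agreements').fetchall())
conn.close()
# -> [('agreement-001', 'did:op:1234', 'pending')]
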
def get(self, *, kind: Type=None, tag: Hashable=None, **_) -> Iterator:
"""
Get an iterator of objects by kind or tag.
kind: Any type. Pass to get a subset of contained items with the given
type.
tag: Any Hashable object. Pass to get a subset of contained items with
the given tag.
Pass both kind and tag to get objects that are both that type and that
tag.
Examples:
container.get(type=MyObject)
container.get(tag="red")
container.get(type=MyObject, tag="red")
"""
if kind is None and tag is None:
raise TypeError("get() takes at least one keyword-only argument. 'kind' or 'tag'.")
kinds = self.all
tags = self.all
if kind is not None:
kinds = self.kinds[kind]
if tag is not None:
tags = self.tags[tag]
return (x for x in kinds.intersection(tags)) | Get an iterator of objects by kind or tag.
kind: Any type. Pass to get a subset of contained items with the given
type.
tag: Any Hashable object. Pass to get a subset of contained items with
the given tag.
Pass both kind and tag to get objects that are both that type and that
tag.
Examples:
container.get(type=MyObject)
container.get(tag="red")
container.get(type=MyObject, tag="red") | Below is the the instruction that describes the task:
### Input:
Get an iterator of objects by kind or tag.
kind: Any type. Pass to get a subset of contained items with the given
type.
tag: Any Hashable object. Pass to get a subset of contained items with
the given tag.
Pass both kind and tag to get objects that are both that type and that
tag.
Examples:
container.get(type=MyObject)
container.get(tag="red")
container.get(type=MyObject, tag="red")
### Response:
def get(self, *, kind: Type=None, tag: Hashable=None, **_) -> Iterator:
"""
Get an iterator of objects by kind or tag.
kind: Any type. Pass to get a subset of contained items with the given
type.
tag: Any Hashable object. Pass to get a subset of contained items with
the given tag.
Pass both kind and tag to get objects that are both that type and that
tag.
Examples:
container.get(type=MyObject)
container.get(tag="red")
container.get(type=MyObject, tag="red")
"""
if kind is None and tag is None:
raise TypeError("get() takes at least one keyword-only argument. 'kind' or 'tag'.")
kinds = self.all
tags = self.all
if kind is not None:
kinds = self.kinds[kind]
if tag is not None:
tags = self.tags[tag]
return (x for x in kinds.intersection(tags)) |
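
A self-contained sketch of the indexing this method assumes: 'all' holds every object, 'kinds' maps types to sets, 'tags' maps tags to sets, and get() intersects whichever of the two was requested. The container below is illustrative, not the original class.

from collections import defaultdict

class MiniContainer:
    def __init__(self):
        self.all = set()
        self.kinds = defaultdict(set)
        self.tags = defaultdict(set)

    def add(self, obj, tags=()):
        self.all.add(obj)
        self.kinds[type(obj)].add(obj)
        for tag in tags:
            self.tags[tag].add(obj)

    def get(self, *, kind=None, tag=None):
        if kind is None and tag is None:
            raise TypeError("get() needs 'kind' or 'tag'")
        kinds = self.kinds[kind] if kind is not None else self.all
        tags = self.tags[tag] if tag is not None else self.all
        return (x for x in kinds.intersection(tags))

c = MiniContainer()
c.add('apple', tags=('red', 'fruit'))
c.add('brick', tags=('red',))
c.add(3.14, tags=('red',))
print(sorted(c.get(kind=str, tag='red')))   # -> ['apple', 'brick']
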
def subgroup(self, t, i):
"""Handle parenthesis."""
# (?flags)
flags = self.get_flags(i, self.version == _regex.V0)
if flags:
self.flags(flags[2:-1])
return [flags]
# (?#comment)
comments = self.get_comments(i)
if comments:
return [comments]
verbose = self.verbose
# (?flags:pattern)
flags = self.get_flags(i, (self.version == _regex.V0), True)
if flags:
t = flags
self.flags(flags[2:-1], scoped=True)
current = []
try:
while t != ')':
if not current:
current.append(t)
else:
current.extend(self.normal(t, i))
t = next(i)
except StopIteration:
pass
self.verbose = verbose
if t == ")":
current.append(t)
return current | Handle parenthesis. | Below is the instruction that describes the task:
### Input:
Handle parenthesis.
### Response:
def subgroup(self, t, i):
"""Handle parenthesis."""
# (?flags)
flags = self.get_flags(i, self.version == _regex.V0)
if flags:
self.flags(flags[2:-1])
return [flags]
# (?#comment)
comments = self.get_comments(i)
if comments:
return [comments]
verbose = self.verbose
# (?flags:pattern)
flags = self.get_flags(i, (self.version == _regex.V0), True)
if flags:
t = flags
self.flags(flags[2:-1], scoped=True)
current = []
try:
while t != ')':
if not current:
current.append(t)
else:
current.extend(self.normal(t, i))
t = next(i)
except StopIteration:
pass
self.verbose = verbose
if t == ")":
current.append(t)
return current |
def convert_to_btc(self, amount, currency):
"""
Convert X amount to Bit Coins
"""
if isinstance(amount, Decimal):
use_decimal = True
else:
use_decimal = self._force_decimal
url = 'https://api.coindesk.com/v1/bpi/currentprice/{}.json'.format(currency)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi').get(currency, {}).get('rate_float', None)
if price:
if use_decimal:
price = Decimal(price)
try:
converted_btc = amount/price
return converted_btc
except TypeError:
raise DecimalFloatMismatchError("convert_to_btc requires amount parameter is of type Decimal when force_decimal=True")
raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given date") | Convert X amount to Bit Coins | Below is the the instruction that describes the task:
### Input:
Convert X amount to Bit Coins
### Response:
def convert_to_btc(self, amount, currency):
"""
Convert X amount to Bit Coins
"""
if isinstance(amount, Decimal):
use_decimal = True
else:
use_decimal = self._force_decimal
url = 'https://api.coindesk.com/v1/bpi/currentprice/{}.json'.format(currency)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi').get(currency, {}).get('rate_float', None)
if price:
if use_decimal:
price = Decimal(price)
try:
converted_btc = amount/price
return converted_btc
except TypeError:
raise DecimalFloatMismatchError("convert_to_btc requires amount parameter is of type Decimal when force_decimal=True")
raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given date") |
def _grabContentFromUrl(self, url):
"""
Function that abstracts capturing a URL. This method rewrites the one from Wrapper.
:param url: The URL to be processed.
:return: The response in a Json format.
"""
# Defining an empty object for the response
info = {}
# This part has to be modified...
try:
# Configuring the socket
queryURL = "http://" + self.info["host"] + ":" + self.info["port"] + "/" + url
response = urllib2.urlopen(queryURL)
# Rebuilding data to be processed
data = str(response.headers) + "\n"
data += response.read()
# Processing data as expected
info = self._createDataStructure(data)
# Try to make the errors clear for other users
except Exception, e:
errMsg = "ERROR Exception. Something seems to be wrong with the Zeronet Bundler."
raise Exception( errMsg + " " + str(e))
return info | Function that abstracts capturing a URL. This method rewrites the one from Wrapper.
:param url: The URL to be processed.
:return: The response in a Json format. | Below is the instruction that describes the task:
### Input:
Function that abstracts capturing a URL. This method rewrites the one from Wrapper.
:param url: The URL to be processed.
:return: The response in a Json format.
### Response:
def _grabContentFromUrl(self, url):
"""
Function that abstracts capturing a URL. This method rewrites the one from Wrapper.
:param url: The URL to be processed.
:return: The response in a Json format.
"""
# Defining an empty object for the response
info = {}
# This part has to be modified...
try:
# Configuring the socket
queryURL = "http://" + self.info["host"] + ":" + self.info["port"] + "/" + url
response = urllib2.urlopen(queryURL)
# Rebuilding data to be processed
data = str(response.headers) + "\n"
data += response.read()
# Processing data as expected
info = self._createDataStructure(data)
# Try to make the errors clear for other users
except Exception, e:
errMsg = "ERROR Exception. Something seems to be wrong with the Zeronet Bundler."
raise Exception( errMsg + " " + str(e))
return info |
def save_caption(self, filename: str, mtime: datetime, caption: str) -> None:
"""Updates picture caption / Post metadata info"""
def _elliptify(caption):
pcaption = caption.replace('\n', ' ').strip()
return '[' + ((pcaption[:29] + u"\u2026") if len(pcaption) > 31 else pcaption) + ']'
filename += '.txt'
caption += '\n'
pcaption = _elliptify(caption)
caption = caption.encode("UTF-8")
with suppress(FileNotFoundError):
with open(filename, 'rb') as file:
file_caption = file.read()
if file_caption.replace(b'\r\n', b'\n') == caption.replace(b'\r\n', b'\n'):
try:
self.context.log(pcaption + ' unchanged', end=' ', flush=True)
except UnicodeEncodeError:
self.context.log('txt unchanged', end=' ', flush=True)
return None
else:
def get_filename(index):
return filename if index == 0 else '{0}_old_{2:02}{1}'.format(*os.path.splitext(filename), index)
i = 0
while os.path.isfile(get_filename(i)):
i = i + 1
for index in range(i, 0, -1):
os.rename(get_filename(index - 1), get_filename(index))
try:
self.context.log(_elliptify(file_caption.decode("UTF-8")) + ' updated', end=' ', flush=True)
except UnicodeEncodeError:
self.context.log('txt updated', end=' ', flush=True)
try:
self.context.log(pcaption, end=' ', flush=True)
except UnicodeEncodeError:
self.context.log('txt', end=' ', flush=True)
with open(filename, 'wb') as text_file:
shutil.copyfileobj(BytesIO(caption), text_file)
os.utime(filename, (datetime.now().timestamp(), mtime.timestamp())) | Updates picture caption / Post metadata info | Below is the instruction that describes the task:
### Input:
Updates picture caption / Post metadata info
### Response:
def save_caption(self, filename: str, mtime: datetime, caption: str) -> None:
"""Updates picture caption / Post metadata info"""
def _elliptify(caption):
pcaption = caption.replace('\n', ' ').strip()
return '[' + ((pcaption[:29] + u"\u2026") if len(pcaption) > 31 else pcaption) + ']'
filename += '.txt'
caption += '\n'
pcaption = _elliptify(caption)
caption = caption.encode("UTF-8")
with suppress(FileNotFoundError):
with open(filename, 'rb') as file:
file_caption = file.read()
if file_caption.replace(b'\r\n', b'\n') == caption.replace(b'\r\n', b'\n'):
try:
self.context.log(pcaption + ' unchanged', end=' ', flush=True)
except UnicodeEncodeError:
self.context.log('txt unchanged', end=' ', flush=True)
return None
else:
def get_filename(index):
return filename if index == 0 else '{0}_old_{2:02}{1}'.format(*os.path.splitext(filename), index)
i = 0
while os.path.isfile(get_filename(i)):
i = i + 1
for index in range(i, 0, -1):
os.rename(get_filename(index - 1), get_filename(index))
try:
self.context.log(_elliptify(file_caption.decode("UTF-8")) + ' updated', end=' ', flush=True)
except UnicodeEncodeError:
self.context.log('txt updated', end=' ', flush=True)
try:
self.context.log(pcaption, end=' ', flush=True)
except UnicodeEncodeError:
self.context.log('txt', end=' ', flush=True)
with open(filename, 'wb') as text_file:
shutil.copyfileobj(BytesIO(caption), text_file)
os.utime(filename, (datetime.now().timestamp(), mtime.timestamp())) |
def get_controller_value(self, index_or_name, value_type):
"""
Returns current/min/max value of controller at given index or name.
It is much more efficient to query using an integer index rather than string name.
Name is fine for seldom updates but it's not advised to be used every second or so.
See `get_controller_list` for an example how to cache a dictionary of {name: index} pairs.
:param index_or_name integer index or string name
:param value_type one of VALUE_CURRENT, VALUE_MIN, VALUE_MAX
:return float
"""
if not isinstance(index_or_name, int):
index = self.get_controller_index(index_or_name)
else:
index = index_or_name
return self.dll.GetControllerValue(index, value_type) | Returns current/min/max value of controller at given index or name.
It is much more efficient to query using an integer index rather than string name.
Name is fine for seldom updates but it's not advised to be used every second or so.
See `get_controller_list` for an example how to cache a dictionary of {name: index} pairs.
:param index_or_name integer index or string name
:param value_type one of VALUE_CURRENT, VALUE_MIN, VALUE_MAX
:return float | Below is the instruction that describes the task:
### Input:
Returns current/min/max value of controller at given index or name.
It is much more efficient to query using an integer index rather than string name.
Name is fine for seldom updates but it's not advised to be used every second or so.
See `get_controller_list` for an example how to cache a dictionary of {name: index} pairs.
:param index_or_name integer index or string name
:param value_type one of VALUE_CURRENT, VALUE_MIN, VALUE_MAX
:return float
### Response:
def get_controller_value(self, index_or_name, value_type):
"""
Returns current/min/max value of controller at given index or name.
It is much more efficient to query using an integer index rather than string name.
Name is fine for seldom updates but it's not advised to be used every second or so.
See `get_controller_list` for an example how to cache a dictionary of {name: index} pairs.
:param index_or_name integer index or string name
:param value_type one of VALUE_CURRENT, VALUE_MIN, VALUE_MAX
:return float
"""
if not isinstance(index_or_name, int):
index = self.get_controller_index(index_or_name)
else:
index = index_or_name
return self.dll.GetControllerValue(index, value_type) |
def default_config(level=logging.INFO, auto_init=True, new_formatter=False, **kwargs):
"""
Returns the default config dictionary and inits the logging system if requested
Keyword arguments:
level -- loglevel of the console handler (Default: logging.INFO)
auto_init -- initialize the logging system with the provided config (Default: True)
**kwargs -- additional options for the logging system
"""
formatters = {
'color': {
'()': __name__ + '.ColorFormatter',
'format': '[%(levelname)s] %(message)s'
}
}
if new_formatter:
formatters = {
'color': {
'()': __name__ + '.NewColorFormatter',
'format': '[{levelname}] {message}'
}
}
options = {
'version': 1,
'disable_existing_loggers': False,
'formatters': formatters,
'filters': {},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'color',
'level': level,#logging.getLevelName(level),
'stream': 'ext://sys.stderr',
}
},
'loggers': {
},
'root': {
'level': 'NOTSET',
'filters': [],
'handlers': ['console'],
}
}
options.update(kwargs)
if auto_init:
logging.config.dictConfig(options)
return options | Returns the default config dictionary and inits the logging system if requested
Keyword arguments:
level -- loglevel of the console handler (Default: logging.INFO)
auto_init -- initialize the logging system with the provided config (Default: True)
**kwargs -- additional options for the logging system | Below is the instruction that describes the task:
### Input:
Returns the default config dictionary and inits the logging system if requested
Keyword arguments:
level -- loglevel of the console handler (Default: logging.INFO)
auto_init -- initialize the logging system with the provided config (Default: True)
**kwargs -- additional options for the logging system
### Response:
def default_config(level=logging.INFO, auto_init=True, new_formatter=False, **kwargs):
"""
Returns the default config dictionary and inits the logging system if requested
Keyword arguments:
level -- loglevel of the console handler (Default: logging.INFO)
auto_init -- initialize the logging system with the provided config (Default: True)
**kwargs -- additional options for the logging system
"""
formatters = {
'color': {
'()': __name__ + '.ColorFormatter',
'format': '[%(levelname)s] %(message)s'
}
}
if new_formatter:
formatters = {
'color': {
'()': __name__ + '.NewColorFormatter',
'format': '[{levelname}] {message}'
}
}
options = {
'version': 1,
'disable_existing_loggers': False,
'formatters': formatters,
'filters': {},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'color',
'level': level,#logging.getLevelName(level),
'stream': 'ext://sys.stderr',
}
},
'loggers': {
},
'root': {
'level': 'NOTSET',
'filters': [],
'handlers': ['console'],
}
}
options.update(kwargs)
if auto_init:
logging.config.dictConfig(options)
return options |
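
Assuming the surrounding module (with its ColorFormatter classes) is importable, typical usage is either a one-call setup or building the dict, adjusting it, and applying it manually; the 'noisy.lib' logger name below is illustrative.

import logging
import logging.config

default_config(level=logging.DEBUG)            # wires a colored stderr handler
logging.getLogger(__name__).info('configured')

opts = default_config(auto_init=False,
                      loggers={'noisy.lib': {'level': 'WARNING'}})
logging.config.dictConfig(opts)                # apply the tweaked config later
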
def download_folder(project, destdir, folder="/", overwrite=False, chunksize=dxfile.DEFAULT_BUFFER_SIZE,
show_progress=False, **kwargs):
'''
:param project: Project ID to use as context for this download.
:type project: string
:param destdir: Local destination location
:type destdir: string
:param folder: Path to the remote folder to download
:type folder: string
:param overwrite: Overwrite existing files
:type overwrite: boolean
Downloads the contents of the remote *folder* of the *project* into the local directory specified by *destdir*.
Example::
download_folder("project-xxxx", "/home/jsmith/input", folder="/input")
'''
def ensure_local_dir(d):
if not os.path.isdir(d):
if os.path.exists(d):
raise DXFileError("Destination location '{}' already exists and is not a directory".format(d))
logger.debug("Creating destination directory: '%s'", d)
os.makedirs(d)
def compose_local_dir(d, remote_folder, remote_subfolder):
suffix = remote_subfolder[1:] if remote_folder == "/" else remote_subfolder[len(remote_folder) + 1:]
if os.sep != '/':
suffix = suffix.replace('/', os.sep)
return os.path.join(d, suffix) if suffix != "" else d
normalized_folder = folder.strip()
if normalized_folder != "/" and normalized_folder.endswith("/"):
normalized_folder = normalized_folder[:-1]
if normalized_folder == "":
raise DXFileError("Invalid remote folder name: '{}'".format(folder))
normalized_dest_dir = os.path.normpath(destdir).strip()
if normalized_dest_dir == "":
raise DXFileError("Invalid destination directory name: '{}'".format(destdir))
# Creating target directory tree
remote_folders = list(list_subfolders(project, normalized_folder, recurse=True))
if len(remote_folders) <= 0:
raise DXFileError("Remote folder '{}' not found".format(normalized_folder))
remote_folders.sort()
for remote_subfolder in remote_folders:
ensure_local_dir(compose_local_dir(normalized_dest_dir, normalized_folder, remote_subfolder))
# Downloading files
describe_input = dict(fields=dict(folder=True,
name=True,
id=True,
parts=True,
size=True,
drive=True,
md5=True))
# A generator that returns the files one by one. We don't want to materialize it, because
# there could be many files here.
files_gen = dxpy.search.find_data_objects(classname='file', state='closed', project=project,
folder=normalized_folder, recurse=True, describe=describe_input)
if files_gen is None:
# In python 3, the generator can be None, and iterating on it
# will cause an error.
return
# Now it is safe, in both python 2 and 3, to iterate on the generator
for remote_file in files_gen:
local_filename = os.path.join(compose_local_dir(normalized_dest_dir,
normalized_folder,
remote_file['describe']['folder']),
remote_file['describe']['name'])
if os.path.exists(local_filename) and not overwrite:
raise DXFileError(
"Destination file '{}' already exists but no overwrite option is provided".format(local_filename)
)
logger.debug("Downloading '%s/%s' remote file to '%s' location",
("" if remote_file['describe']['folder'] == "/" else remote_file['describe']['folder']),
remote_file['describe']['name'],
local_filename)
download_dxfile(remote_file['describe']['id'],
local_filename,
chunksize=chunksize,
project=project,
show_progress=show_progress,
describe_output=remote_file['describe'],
**kwargs) | :param project: Project ID to use as context for this download.
:type project: string
:param destdir: Local destination location
:type destdir: string
:param folder: Path to the remote folder to download
:type folder: string
:param overwrite: Overwrite existing files
:type overwrite: boolean
Downloads the contents of the remote *folder* of the *project* into the local directory specified by *destdir*.
Example::
download_folder("project-xxxx", "/home/jsmith/input", folder="/input") | Below is the the instruction that describes the task:
### Input:
:param project: Project ID to use as context for this download.
:type project: string
:param destdir: Local destination location
:type destdir: string
:param folder: Path to the remote folder to download
:type folder: string
:param overwrite: Overwrite existing files
:type overwrite: boolean
Downloads the contents of the remote *folder* of the *project* into the local directory specified by *destdir*.
Example::
download_folder("project-xxxx", "/home/jsmith/input", folder="/input")
### Response:
def download_folder(project, destdir, folder="/", overwrite=False, chunksize=dxfile.DEFAULT_BUFFER_SIZE,
show_progress=False, **kwargs):
'''
:param project: Project ID to use as context for this download.
:type project: string
:param destdir: Local destination location
:type destdir: string
:param folder: Path to the remote folder to download
:type folder: string
:param overwrite: Overwrite existing files
:type overwrite: boolean
Downloads the contents of the remote *folder* of the *project* into the local directory specified by *destdir*.
Example::
download_folder("project-xxxx", "/home/jsmith/input", folder="/input")
'''
def ensure_local_dir(d):
if not os.path.isdir(d):
if os.path.exists(d):
raise DXFileError("Destination location '{}' already exists and is not a directory".format(d))
logger.debug("Creating destination directory: '%s'", d)
os.makedirs(d)
def compose_local_dir(d, remote_folder, remote_subfolder):
suffix = remote_subfolder[1:] if remote_folder == "/" else remote_subfolder[len(remote_folder) + 1:]
if os.sep != '/':
suffix = suffix.replace('/', os.sep)
return os.path.join(d, suffix) if suffix != "" else d
normalized_folder = folder.strip()
if normalized_folder != "/" and normalized_folder.endswith("/"):
normalized_folder = normalized_folder[:-1]
if normalized_folder == "":
raise DXFileError("Invalid remote folder name: '{}'".format(folder))
normalized_dest_dir = os.path.normpath(destdir).strip()
if normalized_dest_dir == "":
raise DXFileError("Invalid destination directory name: '{}'".format(destdir))
# Creating target directory tree
remote_folders = list(list_subfolders(project, normalized_folder, recurse=True))
if len(remote_folders) <= 0:
raise DXFileError("Remote folder '{}' not found".format(normalized_folder))
remote_folders.sort()
for remote_subfolder in remote_folders:
ensure_local_dir(compose_local_dir(normalized_dest_dir, normalized_folder, remote_subfolder))
# Downloading files
describe_input = dict(fields=dict(folder=True,
name=True,
id=True,
parts=True,
size=True,
drive=True,
md5=True))
# A generator that returns the files one by one. We don't want to materialize it, because
# there could be many files here.
files_gen = dxpy.search.find_data_objects(classname='file', state='closed', project=project,
folder=normalized_folder, recurse=True, describe=describe_input)
if files_gen is None:
# In python 3, the generator can be None, and iterating on it
# will cause an error.
return
# Now it is safe, in both python 2 and 3, to iterate on the generator
for remote_file in files_gen:
local_filename = os.path.join(compose_local_dir(normalized_dest_dir,
normalized_folder,
remote_file['describe']['folder']),
remote_file['describe']['name'])
if os.path.exists(local_filename) and not overwrite:
raise DXFileError(
"Destination file '{}' already exists but no overwrite option is provided".format(local_filename)
)
logger.debug("Downloading '%s/%s' remote file to '%s' location",
("" if remote_file['describe']['folder'] == "/" else remote_file['describe']['folder']),
remote_file['describe']['name'],
local_filename)
download_dxfile(remote_file['describe']['id'],
local_filename,
chunksize=chunksize,
project=project,
show_progress=show_progress,
describe_output=remote_file['describe'],
**kwargs) |
def md5(self):
"""Return md5 from meta, or compute it if absent."""
md5 = self.meta.get("md5")
if md5 is None:
md5 = str(hashlib.md5(self.value).hexdigest())
return md5 | Return md5 from meta, or compute it if absent. | Below is the instruction that describes the task:
### Input:
Return md5 from meta, or compute it if absent.
### Response:
def md5(self):
"""Return md5 from meta, or compute it if absent."""
md5 = self.meta.get("md5")
if md5 is None:
md5 = str(hashlib.md5(self.value).hexdigest())
return md5 |
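
A minimal stand-in showing the same "trust the stored md5, otherwise hash the payload" behaviour; the Blob class and values are illustrative.

import hashlib

class Blob:
    def __init__(self, value, meta=None):
        self.value = value
        self.meta = meta or {}

    @property
    def md5(self):
        md5 = self.meta.get("md5")
        if md5 is None:
            md5 = str(hashlib.md5(self.value).hexdigest())
        return md5

print(Blob(b"hello").md5)                        # computed from the payload
print(Blob(b"hello", {"md5": "cached"}).md5)     # taken from meta as-is
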
def read_header(filename, ext=0, extver=None, case_sensitive=False, **keys):
"""
Convenience function to read the header from the specified FITS HDU
The FITSHDR allows access to the values and comments by name and
number.
parameters
----------
filename: string
A filename.
ext: number or string, optional
The extension. Either the numerical extension from zero
or a string extension name. Default read primary header.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname). These
extensions can optionally specify an EXTVER version number in the
header. Send extver= to select a particular version. If extver is not
sent, the first one will be selected. If ext is an integer, the extver
is ignored.
case_sensitive: bool, optional
Match extension names with case-sensitivity. Default is False.
"""
dont_create = 0
try:
hdunum = ext+1
except TypeError:
hdunum = None
_fits = _fitsio_wrap.FITS(filename, READONLY, dont_create)
if hdunum is None:
extname = mks(ext)
if extver is None:
extver_num = 0
else:
extver_num = extver
if not case_sensitive:
# the builtin movnam_hdu is not case sensitive
hdunum = _fits.movnam_hdu(ANY_HDU, extname, extver_num)
else:
# for case sensitivity we'll need to run through
# all the hdus
found = False
current_ext = 0
while True:
hdunum = current_ext+1
try:
hdu_type = _fits.movabs_hdu(hdunum) # noqa - not used
name, vers = _fits.get_hdu_name_version(hdunum)
if name == extname:
if extver is None:
# take the first match
found = True
break
else:
if extver_num == vers:
found = True
break
except OSError:
break
current_ext += 1
if not found:
raise IOError(
'hdu not found: %s (extver %s)' % (extname, extver))
return FITSHDR(_fits.read_header(hdunum)) | Convenience function to read the header from the specified FITS HDU
The FITSHDR allows access to the values and comments by name and
number.
parameters
----------
filename: string
A filename.
ext: number or string, optional
The extension. Either the numerical extension from zero
or a string extension name. Default read primary header.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname). These
extensions can optionally specify an EXTVER version number in the
header. Send extver= to select a particular version. If extver is not
sent, the first one will be selected. If ext is an integer, the extver
is ignored.
case_sensitive: bool, optional
Match extension names with case-sensitivity. Default is False. | Below is the instruction that describes the task:
### Input:
Convenience function to read the header from the specified FITS HDU
The FITSHDR allows access to the values and comments by name and
number.
parameters
----------
filename: string
A filename.
ext: number or string, optional
The extension. Either the numerical extension from zero
or a string extension name. Default read primary header.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname). These
extensions can optionally specify an EXTVER version number in the
header. Send extver= to select a particular version. If extver is not
sent, the first one will be selected. If ext is an integer, the extver
is ignored.
case_sensitive: bool, optional
Match extension names with case-sensitivity. Default is False.
### Response:
def read_header(filename, ext=0, extver=None, case_sensitive=False, **keys):
"""
Convenience function to read the header from the specified FITS HDU
The FITSHDR allows access to the values and comments by name and
number.
parameters
----------
filename: string
A filename.
ext: number or string, optional
The extension. Either the numerical extension from zero
or a string extension name. Default read primary header.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname). These
extensions can optionally specify an EXTVER version number in the
header. Send extver= to select a particular version. If extver is not
sent, the first one will be selected. If ext is an integer, the extver
is ignored.
case_sensitive: bool, optional
Match extension names with case-sensitivity. Default is False.
"""
dont_create = 0
try:
hdunum = ext+1
except TypeError:
hdunum = None
_fits = _fitsio_wrap.FITS(filename, READONLY, dont_create)
if hdunum is None:
extname = mks(ext)
if extver is None:
extver_num = 0
else:
extver_num = extver
if not case_sensitive:
# the builtin movnam_hdu is not case sensitive
hdunum = _fits.movnam_hdu(ANY_HDU, extname, extver_num)
else:
# for case sensitivity we'll need to run through
# all the hdus
found = False
current_ext = 0
while True:
hdunum = current_ext+1
try:
hdu_type = _fits.movabs_hdu(hdunum) # noqa - not used
name, vers = _fits.get_hdu_name_version(hdunum)
if name == extname:
if extver is None:
# take the first match
found = True
break
else:
if extver_num == vers:
found = True
break
except OSError:
break
current_ext += 1
if not found:
raise IOError(
'hdu not found: %s (extver %s)' % (extname, extver))
return FITSHDR(_fits.read_header(hdunum)) |
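A minimal usage sketch for the reader above, assuming the function is exposed as fitsio.read_header as in the fitsio package; the filename 'image.fits' and the 'SCI' extension name are purely illustrative:
import fitsio
# Read the primary header (ext=0 by default).
primary_hdr = fitsio.read_header('image.fits')
# Read the header of version 2 of the 'SCI' extension; extension names
# are matched case-insensitively unless case_sensitive=True is passed.
sci_hdr = fitsio.read_header('image.fits', ext='SCI', extver=2)
# FITSHDR supports access by keyword.
print(sci_hdr['NAXIS'])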
def partition_dict(items, key):
"""
Given an ordered dictionary of items and a key in that dict,
return an ordered dict of items before, the keyed item, and
an ordered dict of items after.
>>> od = collections.OrderedDict(zip(range(5), 'abcde'))
>>> before, item, after = partition_dict(od, 3)
>>> before
OrderedDict([(0, 'a'), (1, 'b'), (2, 'c')])
>>> item
'd'
>>> after
OrderedDict([(4, 'e')])
Like string.partition, if the key is not found in the items,
the before will contain all items, item will be None, and
after will be an empty iterable.
>>> before, item, after = partition_dict(od, -1)
>>> before
OrderedDict([(0, 'a'), ..., (4, 'e')])
>>> item
>>> list(after)
[]
"""
def unmatched(pair):
test_key, item, = pair
return test_key != key
items_iter = iter(items.items())
item = items.get(key)
left = collections.OrderedDict(itertools.takewhile(unmatched, items_iter))
right = collections.OrderedDict(items_iter)
return left, item, right | Given an ordered dictionary of items and a key in that dict,
return an ordered dict of items before, the keyed item, and
an ordered dict of items after.
>>> od = collections.OrderedDict(zip(range(5), 'abcde'))
>>> before, item, after = partition_dict(od, 3)
>>> before
OrderedDict([(0, 'a'), (1, 'b'), (2, 'c')])
>>> item
'd'
>>> after
OrderedDict([(4, 'e')])
Like string.partition, if the key is not found in the items,
the before will contain all items, item will be None, and
after will be an empty iterable.
>>> before, item, after = partition_dict(od, -1)
>>> before
OrderedDict([(0, 'a'), ..., (4, 'e')])
>>> item
>>> list(after)
[] | Below is the the instruction that describes the task:
### Input:
Given an ordered dictionary of items and a key in that dict,
return an ordered dict of items before, the keyed item, and
an ordered dict of items after.
>>> od = collections.OrderedDict(zip(range(5), 'abcde'))
>>> before, item, after = partition_dict(od, 3)
>>> before
OrderedDict([(0, 'a'), (1, 'b'), (2, 'c')])
>>> item
'd'
>>> after
OrderedDict([(4, 'e')])
Like string.partition, if the key is not found in the items,
the before will contain all items, item will be None, and
after will be an empty iterable.
>>> before, item, after = partition_dict(od, -1)
>>> before
OrderedDict([(0, 'a'), ..., (4, 'e')])
>>> item
>>> list(after)
[]
### Response:
def partition_dict(items, key):
"""
Given an ordered dictionary of items and a key in that dict,
return an ordered dict of items before, the keyed item, and
an ordered dict of items after.
>>> od = collections.OrderedDict(zip(range(5), 'abcde'))
>>> before, item, after = partition_dict(od, 3)
>>> before
OrderedDict([(0, 'a'), (1, 'b'), (2, 'c')])
>>> item
'd'
>>> after
OrderedDict([(4, 'e')])
Like string.partition, if the key is not found in the items,
the before will contain all items, item will be None, and
after will be an empty iterable.
>>> before, item, after = partition_dict(od, -1)
>>> before
OrderedDict([(0, 'a'), ..., (4, 'e')])
>>> item
>>> list(after)
[]
"""
def unmatched(pair):
test_key, item, = pair
return test_key != key
items_iter = iter(items.items())
item = items.get(key)
left = collections.OrderedDict(itertools.takewhile(unmatched, items_iter))
right = collections.OrderedDict(items_iter)
return left, item, right |
def extend_hierarchy(levels, strength, aggregate, smooth, improve_candidates,
diagonal_dominance=False, keep=True):
"""Extend the multigrid hierarchy.
Service routine to implement the strength of connection, aggregation,
tentative prolongation construction, and prolongation smoothing. Called by
smoothed_aggregation_solver.
"""
def unpack_arg(v):
if isinstance(v, tuple):
return v[0], v[1]
else:
return v, {}
A = levels[-1].A
B = levels[-1].B
if A.symmetry == "nonsymmetric":
AH = A.H.asformat(A.format)
BH = levels[-1].BH
# Compute the strength-of-connection matrix C, where larger
# C[i, j] denote stronger couplings between i and j.
fn, kwargs = unpack_arg(strength[len(levels)-1])
if fn == 'symmetric':
C = symmetric_strength_of_connection(A, **kwargs)
elif fn == 'classical':
C = classical_strength_of_connection(A, **kwargs)
elif fn == 'distance':
C = distance_strength_of_connection(A, **kwargs)
elif (fn == 'ode') or (fn == 'evolution'):
if 'B' in kwargs:
C = evolution_strength_of_connection(A, **kwargs)
else:
C = evolution_strength_of_connection(A, B, **kwargs)
elif fn == 'energy_based':
C = energy_based_strength_of_connection(A, **kwargs)
elif fn == 'predefined':
C = kwargs['C'].tocsr()
elif fn == 'algebraic_distance':
C = algebraic_distance(A, **kwargs)
elif fn == 'affinity':
C = affinity_distance(A, **kwargs)
elif fn is None:
C = A.tocsr()
else:
raise ValueError('unrecognized strength of connection method: %s' %
str(fn))
# Avoid coarsening diagonally dominant rows
flag, kwargs = unpack_arg(diagonal_dominance)
if flag:
C = eliminate_diag_dom_nodes(A, C, **kwargs)
# Compute the aggregation matrix AggOp (i.e., the nodal coarsening of A).
# AggOp is a boolean matrix, where the sparsity pattern for the k-th column
# denotes the fine-grid nodes agglomerated into k-th coarse-grid node.
fn, kwargs = unpack_arg(aggregate[len(levels)-1])
if fn == 'standard':
AggOp, Cnodes = standard_aggregation(C, **kwargs)
elif fn == 'naive':
AggOp, Cnodes = naive_aggregation(C, **kwargs)
elif fn == 'lloyd':
AggOp, Cnodes = lloyd_aggregation(C, **kwargs)
elif fn == 'predefined':
AggOp = kwargs['AggOp'].tocsr()
Cnodes = kwargs['Cnodes']
else:
raise ValueError('unrecognized aggregation method %s' % str(fn))
# Improve near nullspace candidates by relaxing on A B = 0
fn, kwargs = unpack_arg(improve_candidates[len(levels)-1])
if fn is not None:
b = np.zeros((A.shape[0], 1), dtype=A.dtype)
B = relaxation_as_linear_operator((fn, kwargs), A, b) * B
levels[-1].B = B
if A.symmetry == "nonsymmetric":
BH = relaxation_as_linear_operator((fn, kwargs), AH, b) * BH
levels[-1].BH = BH
# Compute the tentative prolongator, T, which is a tentative interpolation
# matrix from the coarse-grid to the fine-grid. T exactly interpolates
# B_fine[:, 0:blocksize(A)] = T B_coarse[:, 0:blocksize(A)].
T, dummy = fit_candidates(AggOp, B[:, 0:blocksize(A)])
del dummy
if A.symmetry == "nonsymmetric":
TH, dummyH = fit_candidates(AggOp, BH[:, 0:blocksize(A)])
del dummyH
# Create necessary root node matrices
Cpt_params = (True, get_Cpt_params(A, Cnodes, AggOp, T))
T = scale_T(T, Cpt_params[1]['P_I'], Cpt_params[1]['I_F'])
if A.symmetry == "nonsymmetric":
TH = scale_T(TH, Cpt_params[1]['P_I'], Cpt_params[1]['I_F'])
# Set coarse grid near nullspace modes as injected fine grid near
# null-space modes
B = Cpt_params[1]['P_I'].T*levels[-1].B
if A.symmetry == "nonsymmetric":
BH = Cpt_params[1]['P_I'].T*levels[-1].BH
    # Smooth the tentative prolongator, so that its accuracy is greatly
# improved for algebraically smooth error.
fn, kwargs = unpack_arg(smooth[len(levels)-1])
if fn == 'energy':
P = energy_prolongation_smoother(A, T, C, B, levels[-1].B,
Cpt_params=Cpt_params, **kwargs)
elif fn is None:
P = T
else:
raise ValueError('unrecognized prolongation smoother \
method %s' % str(fn))
# Compute the restriction matrix R, which interpolates from the fine-grid
# to the coarse-grid. If A is nonsymmetric, then R must be constructed
# based on A.H. Otherwise R = P.H or P.T.
symmetry = A.symmetry
if symmetry == 'hermitian':
R = P.H
elif symmetry == 'symmetric':
R = P.T
elif symmetry == 'nonsymmetric':
fn, kwargs = unpack_arg(smooth[len(levels)-1])
if fn == 'energy':
R = energy_prolongation_smoother(AH, TH, C, BH, levels[-1].BH,
Cpt_params=Cpt_params, **kwargs)
R = R.H
elif fn is None:
R = T.H
else:
raise ValueError('unrecognized prolongation smoother \
method %s' % str(fn))
if keep:
levels[-1].C = C # strength of connection matrix
levels[-1].AggOp = AggOp # aggregation operator
levels[-1].T = T # tentative prolongator
levels[-1].Fpts = Cpt_params[1]['Fpts'] # Fpts
levels[-1].P_I = Cpt_params[1]['P_I'] # Injection operator
levels[-1].I_F = Cpt_params[1]['I_F'] # Identity on F-pts
levels[-1].I_C = Cpt_params[1]['I_C'] # Identity on C-pts
levels[-1].P = P # smoothed prolongator
levels[-1].R = R # restriction operator
levels[-1].Cpts = Cpt_params[1]['Cpts'] # Cpts (i.e., rootnodes)
levels.append(multilevel_solver.level())
A = R * A * P # Galerkin operator
A.symmetry = symmetry
levels[-1].A = A
levels[-1].B = B # right near nullspace candidates
if A.symmetry == "nonsymmetric":
levels[-1].BH = BH | Extend the multigrid hierarchy.
Service routine to implement the strength of connection, aggregation,
tentative prolongation construction, and prolongation smoothing. Called by
smoothed_aggregation_solver. | Below is the the instruction that describes the task:
### Input:
Extend the multigrid hierarchy.
Service routine to implement the strength of connection, aggregation,
tentative prolongation construction, and prolongation smoothing. Called by
smoothed_aggregation_solver.
### Response:
def extend_hierarchy(levels, strength, aggregate, smooth, improve_candidates,
diagonal_dominance=False, keep=True):
"""Extend the multigrid hierarchy.
Service routine to implement the strength of connection, aggregation,
tentative prolongation construction, and prolongation smoothing. Called by
smoothed_aggregation_solver.
"""
def unpack_arg(v):
if isinstance(v, tuple):
return v[0], v[1]
else:
return v, {}
A = levels[-1].A
B = levels[-1].B
if A.symmetry == "nonsymmetric":
AH = A.H.asformat(A.format)
BH = levels[-1].BH
# Compute the strength-of-connection matrix C, where larger
# C[i, j] denote stronger couplings between i and j.
fn, kwargs = unpack_arg(strength[len(levels)-1])
if fn == 'symmetric':
C = symmetric_strength_of_connection(A, **kwargs)
elif fn == 'classical':
C = classical_strength_of_connection(A, **kwargs)
elif fn == 'distance':
C = distance_strength_of_connection(A, **kwargs)
elif (fn == 'ode') or (fn == 'evolution'):
if 'B' in kwargs:
C = evolution_strength_of_connection(A, **kwargs)
else:
C = evolution_strength_of_connection(A, B, **kwargs)
elif fn == 'energy_based':
C = energy_based_strength_of_connection(A, **kwargs)
elif fn == 'predefined':
C = kwargs['C'].tocsr()
elif fn == 'algebraic_distance':
C = algebraic_distance(A, **kwargs)
elif fn == 'affinity':
C = affinity_distance(A, **kwargs)
elif fn is None:
C = A.tocsr()
else:
raise ValueError('unrecognized strength of connection method: %s' %
str(fn))
# Avoid coarsening diagonally dominant rows
flag, kwargs = unpack_arg(diagonal_dominance)
if flag:
C = eliminate_diag_dom_nodes(A, C, **kwargs)
# Compute the aggregation matrix AggOp (i.e., the nodal coarsening of A).
# AggOp is a boolean matrix, where the sparsity pattern for the k-th column
# denotes the fine-grid nodes agglomerated into k-th coarse-grid node.
fn, kwargs = unpack_arg(aggregate[len(levels)-1])
if fn == 'standard':
AggOp, Cnodes = standard_aggregation(C, **kwargs)
elif fn == 'naive':
AggOp, Cnodes = naive_aggregation(C, **kwargs)
elif fn == 'lloyd':
AggOp, Cnodes = lloyd_aggregation(C, **kwargs)
elif fn == 'predefined':
AggOp = kwargs['AggOp'].tocsr()
Cnodes = kwargs['Cnodes']
else:
raise ValueError('unrecognized aggregation method %s' % str(fn))
# Improve near nullspace candidates by relaxing on A B = 0
fn, kwargs = unpack_arg(improve_candidates[len(levels)-1])
if fn is not None:
b = np.zeros((A.shape[0], 1), dtype=A.dtype)
B = relaxation_as_linear_operator((fn, kwargs), A, b) * B
levels[-1].B = B
if A.symmetry == "nonsymmetric":
BH = relaxation_as_linear_operator((fn, kwargs), AH, b) * BH
levels[-1].BH = BH
# Compute the tentative prolongator, T, which is a tentative interpolation
# matrix from the coarse-grid to the fine-grid. T exactly interpolates
# B_fine[:, 0:blocksize(A)] = T B_coarse[:, 0:blocksize(A)].
T, dummy = fit_candidates(AggOp, B[:, 0:blocksize(A)])
del dummy
if A.symmetry == "nonsymmetric":
TH, dummyH = fit_candidates(AggOp, BH[:, 0:blocksize(A)])
del dummyH
# Create necessary root node matrices
Cpt_params = (True, get_Cpt_params(A, Cnodes, AggOp, T))
T = scale_T(T, Cpt_params[1]['P_I'], Cpt_params[1]['I_F'])
if A.symmetry == "nonsymmetric":
TH = scale_T(TH, Cpt_params[1]['P_I'], Cpt_params[1]['I_F'])
# Set coarse grid near nullspace modes as injected fine grid near
# null-space modes
B = Cpt_params[1]['P_I'].T*levels[-1].B
if A.symmetry == "nonsymmetric":
BH = Cpt_params[1]['P_I'].T*levels[-1].BH
    # Smooth the tentative prolongator, so that its accuracy is greatly
# improved for algebraically smooth error.
fn, kwargs = unpack_arg(smooth[len(levels)-1])
if fn == 'energy':
P = energy_prolongation_smoother(A, T, C, B, levels[-1].B,
Cpt_params=Cpt_params, **kwargs)
elif fn is None:
P = T
else:
raise ValueError('unrecognized prolongation smoother \
method %s' % str(fn))
# Compute the restriction matrix R, which interpolates from the fine-grid
# to the coarse-grid. If A is nonsymmetric, then R must be constructed
# based on A.H. Otherwise R = P.H or P.T.
symmetry = A.symmetry
if symmetry == 'hermitian':
R = P.H
elif symmetry == 'symmetric':
R = P.T
elif symmetry == 'nonsymmetric':
fn, kwargs = unpack_arg(smooth[len(levels)-1])
if fn == 'energy':
R = energy_prolongation_smoother(AH, TH, C, BH, levels[-1].BH,
Cpt_params=Cpt_params, **kwargs)
R = R.H
elif fn is None:
R = T.H
else:
raise ValueError('unrecognized prolongation smoother \
method %s' % str(fn))
if keep:
levels[-1].C = C # strength of connection matrix
levels[-1].AggOp = AggOp # aggregation operator
levels[-1].T = T # tentative prolongator
levels[-1].Fpts = Cpt_params[1]['Fpts'] # Fpts
levels[-1].P_I = Cpt_params[1]['P_I'] # Injection operator
levels[-1].I_F = Cpt_params[1]['I_F'] # Identity on F-pts
levels[-1].I_C = Cpt_params[1]['I_C'] # Identity on C-pts
levels[-1].P = P # smoothed prolongator
levels[-1].R = R # restriction operator
levels[-1].Cpts = Cpt_params[1]['Cpts'] # Cpts (i.e., rootnodes)
levels.append(multilevel_solver.level())
A = R * A * P # Galerkin operator
A.symmetry = symmetry
levels[-1].A = A
levels[-1].B = B # right near nullspace candidates
if A.symmetry == "nonsymmetric":
levels[-1].BH = BH |
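The strength, aggregate, improve_candidates and smooth arguments above are per-level lists whose entries follow the (name, kwargs) convention decoded by unpack_arg. A minimal sketch of passing such options to the public solver that drives this routine, assuming pyamg is installed (defaults of the public API may differ):
import numpy as np
import pyamg
from pyamg.gallery import poisson
A = poisson((100, 100), format='csr')              # 2D Poisson test matrix
ml = pyamg.smoothed_aggregation_solver(
    A,
    strength=('symmetric', {'theta': 0.0}),        # (name, kwargs) tuple form
    aggregate='standard',                          # bare string, kwargs default to {}
    smooth=('jacobi', {'omega': 4.0 / 3.0}),
)
b = np.random.rand(A.shape[0])
x = ml.solve(b, tol=1e-8)
print(np.linalg.norm(b - A @ x))                   # residual norm after the solve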
def gof_plot(
simdata, trueval, name=None, bins=None, format='png', suffix='-gof', path='./',
fontmap=None, verbose=0):
"""
Plots histogram of replicated data, indicating the location of the observed data
:Arguments:
simdata: array or PyMC object
Trace of simulated data or the PyMC stochastic object containing trace.
trueval: numeric
True (observed) value of the data
bins: int or string
The number of bins, or a preferred binning method. Available methods include
'doanes', 'sturges' and 'sqrt' (defaults to 'doanes').
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot.
"""
if fontmap is None:
fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
if not isinstance(simdata, ndarray):
## Can't just try and catch because ndarray objects also have
## `trace` method.
simdata = simdata.trace()
    if ndim(trueval) == 1 and ndim(simdata) == 2:
# Iterate over more than one set of data
for i in range(len(trueval)):
n = name or 'MCMC'
gof_plot(
simdata[
:,
i],
trueval[
i],
'%s[%i]' % (
n,
i),
bins=bins,
format=format,
suffix=suffix,
path=path,
fontmap=fontmap,
verbose=verbose)
return
if verbose > 0:
print_('Plotting', (name or 'MCMC') + suffix)
figure()
# Specify number of bins
if bins is None:
bins = 'sqrt'
uniquevals = len(unique(simdata))
if bins == 'sturges':
bins = uniquevals * (uniquevals <= 25) or _sturges(len(simdata))
elif bins == 'doanes':
bins = uniquevals * (
uniquevals <= 25) or _doanes(simdata,
len(simdata))
elif bins == 'sqrt':
bins = uniquevals * (uniquevals <= 25) or _sqrt_choice(len(simdata))
elif isinstance(bins, int):
bins = bins
else:
raise ValueError('Invalid bins argument in gof_plot')
# Generate histogram
hist(simdata, bins)
# Plot options
xlabel(name or 'Value', fontsize='x-small')
ylabel("Frequency", fontsize='x-small')
# Smaller tick labels
tlabels = gca().get_xticklabels()
setp(tlabels, 'fontsize', fontmap[1])
tlabels = gca().get_yticklabels()
setp(tlabels, 'fontsize', fontmap[1])
# Plot vertical line at location of true data value
axvline(x=trueval, linewidth=2, color='r', linestyle='dotted')
if not os.path.exists(path):
os.mkdir(path)
if not path.endswith('/'):
path += '/'
# Save to file
savefig("%s%s%s.%s" % (path, name or 'MCMC', suffix, format)) | Plots histogram of replicated data, indicating the location of the observed data
:Arguments:
simdata: array or PyMC object
Trace of simulated data or the PyMC stochastic object containing trace.
trueval: numeric
True (observed) value of the data
bins: int or string
The number of bins, or a preferred binning method. Available methods include
'doanes', 'sturges' and 'sqrt' (defaults to 'doanes').
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot. | Below is the the instruction that describes the task:
### Input:
Plots histogram of replicated data, indicating the location of the observed data
:Arguments:
simdata: array or PyMC object
Trace of simulated data or the PyMC stochastic object containing trace.
trueval: numeric
True (observed) value of the data
bins: int or string
The number of bins, or a preferred binning method. Available methods include
'doanes', 'sturges' and 'sqrt' (defaults to 'doanes').
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot.
### Response:
def gof_plot(
simdata, trueval, name=None, bins=None, format='png', suffix='-gof', path='./',
fontmap=None, verbose=0):
"""
Plots histogram of replicated data, indicating the location of the observed data
:Arguments:
simdata: array or PyMC object
Trace of simulated data or the PyMC stochastic object containing trace.
trueval: numeric
True (observed) value of the data
bins: int or string
The number of bins, or a preferred binning method. Available methods include
'doanes', 'sturges' and 'sqrt' (defaults to 'doanes').
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot.
"""
if fontmap is None:
fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
if not isinstance(simdata, ndarray):
## Can't just try and catch because ndarray objects also have
## `trace` method.
simdata = simdata.trace()
    if ndim(trueval) == 1 and ndim(simdata) == 2:
# Iterate over more than one set of data
for i in range(len(trueval)):
n = name or 'MCMC'
gof_plot(
simdata[
:,
i],
trueval[
i],
'%s[%i]' % (
n,
i),
bins=bins,
format=format,
suffix=suffix,
path=path,
fontmap=fontmap,
verbose=verbose)
return
if verbose > 0:
print_('Plotting', (name or 'MCMC') + suffix)
figure()
# Specify number of bins
if bins is None:
bins = 'sqrt'
uniquevals = len(unique(simdata))
if bins == 'sturges':
bins = uniquevals * (uniquevals <= 25) or _sturges(len(simdata))
elif bins == 'doanes':
bins = uniquevals * (
uniquevals <= 25) or _doanes(simdata,
len(simdata))
elif bins == 'sqrt':
bins = uniquevals * (uniquevals <= 25) or _sqrt_choice(len(simdata))
elif isinstance(bins, int):
bins = bins
else:
raise ValueError('Invalid bins argument in gof_plot')
# Generate histogram
hist(simdata, bins)
# Plot options
xlabel(name or 'Value', fontsize='x-small')
ylabel("Frequency", fontsize='x-small')
# Smaller tick labels
tlabels = gca().get_xticklabels()
setp(tlabels, 'fontsize', fontmap[1])
tlabels = gca().get_yticklabels()
setp(tlabels, 'fontsize', fontmap[1])
# Plot vertical line at location of true data value
axvline(x=trueval, linewidth=2, color='r', linestyle='dotted')
if not os.path.exists(path):
os.mkdir(path)
if not path.endswith('/'):
path += '/'
# Save to file
savefig("%s%s%s.%s" % (path, name or 'MCMC', suffix, format)) |
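A hedged usage sketch for gof_plot: replicated data is simulated with numpy and compared against a hypothetical observed value; the plotting helpers the function relies on (figure, hist, savefig, ...) are assumed to be imported in its module as in the original source:
import numpy as np
# 1000 replicated values of a summary statistic (e.g. posterior-predictive means).
simulated = np.random.normal(loc=5.0, scale=0.3, size=1000)
observed = 5.4    # hypothetical observed value of the same statistic
# Histogram of the replicates with a dotted red line at the observed value,
# saved as ./gof-plots/posterior-mean-gof.png.
gof_plot(simulated, observed, name='posterior-mean', bins=30, path='./gof-plots/')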
async def timeRangeAsync(
start: datetime.time, end: datetime.time,
step: float) -> AsyncIterator[datetime.datetime]:
"""
Async version of :meth:`timeRange`.
"""
assert step > 0
start = _fillDate(start)
end = _fillDate(end)
delta = datetime.timedelta(seconds=step)
t = start
while t < datetime.datetime.now():
t += delta
while t <= end:
await waitUntilAsync(t)
yield t
t += delta | Async version of :meth:`timeRange`. | Below is the the instruction that describes the task:
### Input:
Async version of :meth:`timeRange`.
### Response:
async def timeRangeAsync(
start: datetime.time, end: datetime.time,
step: float) -> AsyncIterator[datetime.datetime]:
"""
Async version of :meth:`timeRange`.
"""
assert step > 0
start = _fillDate(start)
end = _fillDate(end)
delta = datetime.timedelta(seconds=step)
t = start
while t < datetime.datetime.now():
t += delta
while t <= end:
await waitUntilAsync(t)
yield t
t += delta |
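A sketch of consuming the asynchronous generator above from an asyncio program, assuming the companion helpers _fillDate and waitUntilAsync are defined in the same module as in the original source:
import asyncio
import datetime
async def poll():
    start = datetime.time(9, 30)
    end = datetime.time(16, 0)
    # Wake up every 60 seconds between 09:30 and 16:00 today.
    async for t in timeRangeAsync(start, end, step=60):
        print('tick at', t)
asyncio.run(poll())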
def nonuniq(iterable):
"""
Yield the non-unique items of an iterable, preserving order. If an
item occurs N > 0 times in the input sequence, it will occur N-1
times in the output sequence.
Example:
>>> x = nonuniq([0, 0, 2, 6, 2, 0, 5])
>>> list(x)
[0, 2, 0]
"""
temp_dict = {}
for e in iterable:
if e in temp_dict:
yield e
temp_dict.setdefault(e, e) | Yield the non-unique items of an iterable, preserving order. If an
item occurs N > 0 times in the input sequence, it will occur N-1
times in the output sequence.
Example:
>>> x = nonuniq([0, 0, 2, 6, 2, 0, 5])
>>> list(x)
[0, 2, 0] | Below is the the instruction that describes the task:
### Input:
Yield the non-unique items of an iterable, preserving order. If an
item occurs N > 0 times in the input sequence, it will occur N-1
times in the output sequence.
Example:
>>> x = nonuniq([0, 0, 2, 6, 2, 0, 5])
>>> list(x)
[0, 2, 0]
### Response:
def nonuniq(iterable):
"""
Yield the non-unique items of an iterable, preserving order. If an
item occurs N > 0 times in the input sequence, it will occur N-1
times in the output sequence.
Example:
>>> x = nonuniq([0, 0, 2, 6, 2, 0, 5])
>>> list(x)
[0, 2, 0]
"""
temp_dict = {}
for e in iterable:
if e in temp_dict:
yield e
temp_dict.setdefault(e, e) |
def stop_host(self, config_file):
"""
Stops a managed host specified by `config_file`.
"""
res = self.send_json_request('host/stop', data={'config': config_file})
if res.status_code != 200:
raise UnexpectedResponse(
'Attempted to stop a JSHost. Response: {res_code}: {res_text}'.format(
res_code=res.status_code,
res_text=res.text,
)
)
return res.json() | Stops a managed host specified by `config_file`. | Below is the the instruction that describes the task:
### Input:
Stops a managed host specified by `config_file`.
### Response:
def stop_host(self, config_file):
"""
Stops a managed host specified by `config_file`.
"""
res = self.send_json_request('host/stop', data={'config': config_file})
if res.status_code != 200:
raise UnexpectedResponse(
'Attempted to stop a JSHost. Response: {res_code}: {res_text}'.format(
res_code=res.status_code,
res_text=res.text,
)
)
return res.json() |
def create_station(name, latlonalt, parent_frame=WGS84, orientation='N', mask=None):
"""Create a ground station instance
Args:
name (str): Name of the station
latlonalt (tuple of float): coordinates of the station, as follow:
* Latitude in degrees
* Longitude in degrees
* Altitude to sea level in meters
parent_frame (Frame): Planetocentric rotating frame of reference of
coordinates.
orientation (str or float): Heading of the station
Acceptable values are 'N', 'S', 'E', 'W' or any angle in radians
mask: (2D array of float): First dimension is azimuth counterclockwise strictly increasing.
Second dimension is elevation. Both in radians
Return:
TopocentricFrame
"""
if isinstance(orientation, str):
orient = {'N': np.pi, 'S': 0., 'E': np.pi / 2., 'W': 3 * np.pi / 2.}
heading = orient[orientation]
else:
heading = orientation
latlonalt = list(latlonalt)
latlonalt[:2] = np.radians(latlonalt[:2])
coordinates = TopocentricFrame._geodetic_to_cartesian(*latlonalt)
mtd = '_to_%s' % parent_frame.__name__
dct = {
mtd: TopocentricFrame._to_parent_frame,
'latlonalt': latlonalt,
'coordinates': coordinates,
'parent_frame': parent_frame,
'heading': heading,
'orientation': orientation,
'mask': np.array(mask) if mask else None,
}
cls = _MetaFrame(name, (TopocentricFrame,), dct)
cls + parent_frame
return cls | Create a ground station instance
Args:
name (str): Name of the station
latlonalt (tuple of float): coordinates of the station, as follow:
* Latitude in degrees
* Longitude in degrees
* Altitude to sea level in meters
parent_frame (Frame): Planetocentric rotating frame of reference of
coordinates.
orientation (str or float): Heading of the station
Acceptable values are 'N', 'S', 'E', 'W' or any angle in radians
mask: (2D array of float): First dimension is azimuth counterclockwise strictly increasing.
Second dimension is elevation. Both in radians
Return:
TopocentricFrame | Below is the the instruction that describes the task:
### Input:
Create a ground station instance
Args:
name (str): Name of the station
latlonalt (tuple of float): coordinates of the station, as follow:
* Latitude in degrees
* Longitude in degrees
* Altitude to sea level in meters
parent_frame (Frame): Planetocentric rotating frame of reference of
coordinates.
orientation (str or float): Heading of the station
Acceptable values are 'N', 'S', 'E', 'W' or any angle in radians
mask: (2D array of float): First dimension is azimuth counterclockwise strictly increasing.
Second dimension is elevation. Both in radians
Return:
TopocentricFrame
### Response:
def create_station(name, latlonalt, parent_frame=WGS84, orientation='N', mask=None):
"""Create a ground station instance
Args:
name (str): Name of the station
latlonalt (tuple of float): coordinates of the station, as follow:
* Latitude in degrees
* Longitude in degrees
* Altitude to sea level in meters
parent_frame (Frame): Planetocentric rotating frame of reference of
coordinates.
orientation (str or float): Heading of the station
Acceptable values are 'N', 'S', 'E', 'W' or any angle in radians
mask: (2D array of float): First dimension is azimuth counterclockwise strictly increasing.
Second dimension is elevation. Both in radians
Return:
TopocentricFrame
"""
if isinstance(orientation, str):
orient = {'N': np.pi, 'S': 0., 'E': np.pi / 2., 'W': 3 * np.pi / 2.}
heading = orient[orientation]
else:
heading = orientation
latlonalt = list(latlonalt)
latlonalt[:2] = np.radians(latlonalt[:2])
coordinates = TopocentricFrame._geodetic_to_cartesian(*latlonalt)
mtd = '_to_%s' % parent_frame.__name__
dct = {
mtd: TopocentricFrame._to_parent_frame,
'latlonalt': latlonalt,
'coordinates': coordinates,
'parent_frame': parent_frame,
'heading': heading,
'orientation': orientation,
'mask': np.array(mask) if mask else None,
}
cls = _MetaFrame(name, (TopocentricFrame,), dct)
cls + parent_frame
return cls |
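A usage sketch, assuming WGS84 and TopocentricFrame are importable from the surrounding library as in the code above; the coordinates and heading are illustrative only:
import numpy as np
# Ground station near Toulouse: latitude and longitude in degrees, altitude in meters.
tls = create_station('TLS', (43.428056, 1.497778, 178.0))
# An explicit heading (in radians) can be given instead of 'N'/'S'/'E'/'W'.
custom = create_station('Custom', (10.0, 20.0, 0.0), orientation=np.pi / 2)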
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._session_callback_removed`` if
it exists.
'''
super(SessionCallbackRemoved, self).dispatch(receiver)
if hasattr(receiver, '_session_callback_removed'):
receiver._session_callback_removed(self) | Dispatch handling of this event to a receiver.
This method will invoke ``receiver._session_callback_removed`` if
it exists. | Below is the the instruction that describes the task:
### Input:
Dispatch handling of this event to a receiver.
This method will invoke ``receiver._session_callback_removed`` if
it exists.
### Response:
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._session_callback_removed`` if
it exists.
'''
super(SessionCallbackRemoved, self).dispatch(receiver)
if hasattr(receiver, '_session_callback_removed'):
receiver._session_callback_removed(self) |
def get_volumes(self):
"""Gets a list of all volumes in this disk, including volumes that are contained in other volumes."""
volumes = []
for v in self.volumes:
volumes.extend(v.get_volumes())
return volumes | Gets a list of all volumes in this disk, including volumes that are contained in other volumes. | Below is the the instruction that describes the task:
### Input:
Gets a list of all volumes in this disk, including volumes that are contained in other volumes.
### Response:
def get_volumes(self):
"""Gets a list of all volumes in this disk, including volumes that are contained in other volumes."""
volumes = []
for v in self.volumes:
volumes.extend(v.get_volumes())
return volumes |
def IsFile(self):
"""Determines if the file entry is a file.
Returns:
bool: True if the file entry is a file.
"""
if self._stat_object is None:
self._stat_object = self._GetStat()
if self._stat_object is not None:
self.entry_type = self._stat_object.type
return self.entry_type == definitions.FILE_ENTRY_TYPE_FILE | Determines if the file entry is a file.
Returns:
bool: True if the file entry is a file. | Below is the the instruction that describes the task:
### Input:
Determines if the file entry is a file.
Returns:
bool: True if the file entry is a file.
### Response:
def IsFile(self):
"""Determines if the file entry is a file.
Returns:
bool: True if the file entry is a file.
"""
if self._stat_object is None:
self._stat_object = self._GetStat()
if self._stat_object is not None:
self.entry_type = self._stat_object.type
return self.entry_type == definitions.FILE_ENTRY_TYPE_FILE |
def run(self):
"""Reads data from disk and generates CSV files."""
# Try to create the directory
if not os.path.exists(self.output):
try:
os.mkdir(self.output)
except:
print 'failed to create output directory %s' % self.output
# Be sure it is a directory
if not os.path.isdir(self.output):
print 'invalid output directory %s' % self.output
sys.exit(1)
# Create the CSV handlers
visitors = [
_CompaniesCSV(self.output),
_ActivitiesCSV(self.output),
_ActivitiesSeenCSV(self.output),
_QSACSV(self.output),
]
# Run by each company populating the CSV files
for path in glob.glob(os.path.join(self.input, '*.json')):
with open(path, 'r') as f:
try:
data = json.load(f, encoding='utf-8')
except ValueError:
continue
for visitor in visitors:
visitor.visit(data) | Reads data from disk and generates CSV files. | Below is the the instruction that describes the task:
### Input:
Reads data from disk and generates CSV files.
### Response:
def run(self):
"""Reads data from disk and generates CSV files."""
# Try to create the directory
if not os.path.exists(self.output):
try:
os.mkdir(self.output)
except:
print 'failed to create output directory %s' % self.output
# Be sure it is a directory
if not os.path.isdir(self.output):
print 'invalid output directory %s' % self.output
sys.exit(1)
# Create the CSV handlers
visitors = [
_CompaniesCSV(self.output),
_ActivitiesCSV(self.output),
_ActivitiesSeenCSV(self.output),
_QSACSV(self.output),
]
# Run by each company populating the CSV files
for path in glob.glob(os.path.join(self.input, '*.json')):
with open(path, 'r') as f:
try:
data = json.load(f, encoding='utf-8')
except ValueError:
continue
for visitor in visitors:
visitor.visit(data) |
def _StripCommonPathPrefix(paths):
"""Removes path common prefix from a list of path strings."""
# Find the longest common prefix in terms of characters.
common_prefix = os.path.commonprefix(paths)
# Truncate at last segment boundary. E.g. '/aa/bb1/x.py' and '/a/bb2/x.py'
# have '/aa/bb' as the common prefix, but we should strip '/aa/' instead.
# If there's no '/' found, returns -1+1=0.
common_prefix_len = common_prefix.rfind('/') + 1
return [path[common_prefix_len:] for path in paths] | Removes path common prefix from a list of path strings. | Below is the the instruction that describes the task:
### Input:
Removes path common prefix from a list of path strings.
### Response:
def _StripCommonPathPrefix(paths):
"""Removes path common prefix from a list of path strings."""
# Find the longest common prefix in terms of characters.
common_prefix = os.path.commonprefix(paths)
# Truncate at last segment boundary. E.g. '/aa/bb1/x.py' and '/a/bb2/x.py'
# have '/aa/bb' as the common prefix, but we should strip '/aa/' instead.
# If there's no '/' found, returns -1+1=0.
common_prefix_len = common_prefix.rfind('/') + 1
return [path[common_prefix_len:] for path in paths] |
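A quick illustration of the stripping behaviour described in the comment (a sketch; the leading underscore marks the function as module-private in its original source):
paths = ['/aa/bb1/x.py', '/aa/bb2/y.py', '/aa/bb2/z.py']
print(_StripCommonPathPrefix(paths))
# -> ['bb1/x.py', 'bb2/y.py', 'bb2/z.py']  (truncated at the last '/' of '/aa/bb')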
def brightness(self):
"""Return current brightness 0-255.
For warm white return current led level. For RGB
calculate the HSV and return the 'value'.
"""
if self.mode == "ww":
return int(self.raw_state[9])
else:
_, _, v = colorsys.rgb_to_hsv(*self.getRgb())
return v | Return current brightness 0-255.
For warm white return current led level. For RGB
calculate the HSV and return the 'value'. | Below is the the instruction that describes the task:
### Input:
Return current brightness 0-255.
For warm white return current led level. For RGB
calculate the HSV and return the 'value'.
### Response:
def brightness(self):
"""Return current brightness 0-255.
For warm white return current led level. For RGB
calculate the HSV and return the 'value'.
"""
if self.mode == "ww":
return int(self.raw_state[9])
else:
_, _, v = colorsys.rgb_to_hsv(*self.getRgb())
return v |
def rmultivariate_hypergeometric(n, m, size=None):
"""
Random multivariate hypergeometric variates.
Parameters:
- `n` : Number of draws.
- `m` : Number of items in each categoy.
"""
N = len(m)
urn = np.repeat(np.arange(N), m)
if size:
draw = np.array([[urn[i] for i in np.random.permutation(len(urn))[:n]]
for j in range(size)])
r = [[np.sum(draw[j] == i) for i in range(len(m))]
for j in range(size)]
else:
draw = np.array([urn[i] for i in np.random.permutation(len(urn))[:n]])
r = [np.sum(draw == i) for i in range(len(m))]
return np.asarray(r) | Random multivariate hypergeometric variates.
Parameters:
- `n` : Number of draws.
- `m` : Number of items in each categoy. | Below is the the instruction that describes the task:
### Input:
Random multivariate hypergeometric variates.
Parameters:
- `n` : Number of draws.
- `m` : Number of items in each categoy.
### Response:
def rmultivariate_hypergeometric(n, m, size=None):
"""
Random multivariate hypergeometric variates.
Parameters:
- `n` : Number of draws.
- `m` : Number of items in each categoy.
"""
N = len(m)
urn = np.repeat(np.arange(N), m)
if size:
draw = np.array([[urn[i] for i in np.random.permutation(len(urn))[:n]]
for j in range(size)])
r = [[np.sum(draw[j] == i) for i in range(len(m))]
for j in range(size)]
else:
draw = np.array([urn[i] for i in np.random.permutation(len(urn))[:n]])
r = [np.sum(draw == i) for i in range(len(m))]
return np.asarray(r) |
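A short sketch of drawing variates with the function above; numpy is the only dependency:
import numpy as np
np.random.seed(0)   # illustrative seed for reproducibility
# Draw 5 items without replacement from an urn with 3, 4 and 3 items per category.
single = rmultivariate_hypergeometric(5, [3, 4, 3])
print(single, single.sum())    # per-category counts, always summing to 5
# Ten independent draws at once.
batch = rmultivariate_hypergeometric(5, [3, 4, 3], size=10)
print(batch.shape)             # (10, 3)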
def convert_tags_to_dict(item):
"""
Convert AWS inconvenient tags model of a list of {"Key": <key>, "Value": <value>} pairs
to a dict of {<key>: <value>} for easier querying.
This returns a proxied object over given item to return a different tags format as the tags
attribute is read-only and we cannot modify it directly.
"""
if hasattr(item, 'tags'):
tags = item.tags
if isinstance(tags, list):
tags_dict = {}
for kv_dict in tags:
if isinstance(kv_dict, dict) and 'Key' in kv_dict and 'Value' in kv_dict:
tags_dict[kv_dict['Key']] = kv_dict['Value']
return ObjectProxy(item, tags=tags_dict)
return item | Convert AWS inconvenient tags model of a list of {"Key": <key>, "Value": <value>} pairs
to a dict of {<key>: <value>} for easier querying.
This returns a proxied object over given item to return a different tags format as the tags
attribute is read-only and we cannot modify it directly. | Below is the the instruction that describes the task:
### Input:
Convert AWS inconvenient tags model of a list of {"Key": <key>, "Value": <value>} pairs
to a dict of {<key>: <value>} for easier querying.
This returns a proxied object over given item to return a different tags format as the tags
attribute is read-only and we cannot modify it directly.
### Response:
def convert_tags_to_dict(item):
"""
Convert AWS inconvenient tags model of a list of {"Key": <key>, "Value": <value>} pairs
to a dict of {<key>: <value>} for easier querying.
This returns a proxied object over given item to return a different tags format as the tags
attribute is read-only and we cannot modify it directly.
"""
if hasattr(item, 'tags'):
tags = item.tags
if isinstance(tags, list):
tags_dict = {}
for kv_dict in tags:
if isinstance(kv_dict, dict) and 'Key' in kv_dict and 'Value' in kv_dict:
tags_dict[kv_dict['Key']] = kv_dict['Value']
return ObjectProxy(item, tags=tags_dict)
return item |
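A sketch of the conversion, assuming ObjectProxy from the same module acts as a transparent wrapper that overrides the tags attribute; the SimpleNamespace below stands in for a boto3-style resource and is purely illustrative:
from types import SimpleNamespace
instance = SimpleNamespace(tags=[{'Key': 'Name', 'Value': 'web-1'},
                                 {'Key': 'env', 'Value': 'prod'}])
proxied = convert_tags_to_dict(instance)
print(proxied.tags)    # {'Name': 'web-1', 'env': 'prod'}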
def add_data_point_xy(self, x, y):
"""Add a new data point to the data set to be smoothed."""
self.x.append(x)
self.y.append(y) | Add a new data point to the data set to be smoothed. | Below is the the instruction that describes the task:
### Input:
Add a new data point to the data set to be smoothed.
### Response:
def add_data_point_xy(self, x, y):
"""Add a new data point to the data set to be smoothed."""
self.x.append(x)
self.y.append(y) |
def _try_import(module_name):
"""Try importing a module, with an informative error message on failure."""
try:
mod = importlib.import_module(module_name)
return mod
except ImportError:
err_msg = ("Tried importing %s but failed. See setup.py extras_require. "
"The dataset you are trying to use may have additional "
"dependencies.")
utils.reraise(err_msg) | Try importing a module, with an informative error message on failure. | Below is the the instruction that describes the task:
### Input:
Try importing a module, with an informative error message on failure.
### Response:
def _try_import(module_name):
"""Try importing a module, with an informative error message on failure."""
try:
mod = importlib.import_module(module_name)
return mod
except ImportError:
err_msg = ("Tried importing %s but failed. See setup.py extras_require. "
"The dataset you are trying to use may have additional "
"dependencies.")
utils.reraise(err_msg) |
def _load_data(self, band):
"""In-flight effective areas for the Swift UVOT, as obtained from the CALDB.
See Breeveld+ 2011. XXX: confirm that these are equal-energy, not
quantum-efficiency.
"""
d = bandpass_data_fits('sw' + self._band_map[band] + '_20041120v106.arf')[1].data
# note:
# data.WAVE_MIN[i] < data.WAVE_MIN[i+1], but
# data.WAVE_MIN[i] > data.WAVE_MAX[i] (!)
# data.WAVE_MIN[i] = data.WAVE_MAX[i+1] (!)
wmid = 0.5 * (d.WAVE_MIN + d.WAVE_MAX) # in Ångström
df = pd.DataFrame({'wlen': wmid, 'resp': d.SPECRESP,
'wlo': d.WAVE_MAX, 'whi': d.WAVE_MIN})
return df | In-flight effective areas for the Swift UVOT, as obtained from the CALDB.
See Breeveld+ 2011. XXX: confirm that these are equal-energy, not
quantum-efficiency. | Below is the the instruction that describes the task:
### Input:
In-flight effective areas for the Swift UVOT, as obtained from the CALDB.
See Breeveld+ 2011. XXX: confirm that these are equal-energy, not
quantum-efficiency.
### Response:
def _load_data(self, band):
"""In-flight effective areas for the Swift UVOT, as obtained from the CALDB.
See Breeveld+ 2011. XXX: confirm that these are equal-energy, not
quantum-efficiency.
"""
d = bandpass_data_fits('sw' + self._band_map[band] + '_20041120v106.arf')[1].data
# note:
# data.WAVE_MIN[i] < data.WAVE_MIN[i+1], but
# data.WAVE_MIN[i] > data.WAVE_MAX[i] (!)
# data.WAVE_MIN[i] = data.WAVE_MAX[i+1] (!)
wmid = 0.5 * (d.WAVE_MIN + d.WAVE_MAX) # in Ångström
df = pd.DataFrame({'wlen': wmid, 'resp': d.SPECRESP,
'wlo': d.WAVE_MAX, 'whi': d.WAVE_MIN})
return df |
def vdm_b(vdm, lat):
"""
Converts a virtual dipole moment (VDM) or a virtual axial dipole moment
(VADM; input in units of Am^2) to a local magnetic field value (output in
units of tesla)
Parameters
----------
vdm : V(A)DM in units of Am^2
lat: latitude of site in degrees
Returns
-------
B: local magnetic field strength in tesla
"""
rad = old_div(np.pi, 180.)
# changed radius of the earth from 3.367e6 3/12/2010
fact = ((6.371e6)**3) * 1e7
colat = (90. - lat) * rad
return vdm * (np.sqrt(1 + 3 * (np.cos(colat)**2))) / fact | Converts a virtual dipole moment (VDM) or a virtual axial dipole moment
(VADM; input in units of Am^2) to a local magnetic field value (output in
units of tesla)
Parameters
----------
vdm : V(A)DM in units of Am^2
lat: latitude of site in degrees
Returns
-------
B: local magnetic field strength in tesla | Below is the the instruction that describes the task:
### Input:
Converts a virtual dipole moment (VDM) or a virtual axial dipole moment
(VADM; input in units of Am^2) to a local magnetic field value (output in
units of tesla)
Parameters
----------
vdm : V(A)DM in units of Am^2
lat: latitude of site in degrees
Returns
-------
B: local magnetic field strength in tesla
### Response:
def vdm_b(vdm, lat):
"""
Converts a virtual dipole moment (VDM) or a virtual axial dipole moment
(VADM; input in units of Am^2) to a local magnetic field value (output in
units of tesla)
Parameters
----------
vdm : V(A)DM in units of Am^2
lat: latitude of site in degrees
Returns
-------
B: local magnetic field strength in tesla
"""
rad = old_div(np.pi, 180.)
# changed radius of the earth from 3.367e6 3/12/2010
fact = ((6.371e6)**3) * 1e7
colat = (90. - lat) * rad
return vdm * (np.sqrt(1 + 3 * (np.cos(colat)**2))) / fact |
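The conversion implements the dipole relation B = (mu0 / 4 pi) * m * sqrt(1 + 3 cos^2(colatitude)) / r^3 with r fixed at the Earth radius 6.371e6 m; since mu0/4pi = 1e-7 T m/A, the divisor fact equals r^3 * 1e7. A quick numeric check (the VADM value is illustrative):
# A VADM of 8e22 Am^2 (roughly the present-day dipole moment) at 45 degrees latitude.
B = vdm_b(8e22, 45.0)
print(B)    # ~4.9e-05 tesla, i.e. about 49 microtesla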
def _rspiral(width, height):
"""Reversed spiral generator.
Parameters
----------
width : `int`
Spiral width.
height : `int`
Spiral height.
Returns
-------
`generator` of (`int`, `int`)
Points.
"""
x0 = 0
y0 = 0
x1 = width - 1
y1 = height - 1
while x0 < x1 and y0 < y1:
for x in range(x0, x1):
yield x, y0
for y in range(y0, y1):
yield x1, y
for x in range(x1, x0, -1):
yield x, y1
for y in range(y1, y0, -1):
yield x0, y
x0 += 1
y0 += 1
x1 -= 1
y1 -= 1
if x0 == x1:
for y in range(y0, y1 + 1):
yield x0, y
elif y0 == y1:
for x in range(x0, x1 + 1):
yield x, y0 | Reversed spiral generator.
Parameters
----------
width : `int`
Spiral width.
height : `int`
Spiral height.
Returns
-------
`generator` of (`int`, `int`)
Points. | Below is the the instruction that describes the task:
### Input:
Reversed spiral generator.
Parameters
----------
width : `int`
Spiral width.
height : `int`
Spiral height.
Returns
-------
`generator` of (`int`, `int`)
Points.
### Response:
def _rspiral(width, height):
"""Reversed spiral generator.
Parameters
----------
width : `int`
Spiral width.
height : `int`
Spiral height.
Returns
-------
`generator` of (`int`, `int`)
Points.
"""
x0 = 0
y0 = 0
x1 = width - 1
y1 = height - 1
while x0 < x1 and y0 < y1:
for x in range(x0, x1):
yield x, y0
for y in range(y0, y1):
yield x1, y
for x in range(x1, x0, -1):
yield x, y1
for y in range(y1, y0, -1):
yield x0, y
x0 += 1
y0 += 1
x1 -= 1
y1 -= 1
if x0 == x1:
for y in range(y0, y1 + 1):
yield x0, y
elif y0 == y1:
for x in range(x0, x1 + 1):
yield x, y0 |
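A quick trace of the generator on a 3x3 grid (a sketch; the underscore prefix marks it as module-private):
print(list(_rspiral(3, 3)))
# [(0, 0), (1, 0), (2, 0), (2, 1), (2, 2), (1, 2), (0, 2), (0, 1), (1, 1)]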
def outer_product(vec0: QubitVector, vec1: QubitVector) -> QubitVector:
"""Direct product of qubit vectors
The tensor ranks must match and qubits must be disjoint.
"""
R = vec0.rank
R1 = vec1.rank
N0 = vec0.qubit_nb
N1 = vec1.qubit_nb
if R != R1:
        raise ValueError('Incompatible vectors. Rank must match')
if not set(vec0.qubits).isdisjoint(vec1.qubits):
raise ValueError('Overlapping qubits')
qubits: Qubits = tuple(vec0.qubits) + tuple(vec1.qubits)
tensor = bk.outer(vec0.tensor, vec1.tensor)
# Interleave (super)-operator axes
# R = 1 perm = (0, 1)
# R = 2 perm = (0, 2, 1, 3)
# R = 4 perm = (0, 4, 1, 5, 2, 6, 3, 7)
tensor = bk.reshape(tensor, ([2**N0] * R) + ([2**N1] * R))
perm = [idx for ij in zip(range(0, R), range(R, 2*R)) for idx in ij]
tensor = bk.transpose(tensor, perm)
return QubitVector(tensor, qubits) | Direct product of qubit vectors
The tensor ranks must match and qubits must be disjoint. | Below is the the instruction that describes the task:
### Input:
Direct product of qubit vectors
The tensor ranks must match and qubits must be disjoint.
### Response:
def outer_product(vec0: QubitVector, vec1: QubitVector) -> QubitVector:
"""Direct product of qubit vectors
The tensor ranks must match and qubits must be disjoint.
"""
R = vec0.rank
R1 = vec1.rank
N0 = vec0.qubit_nb
N1 = vec1.qubit_nb
if R != R1:
        raise ValueError('Incompatible vectors. Rank must match')
if not set(vec0.qubits).isdisjoint(vec1.qubits):
raise ValueError('Overlapping qubits')
qubits: Qubits = tuple(vec0.qubits) + tuple(vec1.qubits)
tensor = bk.outer(vec0.tensor, vec1.tensor)
# Interleave (super)-operator axes
# R = 1 perm = (0, 1)
# R = 2 perm = (0, 2, 1, 3)
# R = 4 perm = (0, 4, 1, 5, 2, 6, 3, 7)
tensor = bk.reshape(tensor, ([2**N0] * R) + ([2**N1] * R))
perm = [idx for ij in zip(range(0, R), range(R, 2*R)) for idx in ij]
tensor = bk.transpose(tensor, perm)
return QubitVector(tensor, qubits) |
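The permutation built above interleaves the (super)operator axes of the two factors. A numpy-only sketch of the same index bookkeeping for two rank-2 operators on one qubit each (this illustrates the axis shuffling only, not the QubitVector API):
import numpy as np
R, N0, N1 = 2, 1, 1
a = np.random.rand(*([2**N0] * R))      # rank-2 tensor on qubit set 0
b = np.random.rand(*([2**N1] * R))      # rank-2 tensor on qubit set 1
t = np.tensordot(a, b, axes=0)          # outer product: axes (a0, a1, b0, b1)
t = t.reshape([2**N0] * R + [2**N1] * R)
perm = [idx for ij in zip(range(0, R), range(R, 2 * R)) for idx in ij]
print(perm)                             # [0, 2, 1, 3] -> interleaved (a0, b0, a1, b1)
t = np.transpose(t, perm)
print(t.shape)                          # (2, 2, 2, 2)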
def _get_kws_plt(self, usrgos, **kws_usr):
"""Add go2color and go2bordercolor relevant to this grouping into plot."""
kws_plt = kws_usr.copy()
kws_dag = {}
hdrgo = kws_plt.get('hdrgo', None)
objcolor = GrouperColors(self.grprobj)
# GO term colors
if 'go2color' not in kws_usr:
kws_plt['go2color'] = objcolor.get_go2color_users()
elif hdrgo is not None:
go2color = kws_plt.get('go2color').copy()
go2color[hdrgo] = PltGroupedGosArgs.hdrgo_dflt_color
kws_plt['go2color'] = go2color
# GO term border colors
if 'go2bordercolor' not in kws_usr:
kws_plt['go2bordercolor'] = objcolor.get_bordercolor()
prune = kws_usr.get('prune', None)
if prune is True and hdrgo is not None:
kws_dag['dst_srcs_list'] = [(hdrgo, usrgos), (None, set([hdrgo]))]
kws_plt['parentcnt'] = True
elif prune:
kws_dag['dst_srcs_list'] = prune
kws_plt['parentcnt'] = True
# Group text
kws_plt['go2txt'] = self.get_go2txt(self.grprobj,
kws_plt.get('go2color'), kws_plt.get('go2bordercolor'))
return kws_plt, kws_dag | Add go2color and go2bordercolor relevant to this grouping into plot. | Below is the the instruction that describes the task:
### Input:
Add go2color and go2bordercolor relevant to this grouping into plot.
### Response:
def _get_kws_plt(self, usrgos, **kws_usr):
"""Add go2color and go2bordercolor relevant to this grouping into plot."""
kws_plt = kws_usr.copy()
kws_dag = {}
hdrgo = kws_plt.get('hdrgo', None)
objcolor = GrouperColors(self.grprobj)
# GO term colors
if 'go2color' not in kws_usr:
kws_plt['go2color'] = objcolor.get_go2color_users()
elif hdrgo is not None:
go2color = kws_plt.get('go2color').copy()
go2color[hdrgo] = PltGroupedGosArgs.hdrgo_dflt_color
kws_plt['go2color'] = go2color
# GO term border colors
if 'go2bordercolor' not in kws_usr:
kws_plt['go2bordercolor'] = objcolor.get_bordercolor()
prune = kws_usr.get('prune', None)
if prune is True and hdrgo is not None:
kws_dag['dst_srcs_list'] = [(hdrgo, usrgos), (None, set([hdrgo]))]
kws_plt['parentcnt'] = True
elif prune:
kws_dag['dst_srcs_list'] = prune
kws_plt['parentcnt'] = True
# Group text
kws_plt['go2txt'] = self.get_go2txt(self.grprobj,
kws_plt.get('go2color'), kws_plt.get('go2bordercolor'))
return kws_plt, kws_dag |
def safe_input(prompt):
"""
Prompts user for input. Correctly handles prompt message encoding.
"""
if sys.version_info < (3,0):
if isinstance(prompt, compat.text_type):
# Python 2.x: unicode → bytes
encoding = locale.getpreferredencoding() or 'utf-8'
prompt = prompt.encode(encoding)
else:
if not isinstance(prompt, compat.text_type):
# Python 3.x: bytes → unicode
prompt = prompt.decode()
return _input(prompt) | Prompts user for input. Correctly handles prompt message encoding. | Below is the the instruction that describes the task:
### Input:
Prompts user for input. Correctly handles prompt message encoding.
### Response:
def safe_input(prompt):
"""
Prompts user for input. Correctly handles prompt message encoding.
"""
if sys.version_info < (3,0):
if isinstance(prompt, compat.text_type):
# Python 2.x: unicode → bytes
encoding = locale.getpreferredencoding() or 'utf-8'
prompt = prompt.encode(encoding)
else:
if not isinstance(prompt, compat.text_type):
# Python 3.x: bytes → unicode
prompt = prompt.decode()
return _input(prompt) |