code stringlengths 75-104k | docstring stringlengths 1-46.9k | text stringlengths 164-112k
---|---|---|
def agent(cls, version=None):
"""
Returns:
a connection object to make REST calls to QDS
optionally override the `version` of the REST endpoint for advanced
features available only in the newer version of the API available
for certain resource end points eg: /v1.3/cluster. When version is
None we default to v1.2
"""
reuse_cached_agent = True
if version:
log.debug("api version changed to %s" % version)
cls.rest_url = '/'.join([cls.baseurl.rstrip('/'), version])
reuse_cached_agent = False
else:
cls.rest_url = '/'.join([cls.baseurl.rstrip('/'), cls.version])
if cls.api_token is None:
raise ConfigError("No API Token specified - please supply one via Qubole.configure()")
if not reuse_cached_agent:
uncached_agent = Connection(cls._auth, cls.rest_url, cls.skip_ssl_cert_check)
return uncached_agent
if cls.cached_agent is None:
cls.cached_agent = Connection(cls._auth, cls.rest_url, cls.skip_ssl_cert_check)
return cls.cached_agent | Returns:
a connection object to make REST calls to QDS
optionally override the `version` of the REST endpoint for advanced
features available only in the newer version of the API available
for certain resource end points eg: /v1.3/cluster. When version is
None we default to v1.2 | Below is the instruction that describes the task:
### Input:
Returns:
a connection object to make REST calls to QDS
optionally override the `version` of the REST endpoint for advanced
features available only in the newer version of the API available
for certain resource end points eg: /v1.3/cluster. When version is
None we default to v1.2
### Response:
def agent(cls, version=None):
"""
Returns:
a connection object to make REST calls to QDS
optionally override the `version` of the REST endpoint for advanced
features available only in the newer version of the API available
for certain resource end points eg: /v1.3/cluster. When version is
None we default to v1.2
"""
reuse_cached_agent = True
if version:
log.debug("api version changed to %s" % version)
cls.rest_url = '/'.join([cls.baseurl.rstrip('/'), version])
reuse_cached_agent = False
else:
cls.rest_url = '/'.join([cls.baseurl.rstrip('/'), cls.version])
if cls.api_token is None:
raise ConfigError("No API Token specified - please supply one via Qubole.configure()")
if not reuse_cached_agent:
uncached_agent = Connection(cls._auth, cls.rest_url, cls.skip_ssl_cert_check)
return uncached_agent
if cls.cached_agent is None:
cls.cached_agent = Connection(cls._auth, cls.rest_url, cls.skip_ssl_cert_check)
return cls.cached_agent |
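A minimal usage sketch for the classmethod above, assuming the usual qds-sdk entry points; the API token and version string are placeholders, not values taken from this dataset:

from qds_sdk.qubole import Qubole

Qubole.configure(api_token="YOUR_API_TOKEN")   # must be called before agent(), or ConfigError is raised
conn = Qubole.agent()                          # cached connection against the default v1.2 endpoint
conn_v13 = Qubole.agent(version="v1.3")        # uncached connection for /v1.3 resource endpoints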
def writeNumber(self, n):
"""
Writes a float to the stream.
@type n: C{float}
"""
self.stream.write(TYPE_NUMBER)
self.stream.write_double(n) | Writes a float to the stream.
@type n: C{float} | Below is the instruction that describes the task:
### Input:
Writes a float to the stream.
@type n: C{float}
### Response:
def writeNumber(self, n):
"""
Writes a float to the stream.
@type n: C{float}
"""
self.stream.write(TYPE_NUMBER)
self.stream.write_double(n) |
def run(self):
"""
Starts the server.
"""
server_logger = logging.getLogger('aiohttp.server')
# In debug mode we don't use the standard request log but a more complete one in response.py
if log.getEffectiveLevel() == logging.DEBUG:
server_logger.setLevel(logging.CRITICAL)
logger = logging.getLogger("asyncio")
logger.setLevel(logging.ERROR)
if sys.platform.startswith("win"):
loop = asyncio.get_event_loop()
# Add a periodic callback to give a chance to process signals on Windows
# because asyncio.add_signal_handler() is not supported yet on that platform
# otherwise the loop runs outside of signal module's ability to trap signals.
def wakeup():
loop.call_later(0.5, wakeup)
loop.call_later(0.5, wakeup)
server_config = Config.instance().get_section_config("Server")
ssl_context = None
if server_config.getboolean("ssl"):
if sys.platform.startswith("win"):
log.critical("SSL mode is not supported on Windows")
raise SystemExit
ssl_context = self._create_ssl_context(server_config)
self._loop = asyncio.get_event_loop()
if log.getEffectiveLevel() == logging.DEBUG:
# On debug version we enable info that
# coroutine is not called in a way await/yield from
self._loop.set_debug(True)
for key, val in os.environ.items():
log.debug("ENV %s=%s", key, val)
self._app = aiohttp.web.Application()
# Background task started with the server
self._app.on_startup.append(self._on_startup)
# Allow CORS for these domains
cors = aiohttp_cors.setup(self._app, defaults={
# Default web server for web gui dev
"http://127.0.0.1:8080": aiohttp_cors.ResourceOptions(expose_headers="*", allow_headers="*"),
"http://localhost:8080": aiohttp_cors.ResourceOptions(expose_headers="*", allow_headers="*"),
"http://127.0.0.1:4200": aiohttp_cors.ResourceOptions(expose_headers="*", allow_headers="*"),
"http://localhost:4200": aiohttp_cors.ResourceOptions(expose_headers="*", allow_headers="*"),
"http://gns3.github.io": aiohttp_cors.ResourceOptions(expose_headers="*", allow_headers="*"),
"https://gns3.github.io": aiohttp_cors.ResourceOptions(expose_headers="*", allow_headers="*")
})
PortManager.instance().console_host = self._host
for method, route, handler in Route.get_routes():
log.debug("Adding route: {} {}".format(method, route))
cors.add(self._app.router.add_route(method, route, handler))
for module in MODULES:
log.debug("Loading module {}".format(module.__name__))
m = module.instance()
m.port_manager = PortManager.instance()
log.info("Starting server on {}:{}".format(self._host, self._port))
self._handler = self._app.make_handler()
if self._run_application(self._handler, ssl_context) is False:
self._loop.stop()
return
self._signal_handling()
self._exit_handling()
if server_config.getboolean("shell"):
asyncio.async(self.start_shell())
try:
self._loop.run_forever()
except TypeError as e:
# This is to ignore an asyncio.windows_events exception
# on Windows when the process gets the SIGBREAK signal
# TypeError: async() takes 1 positional argument but 3 were given
log.warning("TypeError exception in the loop {}".format(e))
finally:
if self._loop.is_running():
self._loop.run_until_complete(self.shutdown_server()) | Starts the server. | Below is the instruction that describes the task:
### Input:
Starts the server.
### Response:
def run(self):
"""
Starts the server.
"""
server_logger = logging.getLogger('aiohttp.server')
# In debug mode we don't use the standard request log but a more complete one in response.py
if log.getEffectiveLevel() == logging.DEBUG:
server_logger.setLevel(logging.CRITICAL)
logger = logging.getLogger("asyncio")
logger.setLevel(logging.ERROR)
if sys.platform.startswith("win"):
loop = asyncio.get_event_loop()
# Add a periodic callback to give a chance to process signals on Windows
# because asyncio.add_signal_handler() is not supported yet on that platform
# otherwise the loop runs outside of signal module's ability to trap signals.
def wakeup():
loop.call_later(0.5, wakeup)
loop.call_later(0.5, wakeup)
server_config = Config.instance().get_section_config("Server")
ssl_context = None
if server_config.getboolean("ssl"):
if sys.platform.startswith("win"):
log.critical("SSL mode is not supported on Windows")
raise SystemExit
ssl_context = self._create_ssl_context(server_config)
self._loop = asyncio.get_event_loop()
if log.getEffectiveLevel() == logging.DEBUG:
# On debug version we enable info that
# coroutine is not called in a way await/yield from
self._loop.set_debug(True)
for key, val in os.environ.items():
log.debug("ENV %s=%s", key, val)
self._app = aiohttp.web.Application()
# Background task started with the server
self._app.on_startup.append(self._on_startup)
# Allow CORS for these domains
cors = aiohttp_cors.setup(self._app, defaults={
# Default web server for web gui dev
"http://127.0.0.1:8080": aiohttp_cors.ResourceOptions(expose_headers="*", allow_headers="*"),
"http://localhost:8080": aiohttp_cors.ResourceOptions(expose_headers="*", allow_headers="*"),
"http://127.0.0.1:4200": aiohttp_cors.ResourceOptions(expose_headers="*", allow_headers="*"),
"http://localhost:4200": aiohttp_cors.ResourceOptions(expose_headers="*", allow_headers="*"),
"http://gns3.github.io": aiohttp_cors.ResourceOptions(expose_headers="*", allow_headers="*"),
"https://gns3.github.io": aiohttp_cors.ResourceOptions(expose_headers="*", allow_headers="*")
})
PortManager.instance().console_host = self._host
for method, route, handler in Route.get_routes():
log.debug("Adding route: {} {}".format(method, route))
cors.add(self._app.router.add_route(method, route, handler))
for module in MODULES:
log.debug("Loading module {}".format(module.__name__))
m = module.instance()
m.port_manager = PortManager.instance()
log.info("Starting server on {}:{}".format(self._host, self._port))
self._handler = self._app.make_handler()
if self._run_application(self._handler, ssl_context) is False:
self._loop.stop()
return
self._signal_handling()
self._exit_handling()
if server_config.getboolean("shell"):
asyncio.async(self.start_shell())
try:
self._loop.run_forever()
except TypeError as e:
# This is to ignore an asyncio.windows_events exception
# on Windows when the process gets the SIGBREAK signal
# TypeError: async() takes 1 positional argument but 3 were given
log.warning("TypeError exception in the loop {}".format(e))
finally:
if self._loop.is_running():
self._loop.run_until_complete(self.shutdown_server()) |
def create_throttle():
""" Create a THROTTLE statement """
throttle_amount = "*" | Combine(number + "%") | number
return Group(
function("throttle", throttle_amount, throttle_amount, caseless=True)
).setResultsName("throttle") | Create a THROTTLE statement | Below is the instruction that describes the task:
### Input:
Create a THROTTLE statement
### Response:
def create_throttle():
""" Create a THROTTLE statement """
throttle_amount = "*" | Combine(number + "%") | number
return Group(
function("throttle", throttle_amount, throttle_amount, caseless=True)
).setResultsName("throttle") |
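The throttle_amount expression accepts a literal "*", a percentage, or a bare number. A small self-contained pyparsing sketch of just that sub-expression (the number and function helpers in the original project are custom, so a simplified number stand-in is used here):

from pyparsing import Combine, Word, nums

number = Word(nums)                          # simplified stand-in for the project's number token
throttle_amount = "*" | Combine(number + "%") | number

for sample in ["*", "50%", "1000"]:
    print(sample, "->", throttle_amount.parseString(sample).asList())
# *    -> ['*']
# 50%  -> ['50%']
# 1000 -> ['1000']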
def _make_model(self, data, key=None):
"""
Creates a model instance with the given data.
Args:
data: Model data returned from DB.
key: Object key
Returns:
pyoko.Model object.
"""
if data['deleted'] and not self.adapter.want_deleted:
raise ObjectDoesNotExist('Deleted object returned')
model = self._model_class(self._current_context,
_pass_perm_checks=self._pass_perm_checks)
model.setattr('key', ub_to_str(key) if key else ub_to_str(data.get('key')))
model = model.set_data(data, from_db=True)
model._initial_data = model.clean_value()
return model | Creates a model instance with the given data.
Args:
data: Model data returned from DB.
key: Object key
Returns:
pyoko.Model object. | Below is the instruction that describes the task:
### Input:
Creates a model instance with the given data.
Args:
data: Model data returned from DB.
key: Object key
Returns:
pyoko.Model object.
### Response:
def _make_model(self, data, key=None):
"""
Creates a model instance with the given data.
Args:
data: Model data returned from DB.
key: Object key
Returns:
pyoko.Model object.
"""
if data['deleted'] and not self.adapter.want_deleted:
raise ObjectDoesNotExist('Deleted object returned')
model = self._model_class(self._current_context,
_pass_perm_checks=self._pass_perm_checks)
model.setattr('key', ub_to_str(key) if key else ub_to_str(data.get('key')))
model = model.set_data(data, from_db=True)
model._initial_data = model.clean_value()
return model |
def transfer_and_wait(
self,
registry_address: PaymentNetworkID,
token_address: TokenAddress,
amount: TokenAmount,
target: Address,
identifier: PaymentID = None,
transfer_timeout: int = None,
secret: Secret = None,
secret_hash: SecretHash = None,
):
""" Do a transfer with `target` with the given `amount` of `token_address`. """
# pylint: disable=too-many-arguments
payment_status = self.transfer_async(
registry_address=registry_address,
token_address=token_address,
amount=amount,
target=target,
identifier=identifier,
secret=secret,
secret_hash=secret_hash,
)
payment_status.payment_done.wait(timeout=transfer_timeout)
return payment_status | Do a transfer with `target` with the given `amount` of `token_address`. | Below is the instruction that describes the task:
### Input:
Do a transfer with `target` with the given `amount` of `token_address`.
### Response:
def transfer_and_wait(
self,
registry_address: PaymentNetworkID,
token_address: TokenAddress,
amount: TokenAmount,
target: Address,
identifier: PaymentID = None,
transfer_timeout: int = None,
secret: Secret = None,
secret_hash: SecretHash = None,
):
""" Do a transfer with `target` with the given `amount` of `token_address`. """
# pylint: disable=too-many-arguments
payment_status = self.transfer_async(
registry_address=registry_address,
token_address=token_address,
amount=amount,
target=target,
identifier=identifier,
secret=secret,
secret_hash=secret_hash,
)
payment_status.payment_done.wait(timeout=transfer_timeout)
return payment_status |
def equal_args(*args, **kwargs):
"""A memoized key factory that compares the equality (`==`) of a stable sort of the parameters."""
key = args
if kwargs:
key += _kwargs_separator + tuple(sorted(kwargs.items()))
return key | A memoized key factory that compares the equality (`==`) of a stable sort of the parameters. | Below is the instruction that describes the task:
### Input:
A memoized key factory that compares the equality (`==`) of a stable sort of the parameters.
### Response:
def equal_args(*args, **kwargs):
"""A memoized key factory that compares the equality (`==`) of a stable sort of the parameters."""
key = args
if kwargs:
key += _kwargs_separator + tuple(sorted(kwargs.items()))
return key |
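A small sketch of how such a key factory behaves; _kwargs_separator is private to the original module, so a stand-in sentinel is defined here purely for illustration:

_kwargs_separator = ("__kwargs__",)          # hypothetical stand-in for the module-private sentinel

def equal_args(*args, **kwargs):
    key = args
    if kwargs:
        key += _kwargs_separator + tuple(sorted(kwargs.items()))
    return key

# kwargs are sorted, so call-site ordering does not change the cache key
assert equal_args(1, 2, a=3, b=4) == equal_args(1, 2, b=4, a=3)
print(equal_args(1, 2, a=3, b=4))            # (1, 2, '__kwargs__', ('a', 3), ('b', 4))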
def _check_r(self, r):
"""the columns must orthogonal"""
if abs(np.dot(r[:, 0], r[:, 0]) - 1) > eps or \
abs(np.dot(r[:, 0], r[:, 0]) - 1) > eps or \
abs(np.dot(r[:, 0], r[:, 0]) - 1) > eps or \
np.dot(r[:, 0], r[:, 1]) > eps or \
np.dot(r[:, 1], r[:, 2]) > eps or \
np.dot(r[:, 2], r[:, 0]) > eps:
raise ValueError("The rotation matrix is significantly non-orthonormal.") | the columns must orthogonal | Below is the the instruction that describes the task:
### Input:
the columns must be orthogonal
### Response:
def _check_r(self, r):
"""the columns must orthogonal"""
if abs(np.dot(r[:, 0], r[:, 0]) - 1) > eps or \
abs(np.dot(r[:, 0], r[:, 0]) - 1) > eps or \
abs(np.dot(r[:, 0], r[:, 0]) - 1) > eps or \
np.dot(r[:, 0], r[:, 1]) > eps or \
np.dot(r[:, 1], r[:, 2]) > eps or \
np.dot(r[:, 2], r[:, 0]) > eps:
raise ValueError("The rotation matrix is significantly non-orthonormal.") |
def _handle_special_yaml_cases(v):
"""Handle values that pass integer, boolean, list or dictionary values.
"""
if "::" in v:
out = {}
for part in v.split("::"):
k_part, v_part = part.split(":")
out[k_part] = v_part.split(";")
v = out
elif ";" in v:
# split lists and remove accidental empty values
v = [x for x in v.split(";") if x != ""]
elif isinstance(v, list):
v = v
else:
try:
v = int(v)
except ValueError:
if v.lower() == "true":
v = True
elif v.lower() == "false":
v = False
return v | Handle values that pass integer, boolean, list or dictionary values. | Below is the instruction that describes the task:
### Input:
Handle values that pass integer, boolean, list or dictionary values.
### Response:
def _handle_special_yaml_cases(v):
"""Handle values that pass integer, boolean, list or dictionary values.
"""
if "::" in v:
out = {}
for part in v.split("::"):
k_part, v_part = part.split(":")
out[k_part] = v_part.split(";")
v = out
elif ";" in v:
# split lists and remove accidental empty values
v = [x for x in v.split(";") if x != ""]
elif isinstance(v, list):
v = v
else:
try:
v = int(v)
except ValueError:
if v.lower() == "true":
v = True
elif v.lower() == "false":
v = False
return v |
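A few worked inputs for the helper above (assuming the function is in scope); the expected results are shown in the comments:

print(_handle_special_yaml_cases("quality_format:illumina::mark_duplicates:true"))
# {'quality_format': ['illumina'], 'mark_duplicates': ['true']}
print(_handle_special_yaml_cases("chr1;chr2;;chr3"))   # ['chr1', 'chr2', 'chr3'] (empty items dropped)
print(_handle_special_yaml_cases("42"))                # 42
print(_handle_special_yaml_cases("False"))             # False
print(_handle_special_yaml_cases("hg38"))              # 'hg38' (left unchanged)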
def main():
'''Main function.'''
usage = ('\n\n %prog [options] XML_PATH\n\nArguments:\n\n '
'XML_PATH the directory containing the '
'GermaNet .xml files')
parser = optparse.OptionParser(usage=usage)
parser.add_option('--host', default=None,
help='hostname or IP address of the MongoDB instance '
'where the GermaNet database will be inserted '
'(default: %default)')
parser.add_option('--port', type='int', default=None,
help='port number of the MongoDB instance where the '
'GermaNet database will be inserted (default: %default)')
parser.add_option('--database', dest='database_name', default='germanet',
help='the name of the database on the MongoDB instance '
'where GermaNet will be stored (default: %default)')
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
sys.exit(1)
xml_path = args[0]
client = MongoClient(options.host, options.port)
germanet_db = client[options.database_name]
lex_files, gn_rels_file, wiktionary_files, ili_files = \
find_germanet_xml_files(xml_path)
insert_lexical_information(germanet_db, lex_files)
insert_relation_information(germanet_db, gn_rels_file)
insert_paraphrase_information(germanet_db, wiktionary_files)
insert_lemmatisation_data(germanet_db)
insert_infocontent_data(germanet_db)
compute_max_min_depth(germanet_db)
client.close() | Main function. | Below is the instruction that describes the task:
### Input:
Main function.
### Response:
def main():
'''Main function.'''
usage = ('\n\n %prog [options] XML_PATH\n\nArguments:\n\n '
'XML_PATH the directory containing the '
'GermaNet .xml files')
parser = optparse.OptionParser(usage=usage)
parser.add_option('--host', default=None,
help='hostname or IP address of the MongoDB instance '
'where the GermaNet database will be inserted '
'(default: %default)')
parser.add_option('--port', type='int', default=None,
help='port number of the MongoDB instance where the '
'GermaNet database will be inserted (default: %default)')
parser.add_option('--database', dest='database_name', default='germanet',
help='the name of the database on the MongoDB instance '
'where GermaNet will be stored (default: %default)')
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
sys.exit(1)
xml_path = args[0]
client = MongoClient(options.host, options.port)
germanet_db = client[options.database_name]
lex_files, gn_rels_file, wiktionary_files, ili_files = \
find_germanet_xml_files(xml_path)
insert_lexical_information(germanet_db, lex_files)
insert_relation_information(germanet_db, gn_rels_file)
insert_paraphrase_information(germanet_db, wiktionary_files)
insert_lemmatisation_data(germanet_db)
insert_infocontent_data(germanet_db)
compute_max_min_depth(germanet_db)
client.close() |
def update(self, data, decayFactor, timeUnit):
"""Update the centroids, according to data
:param data:
RDD with new data for the model update.
:param decayFactor:
Forgetfulness of the previous centroids.
:param timeUnit:
Can be "batches" or "points". If points, then the decay factor
is raised to the power of number of new points and if batches,
then decay factor will be used as is.
"""
if not isinstance(data, RDD):
raise TypeError("Data should be of an RDD, got %s." % type(data))
data = data.map(_convert_to_vector)
decayFactor = float(decayFactor)
if timeUnit not in ["batches", "points"]:
raise ValueError(
"timeUnit should be 'batches' or 'points', got %s." % timeUnit)
vectorCenters = [_convert_to_vector(center) for center in self.centers]
updatedModel = callMLlibFunc(
"updateStreamingKMeansModel", vectorCenters, self._clusterWeights,
data, decayFactor, timeUnit)
self.centers = array(updatedModel[0])
self._clusterWeights = list(updatedModel[1])
return self | Update the centroids, according to data
:param data:
RDD with new data for the model update.
:param decayFactor:
Forgetfulness of the previous centroids.
:param timeUnit:
Can be "batches" or "points". If points, then the decay factor
is raised to the power of number of new points and if batches,
then decay factor will be used as is. | Below is the instruction that describes the task:
### Input:
Update the centroids, according to data
:param data:
RDD with new data for the model update.
:param decayFactor:
Forgetfulness of the previous centroids.
:param timeUnit:
Can be "batches" or "points". If points, then the decay factor
is raised to the power of number of new points and if batches,
then decay factor will be used as is.
### Response:
def update(self, data, decayFactor, timeUnit):
"""Update the centroids, according to data
:param data:
RDD with new data for the model update.
:param decayFactor:
Forgetfulness of the previous centroids.
:param timeUnit:
Can be "batches" or "points". If points, then the decay factor
is raised to the power of number of new points and if batches,
then decay factor will be used as is.
"""
if not isinstance(data, RDD):
raise TypeError("Data should be of an RDD, got %s." % type(data))
data = data.map(_convert_to_vector)
decayFactor = float(decayFactor)
if timeUnit not in ["batches", "points"]:
raise ValueError(
"timeUnit should be 'batches' or 'points', got %s." % timeUnit)
vectorCenters = [_convert_to_vector(center) for center in self.centers]
updatedModel = callMLlibFunc(
"updateStreamingKMeansModel", vectorCenters, self._clusterWeights,
data, decayFactor, timeUnit)
self.centers = array(updatedModel[0])
self._clusterWeights = list(updatedModel[1])
return self |
def install_package(package,
wheels_path,
venv=None,
requirement_files=None,
upgrade=False,
install_args=None):
"""Install a Python package.
Can specify a specific version.
Can specify a prerelease.
Can specify a venv to install in.
Can specify a list of paths or urls to requirement txt files.
Can specify a local wheels_path to use for offline installation.
Can request an upgrade.
"""
requirement_files = requirement_files or []
logger.info('Installing %s...', package)
if venv and not os.path.isdir(venv):
raise WagonError('virtualenv {0} does not exist'.format(venv))
pip_command = _construct_pip_command(
package,
wheels_path,
venv,
requirement_files,
upgrade,
install_args)
if IS_VIRTUALENV and not venv:
logger.info('Installing within current virtualenv')
result = _run(pip_command)
if not result.returncode == 0:
raise WagonError('Could not install package: {0} ({1})'.format(
package, result.aggr_stderr)) | Install a Python package.
Can specify a specific version.
Can specify a prerelease.
Can specify a venv to install in.
Can specify a list of paths or urls to requirement txt files.
Can specify a local wheels_path to use for offline installation.
Can request an upgrade. | Below is the instruction that describes the task:
### Input:
Install a Python package.
Can specify a specific version.
Can specify a prerelease.
Can specify a venv to install in.
Can specify a list of paths or urls to requirement txt files.
Can specify a local wheels_path to use for offline installation.
Can request an upgrade.
### Response:
def install_package(package,
wheels_path,
venv=None,
requirement_files=None,
upgrade=False,
install_args=None):
"""Install a Python package.
Can specify a specific version.
Can specify a prerelease.
Can specify a venv to install in.
Can specify a list of paths or urls to requirement txt files.
Can specify a local wheels_path to use for offline installation.
Can request an upgrade.
"""
requirement_files = requirement_files or []
logger.info('Installing %s...', package)
if venv and not os.path.isdir(venv):
raise WagonError('virtualenv {0} does not exist'.format(venv))
pip_command = _construct_pip_command(
package,
wheels_path,
venv,
requirement_files,
upgrade,
install_args)
if IS_VIRTUALENV and not venv:
logger.info('Installing within current virtualenv')
result = _run(pip_command)
if not result.returncode == 0:
raise WagonError('Could not install package: {0} ({1})'.format(
package, result.aggr_stderr)) |
def day_of_year(self):
"""
Returns the day of the year (1-366).
:rtype: int
"""
k = 1 if self.is_leap_year() else 2
return (275 * self.month) // 9 - k * ((self.month + 9) // 12) + self.day - 30 | Returns the day of the year (1-366).
:rtype: int | Below is the instruction that describes the task:
### Input:
Returns the day of the year (1-366).
:rtype: int
### Response:
def day_of_year(self):
"""
Returns the day of the year (1-366).
:rtype: int
"""
k = 1 if self.is_leap_year() else 2
return (275 * self.month) // 9 - k * ((self.month + 9) // 12) + self.day - 30 |
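The expression is the classic day-of-year formula; a quick standalone check against datetime confirms it for every day of a leap and a non-leap year:

import datetime

def day_of_year(year, month, day):
    # same arithmetic as above, with the leap-year flag computed from the year
    is_leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
    k = 1 if is_leap else 2
    return (275 * month) // 9 - k * ((month + 9) // 12) + day - 30

for year in (2023, 2024):                                   # non-leap and leap
    d = datetime.date(year, 1, 1)
    while d.year == year:
        assert day_of_year(d.year, d.month, d.day) == d.timetuple().tm_yday
        d += datetime.timedelta(days=1)
print("formula matches datetime for 2023 and 2024")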
def post_process(self, paths, dry_run=False, **options):
"""
Overridden to allow some files to be excluded (using
``postprocess_exclusions``)
"""
if self.postprocess_exclusions:
paths = dict((k, v) for k, v in paths.items() if not
self.exclude_file(k))
return super(CachedFilesMixin, self).post_process(paths,
dry_run, **options) | Overridden to allow some files to be excluded (using
``postprocess_exclusions``) | Below is the instruction that describes the task:
### Input:
Overridden to allow some files to be excluded (using
``postprocess_exclusions``)
### Response:
def post_process(self, paths, dry_run=False, **options):
"""
Overridden to allow some files to be excluded (using
``postprocess_exclusions``)
"""
if self.postprocess_exclusions:
paths = dict((k, v) for k, v in paths.items() if not
self.exclude_file(k))
return super(CachedFilesMixin, self).post_process(paths,
dry_run, **options) |
def balance(self):
"""Returns a tuple of (total amount deposited, total amount
withdrawn)."""
sin = Decimal("0.00")
sout = Decimal("0.00")
for t in self.trans:
if t.amount < Decimal("0.00"):
sout += t.amount
else:
sin += t.amount
return sin, sout | Returns a tuple of (total amount deposited, total amount
withdrawn). | Below is the instruction that describes the task:
### Input:
Returns a tuple of (total amount deposited, total amount
withdrawn).
### Response:
def balance(self):
"""Returns a tuple of (total amount deposited, total amount
withdrawn)."""
sin = Decimal("0.00")
sout = Decimal("0.00")
for t in self.trans:
if t.amount < Decimal("0.00"):
sout += t.amount
else:
sin += t.amount
return sin, sout |
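The split is driven purely by the sign of each amount; a short standalone sketch of the same bookkeeping with Decimal values:

from decimal import Decimal

amounts = [Decimal("100.00"), Decimal("-40.00"), Decimal("25.50")]
deposited = sum((a for a in amounts if a >= Decimal("0.00")), Decimal("0.00"))
withdrawn = sum((a for a in amounts if a < Decimal("0.00")), Decimal("0.00"))
print(deposited, withdrawn)      # 125.50 -40.00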
def write(self, filename):
"""
Write detector to a file - uses HDF5 file format.
Meta-data are stored alongside numpy data arrays. See h5py.org for \
details of the methods.
:type filename: str
:param filename: Filename to save the detector to.
"""
f = h5py.File(filename, "w")
# Must store eqcorrscan version number, username would be useful too.
data_group = f.create_group(name="data")
for i, data in enumerate(self.data):
dset = data_group.create_dataset(name="data_" + str(i),
shape=data.shape,
dtype=data.dtype)
dset[...] = data
data_group.attrs['length'] = len(self.data)
data_group.attrs['name'] = self.name.encode("ascii", "ignore")
data_group.attrs['sampling_rate'] = self.sampling_rate
data_group.attrs['multiplex'] = self.multiplex
data_group.attrs['lowcut'] = self.lowcut
data_group.attrs['highcut'] = self.highcut
data_group.attrs['filt_order'] = self.filt_order
data_group.attrs['dimension'] = self.dimension
data_group.attrs['user'] = getpass.getuser()
data_group.attrs['eqcorrscan_version'] = str(eqcorrscan.__version__)
# Convert station-channel list to something writable
ascii_stachans = ['.'.join(stachan).encode("ascii", "ignore")
for stachan in self.stachans]
stachans = f.create_dataset(name="stachans",
shape=(len(ascii_stachans),),
dtype='S10')
stachans[...] = ascii_stachans
u_group = f.create_group("u")
for i, u in enumerate(self.u):
uset = u_group.create_dataset(name="u_" + str(i),
shape=u.shape, dtype=u.dtype)
uset[...] = u
u_group.attrs['length'] = len(self.u)
sigma_group = f.create_group("sigma")
for i, sigma in enumerate(self.sigma):
sigmaset = sigma_group.create_dataset(name="sigma_" + str(i),
shape=sigma.shape,
dtype=sigma.dtype)
sigmaset[...] = sigma
sigma_group.attrs['length'] = len(self.sigma)
v_group = f.create_group("v")
for i, v in enumerate(self.v):
vset = v_group.create_dataset(name="v_" + str(i),
shape=v.shape, dtype=v.dtype)
vset[...] = v
v_group.attrs['length'] = len(self.v)
f.flush()
f.close()
return self | Write detector to a file - uses HDF5 file format.
Meta-data are stored alongside numpy data arrays. See h5py.org for \
details of the methods.
:type filename: str
:param filename: Filename to save the detector to. | Below is the instruction that describes the task:
### Input:
Write detector to a file - uses HDF5 file format.
Meta-data are stored alongside numpy data arrays. See h5py.org for \
details of the methods.
:type filename: str
:param filename: Filename to save the detector to.
### Response:
def write(self, filename):
"""
Write detector to a file - uses HDF5 file format.
Meta-data are stored alongside numpy data arrays. See h5py.org for \
details of the methods.
:type filename: str
:param filename: Filename to save the detector to.
"""
f = h5py.File(filename, "w")
# Must store eqcorrscan version number, username would be useful too.
data_group = f.create_group(name="data")
for i, data in enumerate(self.data):
dset = data_group.create_dataset(name="data_" + str(i),
shape=data.shape,
dtype=data.dtype)
dset[...] = data
data_group.attrs['length'] = len(self.data)
data_group.attrs['name'] = self.name.encode("ascii", "ignore")
data_group.attrs['sampling_rate'] = self.sampling_rate
data_group.attrs['multiplex'] = self.multiplex
data_group.attrs['lowcut'] = self.lowcut
data_group.attrs['highcut'] = self.highcut
data_group.attrs['filt_order'] = self.filt_order
data_group.attrs['dimension'] = self.dimension
data_group.attrs['user'] = getpass.getuser()
data_group.attrs['eqcorrscan_version'] = str(eqcorrscan.__version__)
# Convert station-channel list to something writable
ascii_stachans = ['.'.join(stachan).encode("ascii", "ignore")
for stachan in self.stachans]
stachans = f.create_dataset(name="stachans",
shape=(len(ascii_stachans),),
dtype='S10')
stachans[...] = ascii_stachans
u_group = f.create_group("u")
for i, u in enumerate(self.u):
uset = u_group.create_dataset(name="u_" + str(i),
shape=u.shape, dtype=u.dtype)
uset[...] = u
u_group.attrs['length'] = len(self.u)
sigma_group = f.create_group("sigma")
for i, sigma in enumerate(self.sigma):
sigmaset = sigma_group.create_dataset(name="sigma_" + str(i),
shape=sigma.shape,
dtype=sigma.dtype)
sigmaset[...] = sigma
sigma_group.attrs['length'] = len(self.sigma)
v_group = f.create_group("v")
for i, v in enumerate(self.v):
vset = v_group.create_dataset(name="v_" + str(i),
shape=v.shape, dtype=v.dtype)
vset[...] = v
v_group.attrs['length'] = len(self.v)
f.flush()
f.close()
return self |
def __unify_unique_identities(self, uidentities, matcher,
fast_matching, interactive):
"""Unify unique identities looking for similar identities."""
self.total = len(uidentities)
self.matched = 0
if self.recovery and self.recovery_file.exists():
print("Loading matches from recovery file: %s" % self.recovery_file.location())
matched = self.recovery_file.load_matches()
else:
matched = match(uidentities, matcher, fastmode=fast_matching)
# convert the matched identities to a common JSON format to ease resuming operations
matched = self.__marshal_matches(matched)
self.__merge(matched, interactive)
if self.recovery:
self.recovery_file.delete() | Unify unique identities looking for similar identities. | Below is the instruction that describes the task:
### Input:
Unify unique identities looking for similar identities.
### Response:
def __unify_unique_identities(self, uidentities, matcher,
fast_matching, interactive):
"""Unify unique identities looking for similar identities."""
self.total = len(uidentities)
self.matched = 0
if self.recovery and self.recovery_file.exists():
print("Loading matches from recovery file: %s" % self.recovery_file.location())
matched = self.recovery_file.load_matches()
else:
matched = match(uidentities, matcher, fastmode=fast_matching)
# convert the matched identities to a common JSON format to ease resuming operations
matched = self.__marshal_matches(matched)
self.__merge(matched, interactive)
if self.recovery:
self.recovery_file.delete() |
def _is_descendant_of(self, parent):
"""
Returns True if parent is in the list of ancestors, returns False
otherwise.
:type parent: Task
:param parent: The parent that is searched in the ancestors.
:rtype: bool
:returns: Whether the parent was found.
"""
if self.parent is None:
return False
if self.parent == parent:
return True
return self.parent._is_descendant_of(parent) | Returns True if parent is in the list of ancestors, returns False
otherwise.
:type parent: Task
:param parent: The parent that is searched in the ancestors.
:rtype: bool
:returns: Whether the parent was found. | Below is the instruction that describes the task:
### Input:
Returns True if parent is in the list of ancestors, returns False
otherwise.
:type parent: Task
:param parent: The parent that is searched in the ancestors.
:rtype: bool
:returns: Whether the parent was found.
### Response:
def _is_descendant_of(self, parent):
"""
Returns True if parent is in the list of ancestors, returns False
otherwise.
:type parent: Task
:param parent: The parent that is searched in the ancestors.
:rtype: bool
:returns: Whether the parent was found.
"""
if self.parent is None:
return False
if self.parent == parent:
return True
return self.parent._is_descendant_of(parent) |
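The walk simply follows parent links until it reaches the target or the root; a minimal stand-in class (not the real Task) shows the behaviour:

class Node:
    """Minimal stand-in for the real Task; only the parent link matters here."""
    def __init__(self, parent=None):
        self.parent = parent

    def _is_descendant_of(self, parent):
        if self.parent is None:
            return False
        if self.parent == parent:
            return True
        return self.parent._is_descendant_of(parent)

root = Node()
child = Node(parent=root)
grandchild = Node(parent=child)
print(grandchild._is_descendant_of(root))    # True
print(root._is_descendant_of(grandchild))    # False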
def _printHeadline(self, kind, test, is_failure=True):
"""Output a 1-line error summary to the stream if appropriate.
The line contains the kind of error and the pathname of the test.
:arg kind: The (string) type of incident that precipitated this call
:arg test: The test that precipitated this call
"""
if is_failure or self._options.show_advisories:
with self.bar.dodging():
self.stream.writeln(
'\n' +
(self._term.bold if is_failure else '') +
'%s: %s' % (kind, nose_selector(test)) +
(self._term.normal if is_failure else '')) | Output a 1-line error summary to the stream if appropriate.
The line contains the kind of error and the pathname of the test.
:arg kind: The (string) type of incident that precipitated this call
:arg test: The test that precipitated this call | Below is the instruction that describes the task:
### Input:
Output a 1-line error summary to the stream if appropriate.
The line contains the kind of error and the pathname of the test.
:arg kind: The (string) type of incident that precipitated this call
:arg test: The test that precipitated this call
### Response:
def _printHeadline(self, kind, test, is_failure=True):
"""Output a 1-line error summary to the stream if appropriate.
The line contains the kind of error and the pathname of the test.
:arg kind: The (string) type of incident that precipitated this call
:arg test: The test that precipitated this call
"""
if is_failure or self._options.show_advisories:
with self.bar.dodging():
self.stream.writeln(
'\n' +
(self._term.bold if is_failure else '') +
'%s: %s' % (kind, nose_selector(test)) +
(self._term.normal if is_failure else '')) |
def detailed_tokens(tokenizer, text):
"""Format Mecab output into a nice data structure, based on Janome."""
node = tokenizer.parseToNode(text)
node = node.next # first node is beginning of sentence and empty, skip it
words = []
while node.posid != 0:
surface = node.surface
base = surface # a default value. Updated if available later.
parts = node.feature.split(",")
pos = ",".join(parts[0:4])
if len(parts) > 7:
# this information is only available for words in the tokenizer
# dictionary
base = parts[7]
words.append(ShortUnitWord(surface, base, pos))
node = node.next
return words | Format Mecab output into a nice data structure, based on Janome. | Below is the instruction that describes the task:
### Input:
Format Mecab output into a nice data structure, based on Janome.
### Response:
def detailed_tokens(tokenizer, text):
"""Format Mecab output into a nice data structure, based on Janome."""
node = tokenizer.parseToNode(text)
node = node.next # first node is beginning of sentence and empty, skip it
words = []
while node.posid != 0:
surface = node.surface
base = surface # a default value. Updated if available later.
parts = node.feature.split(",")
pos = ",".join(parts[0:4])
if len(parts) > 7:
# this information is only available for words in the tokenizer
# dictionary
base = parts[7]
words.append(ShortUnitWord(surface, base, pos))
node = node.next
return words |
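A usage sketch, assuming the mecab-python3 bindings and that ShortUnitWord is a simple namedtuple as in spaCy's Japanese support; the field names below are an assumption, not taken from the original module:

from collections import namedtuple
import MeCab                                   # requires mecab-python3 and an installed dictionary

# Assumed shape of the tuple built by detailed_tokens(); the real definition may differ.
ShortUnitWord = namedtuple("ShortUnitWord", ["surface", "lemma", "pos"])

tokenizer = MeCab.Tagger()
for word in detailed_tokens(tokenizer, "すもももももももものうち"):
    print(word.surface, word.lemma, word.pos)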
def block_code(self):
inputs = self._get_all_input_values()
outputs = {}
"""
self.f = self.user_function(**inputs)
try:
outputs = self.f.send(inputs)
except StopIteration:
self.terminate()
"""
if self.first_time:
self.f = self.user_function(**inputs)
outputs = self.f.next()
self.first_time = False
else:
try:
outputs = self.f.send(inputs)
except StopIteration:
self.terminate()
if outputs:
for key in outputs.keys():
self.set_output_data(key, outputs[key])
if 'previous_outputs' in self.output_channels.keys():
self.output_channels['previous_outputs'].set_value(Data(self.time, copy.deepcopy(outputs))) | self.f = self.user_function(**inputs)
try:
outputs = self.f.send(inputs)
except StopIteration:
self.terminate() | Below is the instruction that describes the task:
### Input:
self.f = self.user_function(**inputs)
try:
outputs = self.f.send(inputs)
except StopIteration:
self.terminate()
### Response:
def block_code(self):
inputs = self._get_all_input_values()
outputs = {}
"""
self.f = self.user_function(**inputs)
try:
outputs = self.f.send(inputs)
except StopIteration:
self.terminate()
"""
if self.first_time:
self.f = self.user_function(**inputs)
outputs = self.f.next()
self.first_time = False
else:
try:
outputs = self.f.send(inputs)
except StopIteration:
self.terminate()
if outputs:
for key in outputs.keys():
self.set_output_data(key, outputs[key])
if 'previous_outputs' in self.output_channels.keys():
self.output_channels['previous_outputs'].set_value(Data(self.time, copy.deepcopy(outputs))) |
def _get_label(self):
'''Find the label for the output files
for this calculation
'''
if self._label is None:
foundfiles = False
for f in self._files:
if ".files" in f:
foundfiles = True
self._label = f.split(".")[0]
with open(self._label + '.files', 'r') as fp:
line = fp.readline().split()[0]
if line != self._label + ".in":
fp.close()
raise Exception('first line must be label.in')
line = fp.readline().split()[0]
if line != self._label + ".txt":
fp.close()
raise Exception('second line must be label.txt')
line = fp.readline().split()[0]
if line != self._label + "i":
fp.close()
raise Exception('third line must be labeli')
line = fp.readline().split()[0]
if line != self._label + "o":
fp.close()
raise Exception('fourth line must be labelo')
fp.close()
if foundfiles:
return self._label
else:
raise Exception('label.files not found')
#ASE format
# (self.prefix + '.in') # input
# (self.prefix + '.txt')# output
# (self.prefix + 'i') # input
# (self.prefix + 'o') # output
else:
return self._label | Find the label for the output files
for this calculation | Below is the instruction that describes the task:
### Input:
Find the label for the output files
for this calculation
### Response:
def _get_label(self):
'''Find the label for the output files
for this calculation
'''
if self._label is None:
foundfiles = False
for f in self._files:
if ".files" in f:
foundfiles = True
self._label = f.split(".")[0]
with open(self._label + '.files', 'r') as fp:
line = fp.readline().split()[0]
if line != self._label + ".in":
fp.close()
raise Exception('first line must be label.in')
line = fp.readline().split()[0]
if line != self._label + ".txt":
fp.close()
raise Exception('second line must be label.txt')
line = fp.readline().split()[0]
if line != self._label + "i":
fp.close()
raise Exception('third line must be labeli')
line = fp.readline().split()[0]
if line != self._label + "o":
fp.close()
raise Exception('fourth line must be labelo')
fp.close()
if foundfiles:
return self._label
else:
raise Exception('label.files not found')
#ASE format
# (self.prefix + '.in') # input
# (self.prefix + '.txt')# output
# (self.prefix + 'i') # input
# (self.prefix + 'o') # output
else:
return self._label |
def show_term_protect(name=None, instance_id=None, call=None, quiet=False):
'''
Show the details from EC2 concerning an instance's termination protection state
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_term_protect action must be called with -a or --action.'
)
if not instance_id:
instance_id = _get_node(name)['instanceId']
params = {'Action': 'DescribeInstanceAttribute',
'InstanceId': instance_id,
'Attribute': 'disableApiTermination'}
result = aws.query(params,
location=get_location(),
provider=get_provider(),
return_root=True,
opts=__opts__,
sigver='4')
disable_protect = False
for item in result:
if 'value' in item:
disable_protect = item['value']
break
log.log(
logging.DEBUG if quiet is True else logging.INFO,
'Termination Protection is %s for %s',
disable_protect == 'true' and 'enabled' or 'disabled', name
)
return disable_protect | Show the details from EC2 concerning an instance's termination protection state | Below is the instruction that describes the task:
### Input:
Show the details from EC2 concerning an instance's termination protection state
### Response:
def show_term_protect(name=None, instance_id=None, call=None, quiet=False):
'''
Show the details from EC2 concerning an instance's termination protection state
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_term_protect action must be called with -a or --action.'
)
if not instance_id:
instance_id = _get_node(name)['instanceId']
params = {'Action': 'DescribeInstanceAttribute',
'InstanceId': instance_id,
'Attribute': 'disableApiTermination'}
result = aws.query(params,
location=get_location(),
provider=get_provider(),
return_root=True,
opts=__opts__,
sigver='4')
disable_protect = False
for item in result:
if 'value' in item:
disable_protect = item['value']
break
log.log(
logging.DEBUG if quiet is True else logging.INFO,
'Termination Protection is %s for %s',
disable_protect == 'true' and 'enabled' or 'disabled', name
)
return disable_protect |
def discover(timeout=1, retries=1):
"""Discover Raumfeld devices in the network
:param timeout: The timeout in seconds
:param retries: How often the search should be retried
:returns: A list of raumfeld devices, sorted by name
"""
locations = []
group = ('239.255.255.250', 1900)
service = 'ssdp:urn:schemas-upnp-org:device:MediaRenderer:1' # 'ssdp:all'
message = '\r\n'.join(['M-SEARCH * HTTP/1.1',
'HOST: {group[0]}:{group[1]}',
'MAN: "ssdp:discover"',
'ST: {st}',
'MX: 1', '', '']).format(group=group, st=service)
socket.setdefaulttimeout(timeout)
for _ in range(retries):
sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP)
# socket options
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
# send group multicast
sock.sendto(message.encode('utf-8'), group)
while True:
try:
response = sock.recv(2048).decode('utf-8')
for line in response.split('\r\n'):
if line.startswith('Location: '):
location = line.split(' ')[1].strip()
if not location in locations:
locations.append(location)
except socket.timeout:
break
devices = [RaumfeldDevice(location) for location in locations]
# only return 'Virtual Media Player' and sort the list
return sorted([device for device in devices
if device.model_description == 'Virtual Media Player'],
key=lambda device: device.friendly_name) | Discover Raumfeld devices in the network
:param timeout: The timeout in seconds
:param retries: How often the search should be retried
:returns: A list of raumfeld devices, sorted by name | Below is the instruction that describes the task:
### Input:
Discover Raumfeld devices in the network
:param timeout: The timeout in seconds
:param retries: How often the search should be retried
:returns: A list of raumfeld devices, sorted by name
### Response:
def discover(timeout=1, retries=1):
"""Discover Raumfeld devices in the network
:param timeout: The timeout in seconds
:param retries: How often the search should be retried
:returns: A list of raumfeld devices, sorted by name
"""
locations = []
group = ('239.255.255.250', 1900)
service = 'ssdp:urn:schemas-upnp-org:device:MediaRenderer:1' # 'ssdp:all'
message = '\r\n'.join(['M-SEARCH * HTTP/1.1',
'HOST: {group[0]}:{group[1]}',
'MAN: "ssdp:discover"',
'ST: {st}',
'MX: 1', '', '']).format(group=group, st=service)
socket.setdefaulttimeout(timeout)
for _ in range(retries):
sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP)
# socket options
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
# send group multicast
sock.sendto(message.encode('utf-8'), group)
while True:
try:
response = sock.recv(2048).decode('utf-8')
for line in response.split('\r\n'):
if line.startswith('Location: '):
location = line.split(' ')[1].strip()
if not location in locations:
locations.append(location)
except socket.timeout:
break
devices = [RaumfeldDevice(location) for location in locations]
# only return 'Virtual Media Player' and sort the list
return sorted([device for device in devices
if device.model_description == 'Virtual Media Player'],
key=lambda device: device.friendly_name) |
def unzip_file_to_dir(path_to_zip, output_directory):
"""
Extract a ZIP archive to a directory
"""
z = ZipFile(path_to_zip, 'r')
z.extractall(output_directory)
z.close() | Extract a ZIP archive to a directory | Below is the instruction that describes the task:
### Input:
Extract a ZIP archive to a directory
### Response:
def unzip_file_to_dir(path_to_zip, output_directory):
"""
Extract a ZIP archive to a directory
"""
z = ZipFile(path_to_zip, 'r')
z.extractall(output_directory)
z.close() |
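Usage is a single call; the paths below are placeholders. Note that a `with ZipFile(...)` block would close the handle even if extraction fails, which the helper above does not:

unzip_file_to_dir("artifacts.zip", "build/unpacked")

# Equivalent, but exception-safe:
from zipfile import ZipFile
with ZipFile("artifacts.zip") as z:
    z.extractall("build/unpacked")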
def get_roles(client):
"""Returns a list of all the roles for an account. Returns a list containing all the roles for the account.
Args:
client (:obj:`boto3.session.Session`): A boto3 Session object
Returns:
:obj:`list` of `dict`
"""
done = False
marker = None
roles = []
while not done:
if marker:
response = client.list_roles(Marker=marker)
else:
response = client.list_roles()
roles += response['Roles']
if response['IsTruncated']:
marker = response['Marker']
else:
done = True
return roles | Returns a list of all the roles for an account. Returns a list containing all the roles for the account.
Args:
client (:obj:`boto3.session.Session`): A boto3 Session object
Returns:
:obj:`list` of `dict` | Below is the instruction that describes the task:
### Input:
Returns a list of all the roles for an account. Returns a list containing all the roles for the account.
Args:
client (:obj:`boto3.session.Session`): A boto3 Session object
Returns:
:obj:`list` of `dict`
### Response:
def get_roles(client):
"""Returns a list of all the roles for an account. Returns a list containing all the roles for the account.
Args:
client (:obj:`boto3.session.Session`): A boto3 Session object
Returns:
:obj:`list` of `dict`
"""
done = False
marker = None
roles = []
while not done:
if marker:
response = client.list_roles(Marker=marker)
else:
response = client.list_roles()
roles += response['Roles']
if response['IsTruncated']:
marker = response['Marker']
else:
done = True
return roles |
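Despite the docstring mentioning a boto3 Session, the calls above (list_roles, Marker, IsTruncated) belong to the IAM client, so a sketch would pass that in; credentials are resolved the usual boto3 way and the printed fields are standard list_roles output:

import boto3

iam = boto3.client("iam")            # the function expects an IAM client, not a raw Session
for role in get_roles(iam):
    print(role["RoleName"], role["Arn"])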
def _read_range(self, start, end=0):
"""
Read a range of bytes in stream.
Args:
start (int): Start stream position.
end (int): End stream position.
0 To not specify end.
Returns:
bytes: number of bytes read
"""
stream = _BytesIO()
try:
with _handle_azure_exception():
self._get_to_stream(
stream=stream, start_range=start,
end_range=(end - 1) if end else None, **self._client_kwargs)
# Check for end of file
except _AzureHttpError as exception:
if exception.status_code == 416:
# EOF
return bytes()
raise
return stream.getvalue() | Read a range of bytes in stream.
Args:
start (int): Start stream position.
end (int): End stream position.
0 To not specify end.
Returns:
bytes: number of bytes read | Below is the instruction that describes the task:
### Input:
Read a range of bytes in stream.
Args:
start (int): Start stream position.
end (int): End stream position.
0 To not specify end.
Returns:
bytes: number of bytes read
### Response:
def _read_range(self, start, end=0):
"""
Read a range of bytes in stream.
Args:
start (int): Start stream position.
end (int): End stream position.
0 To not specify end.
Returns:
bytes: number of bytes read
"""
stream = _BytesIO()
try:
with _handle_azure_exception():
self._get_to_stream(
stream=stream, start_range=start,
end_range=(end - 1) if end else None, **self._client_kwargs)
# Check for end of file
except _AzureHttpError as exception:
if exception.status_code == 416:
# EOF
return bytes()
raise
return stream.getvalue() |
def download(self, path=None, file_obj=None, progressbar=False):
"""Downloads files from One Codex.
Parameters
----------
path : `string`, optional
Full path to save the file to. If omitted, defaults to the original filename
in the current working directory.
file_obj : file-like object, optional
Rather than save the file to a path, write it to this file-like object.
progressbar : `bool`
Display a progress bar using Click for the download?
Returns
-------
`string`
The path the file was downloaded to, if applicable. Otherwise, None.
Notes
-----
If no arguments specified, defaults to download the file as the original filename
in the current working directory. If `file_obj` given, will write data into the
passed file-like object. If `path` given, will download the file to the path provided,
but will not overwrite any existing files.
"""
if path and file_obj:
raise OneCodexException("Please specify only one of: path, file_obj")
if path is None and file_obj is None:
path = os.path.join(os.getcwd(), self.filename)
if path and os.path.exists(path):
raise OneCodexException("{} already exists! Will not overwrite.".format(path))
try:
url_data = self._resource.download_uri()
resp = requests.get(url_data["download_uri"], stream=True)
with (open(path, "wb") if path else file_obj) as f_out:
if progressbar:
with click.progressbar(length=self.size, label=self.filename) as bar:
for data in resp.iter_content(chunk_size=1024):
bar.update(len(data))
f_out.write(data)
else:
for data in resp.iter_content(chunk_size=1024):
f_out.write(data)
except KeyboardInterrupt:
if path:
os.remove(path)
raise
except requests.exceptions.HTTPError as exc:
if exc.response.status_code == 401:
raise OneCodexException("You must be logged in to download files.")
elif exc.response.status_code == 402:
raise OneCodexException(
"You must either have a premium platform account or be in "
"a notebook environment to download files."
)
elif exc.response.status_code == 403:
raise OneCodexException("You are not authorized to download this file.")
else:
raise OneCodexException(
"Download failed with an HTTP status code {}.".format(exc.response.status_code)
)
return path | Downloads files from One Codex.
Parameters
----------
path : `string`, optional
Full path to save the file to. If omitted, defaults to the original filename
in the current working directory.
file_obj : file-like object, optional
Rather than save the file to a path, write it to this file-like object.
progressbar : `bool`
Display a progress bar using Click for the download?
Returns
-------
`string`
The path the file was downloaded to, if applicable. Otherwise, None.
Notes
-----
If no arguments specified, defaults to download the file as the original filename
in the current working directory. If `file_obj` given, will write data into the
passed file-like object. If `path` given, will download the file to the path provided,
but will not overwrite any existing files. | Below is the instruction that describes the task:
### Input:
Downloads files from One Codex.
Parameters
----------
path : `string`, optional
Full path to save the file to. If omitted, defaults to the original filename
in the current working directory.
file_obj : file-like object, optional
Rather than save the file to a path, write it to this file-like object.
progressbar : `bool`
Display a progress bar using Click for the download?
Returns
-------
`string`
The path the file was downloaded to, if applicable. Otherwise, None.
Notes
-----
If no arguments specified, defaults to download the file as the original filename
in the current working directory. If `file_obj` given, will write data into the
passed file-like object. If `path` given, will download the file to the path provided,
but will not overwrite any existing files.
### Response:
def download(self, path=None, file_obj=None, progressbar=False):
"""Downloads files from One Codex.
Parameters
----------
path : `string`, optional
Full path to save the file to. If omitted, defaults to the original filename
in the current working directory.
file_obj : file-like object, optional
Rather than save the file to a path, write it to this file-like object.
progressbar : `bool`
Display a progress bar using Click for the download?
Returns
-------
`string`
The path the file was downloaded to, if applicable. Otherwise, None.
Notes
-----
If no arguments specified, defaults to download the file as the original filename
in the current working directory. If `file_obj` given, will write data into the
passed file-like object. If `path` given, will download the file to the path provided,
but will not overwrite any existing files.
"""
if path and file_obj:
raise OneCodexException("Please specify only one of: path, file_obj")
if path is None and file_obj is None:
path = os.path.join(os.getcwd(), self.filename)
if path and os.path.exists(path):
raise OneCodexException("{} already exists! Will not overwrite.".format(path))
try:
url_data = self._resource.download_uri()
resp = requests.get(url_data["download_uri"], stream=True)
with (open(path, "wb") if path else file_obj) as f_out:
if progressbar:
with click.progressbar(length=self.size, label=self.filename) as bar:
for data in resp.iter_content(chunk_size=1024):
bar.update(len(data))
f_out.write(data)
else:
for data in resp.iter_content(chunk_size=1024):
f_out.write(data)
except KeyboardInterrupt:
if path:
os.remove(path)
raise
except requests.exceptions.HTTPError as exc:
if exc.response.status_code == 401:
raise OneCodexException("You must be logged in to download files.")
elif exc.response.status_code == 402:
raise OneCodexException(
"You must either have a premium platform account or be in "
"a notebook environment to download files."
)
elif exc.response.status_code == 403:
raise OneCodexException("You are not authorized to download this file.")
else:
raise OneCodexException(
"Download failed with an HTTP status code {}.".format(exc.response.status_code)
)
return path |
def configure(self, options=None, attribute_options=None): # pylint: disable=W0221
"""
Configures the options and attribute options of the mapping associated
with this representer with the given dictionaries.
:param dict options: configuration options for the mapping associated
with this representer.
:param dict attribute_options: attribute options for the mapping
associated with this representer.
"""
self._mapping.update(options=options,
attribute_options=attribute_options) | Configures the options and attribute options of the mapping associated
with this representer with the given dictionaries.
:param dict options: configuration options for the mapping associated
with this representer.
:param dict attribute_options: attribute options for the mapping
associated with this representer. | Below is the instruction that describes the task:
### Input:
Configures the options and attribute options of the mapping associated
with this representer with the given dictionaries.
:param dict options: configuration options for the mapping associated
with this representer.
:param dict attribute_options: attribute options for the mapping
associated with this representer.
### Response:
def configure(self, options=None, attribute_options=None): # pylint: disable=W0221
"""
Configures the options and attribute options of the mapping associated
with this representer with the given dictionaries.
:param dict options: configuration options for the mapping associated
with this representer.
:param dict attribute_options: attribute options for the mapping
associated with this representer.
"""
self._mapping.update(options=options,
attribute_options=attribute_options) |
def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
"""
Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
C{maxMatches} argument, to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens,start,end in Word(alphas).scanString(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn( instring, loc )
nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
except ParseException:
loc = preloc+1
else:
if nextLoc > loc:
matches += 1
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn( instring, loc )
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc+1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc | Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
C{maxMatches} argument, to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens,start,end in Word(alphas).scanString(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd | Below is the instruction that describes the task:
### Input:
Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
C{maxMatches} argument, to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens,start,end in Word(alphas).scanString(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
### Response:
def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
"""
Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
C{maxMatches} argument, to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens,start,end in Word(alphas).scanString(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn( instring, loc )
nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
except ParseException:
loc = preloc+1
else:
if nextLoc > loc:
matches += 1
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn( instring, loc )
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc+1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc |
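As a quick illustration of the scanString generator documented above, here is a small self-contained sketch (assuming the pyparsing package is installed; Word and nums are standard pyparsing exports, and the sample string is made up):

```python
from pyparsing import Word, nums

source = "id: 123, code: 456, zip: 789"
integer = Word(nums)

# scanString yields (tokens, start, end) for each match, scanning left to right
for tokens, start, end in integer.scanString(source, maxMatches=2):
    print(tokens[0], start, end)
# prints:
# 123 4 7
# 456 15 18
```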
def parse_clubs(self, clubs_page):
"""Parses the DOM and returns user clubs attributes.
:type clubs_page: :class:`bs4.BeautifulSoup`
:param clubs_page: MAL user clubs page's DOM
:rtype: dict
:return: User clubs attributes.
"""
user_info = self.parse_sidebar(clubs_page)
second_col = clubs_page.find(u'div', {u'id': u'content'}).find(u'table').find(u'tr').find_all(u'td', recursive=False)[1]
try:
user_info[u'clubs'] = []
club_list = second_col.find(u'ol')
if club_list:
clubs = club_list.find_all(u'li')
for row in clubs:
club_link = row.find(u'a')
link_parts = club_link.get(u'href').split(u'?cid=')
# of the form /clubs.php?cid=10178
user_info[u'clubs'].append(self.session.club(int(link_parts[1])).set({u'name': club_link.text}))
except:
if not self.session.suppress_parse_exceptions:
raise
return user_info | Parses the DOM and returns user clubs attributes.
:type clubs_page: :class:`bs4.BeautifulSoup`
:param clubs_page: MAL user clubs page's DOM
:rtype: dict
:return: User clubs attributes. | Below is the instruction that describes the task:
### Input:
Parses the DOM and returns user clubs attributes.
:type clubs_page: :class:`bs4.BeautifulSoup`
:param clubs_page: MAL user clubs page's DOM
:rtype: dict
:return: User clubs attributes.
### Response:
def parse_clubs(self, clubs_page):
"""Parses the DOM and returns user clubs attributes.
:type clubs_page: :class:`bs4.BeautifulSoup`
:param clubs_page: MAL user clubs page's DOM
:rtype: dict
:return: User clubs attributes.
"""
user_info = self.parse_sidebar(clubs_page)
second_col = clubs_page.find(u'div', {u'id': u'content'}).find(u'table').find(u'tr').find_all(u'td', recursive=False)[1]
try:
user_info[u'clubs'] = []
club_list = second_col.find(u'ol')
if club_list:
clubs = club_list.find_all(u'li')
for row in clubs:
club_link = row.find(u'a')
link_parts = club_link.get(u'href').split(u'?cid=')
# of the form /clubs.php?cid=10178
user_info[u'clubs'].append(self.session.club(int(link_parts[1])).set({u'name': club_link.text}))
except:
if not self.session.suppress_parse_exceptions:
raise
return user_info |
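The link-parsing step above (splitting the club id out of an href such as /clubs.php?cid=10178) can be tried in isolation with BeautifulSoup on a canned snippet; a sketch assuming bs4 is installed and using made-up markup:

```python
from bs4 import BeautifulSoup

html = '<ol><li><a href="/clubs.php?cid=10178">Example Club</a></li></ol>'
soup = BeautifulSoup(html, "html.parser")

clubs = []
for row in soup.find("ol").find_all("li"):
    link = row.find("a")
    club_id = int(link.get("href").split("?cid=")[1])
    clubs.append({"id": club_id, "name": link.text})

print(clubs)  # [{'id': 10178, 'name': 'Example Club'}]
```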
def visitExactRange(self, ctx: ShExDocParser.ExactRangeContext):
""" repeatRange: '{' INTEGER '}' #exactRange """
self.expression.min = int(ctx.INTEGER().getText())
self.expression.max = self.expression.min | repeatRange: '{' INTEGER '}' #exactRange | Below is the instruction that describes the task:
### Input:
repeatRange: '{' INTEGER '}' #exactRange
### Response:
def visitExactRange(self, ctx: ShExDocParser.ExactRangeContext):
""" repeatRange: '{' INTEGER '}' #exactRange """
self.expression.min = int(ctx.INTEGER().getText())
self.expression.max = self.expression.min |
def _disable_socket_timeout(self, socket):
""" Depending on the combination of python version and whether we're
connecting over http or https, we might need to access _sock, which
may or may not exist; or we may need to just settimeout on socket
itself, which also may or may not have settimeout on it. To avoid
missing the correct one, we try both.
We also do not want to set the timeout if it is already disabled, as
you run the risk of changing a socket that was non-blocking to
blocking, for example when using gevent.
"""
sockets = [socket, getattr(socket, '_sock', None)]
for s in sockets:
if not hasattr(s, 'settimeout'):
continue
timeout = -1
if hasattr(s, 'gettimeout'):
timeout = s.gettimeout()
# Don't change the timeout if it is already disabled.
if timeout is None or timeout == 0.0:
continue
s.settimeout(None) | Depending on the combination of python version and whether we're
connecting over http or https, we might need to access _sock, which
may or may not exist; or we may need to just settimeout on socket
itself, which also may or may not have settimeout on it. To avoid
missing the correct one, we try both.
We also do not want to set the timeout if it is already disabled, as
you run the risk of changing a socket that was non-blocking to
blocking, for example when using gevent. | Below is the instruction that describes the task:
### Input:
Depending on the combination of python version and whether we're
connecting over http or https, we might need to access _sock, which
may or may not exist; or we may need to just settimeout on socket
itself, which also may or may not have settimeout on it. To avoid
missing the correct one, we try both.
We also do not want to set the timeout if it is already disabled, as
you run the risk of changing a socket that was non-blocking to
blocking, for example when using gevent.
### Response:
def _disable_socket_timeout(self, socket):
""" Depending on the combination of python version and whether we're
connecting over http or https, we might need to access _sock, which
may or may not exist; or we may need to just settimeout on socket
itself, which also may or may not have settimeout on it. To avoid
missing the correct one, we try both.
We also do not want to set the timeout if it is already disabled, as
you run the risk of changing a socket that was non-blocking to
blocking, for example when using gevent.
"""
sockets = [socket, getattr(socket, '_sock', None)]
for s in sockets:
if not hasattr(s, 'settimeout'):
continue
timeout = -1
if hasattr(s, 'gettimeout'):
timeout = s.gettimeout()
# Don't change the timeout if it is already disabled.
if timeout is None or timeout == 0.0:
continue
s.settimeout(None) |
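The guard above matters because a timeout of None means "blocking" while 0.0 means "non-blocking", and only a real positive timeout should be cleared. A tiny standard-library sketch of the three states it distinguishes:

```python
import socket

s = socket.socket()
print(s.gettimeout())  # None -> already blocking, nothing to do
s.settimeout(0.0)
print(s.gettimeout())  # 0.0  -> non-blocking, must be left alone
s.settimeout(30.0)
print(s.gettimeout())  # 30.0 -> a real timeout; this is the case that gets disabled
s.settimeout(None)     # disable it, i.e. back to plain blocking mode
s.close()
```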
def _validate_classpath_tuples(self, classpath, target):
"""Validates that all files are located within the working directory, to simplify relativization.
:param classpath: The list of classpath tuples. Each tuple is a 2-tuple of ivy_conf and
ClasspathEntry.
:param target: The target that the classpath tuple is being registered for.
:raises: `TaskError` when the path is outside the work directory
"""
for classpath_tuple in classpath:
conf, classpath_entry = classpath_tuple
path = classpath_entry.path
if os.path.relpath(path, self._pants_workdir).startswith(os.pardir):
raise TaskError(
'Classpath entry {} for target {} is located outside the working directory "{}".'
.format(path, target.address.spec, self._pants_workdir)) | Validates that all files are located within the working directory, to simplify relativization.
:param classpath: The list of classpath tuples. Each tuple is a 2-tuple of ivy_conf and
ClasspathEntry.
:param target: The target that the classpath tuple is being registered for.
:raises: `TaskError` when the path is outside the work directory | Below is the instruction that describes the task:
### Input:
Validates that all files are located within the working directory, to simplify relativization.
:param classpath: The list of classpath tuples. Each tuple is a 2-tuple of ivy_conf and
ClasspathEntry.
:param target: The target that the classpath tuple is being registered for.
:raises: `TaskError` when the path is outside the work directory
### Response:
def _validate_classpath_tuples(self, classpath, target):
"""Validates that all files are located within the working directory, to simplify relativization.
:param classpath: The list of classpath tuples. Each tuple is a 2-tuple of ivy_conf and
ClasspathEntry.
:param target: The target that the classpath tuple is being registered for.
:raises: `TaskError` when the path is outside the work directory
"""
for classpath_tuple in classpath:
conf, classpath_entry = classpath_tuple
path = classpath_entry.path
if os.path.relpath(path, self._pants_workdir).startswith(os.pardir):
raise TaskError(
'Classpath entry {} for target {} is located outside the working directory "{}".'
.format(path, target.address.spec, self._pants_workdir)) |
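The containment test above hinges on os.path.relpath returning a path that starts with os.pardir ('..') whenever the entry escapes the work directory; a standalone sketch with made-up paths:

```python
import os

workdir = "/home/user/repo/.pants.d"

def is_outside(path, root=workdir):
    # relpath starts with ".." exactly when `path` is not under `root`
    return os.path.relpath(path, root).startswith(os.pardir)

print(is_outside("/home/user/repo/.pants.d/classpath/lib.jar"))  # False
print(is_outside("/tmp/somewhere-else/lib.jar"))                 # True
```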
def set_elevation(self, elevation_grid_path, mask_shapefile):
"""
Adds elevation file to project
"""
# ADD ELEVATION FILE
ele_file = ElevationGridFile(project_file=self.project_manager,
session=self.db_session)
ele_file.generateFromRaster(elevation_grid_path,
mask_shapefile,
load_raster_to_db=self.load_rasters_to_db) | Adds elevation file to project | Below is the instruction that describes the task:
### Input:
Adds elevation file to project
### Response:
def set_elevation(self, elevation_grid_path, mask_shapefile):
"""
Adds elevation file to project
"""
# ADD ELEVATION FILE
ele_file = ElevationGridFile(project_file=self.project_manager,
session=self.db_session)
ele_file.generateFromRaster(elevation_grid_path,
mask_shapefile,
load_raster_to_db=self.load_rasters_to_db) |
def _get_preprocessed(self, data):
"""
Returns:
(DeveloperPackage, new_data) 2-tuple IFF the preprocess function
changed the package; otherwise None.
"""
from rez.serialise import process_python_objects
from rez.utils.data_utils import get_dict_diff_str
from copy import deepcopy
with add_sys_paths(config.package_definition_build_python_paths):
preprocess_func = getattr(self, "preprocess", None)
if preprocess_func:
print_info("Applying preprocess from package.py")
else:
# load globally configured preprocess function
dotted = self.config.package_preprocess_function
if not dotted:
return None
if '.' not in dotted:
print_error(
"Setting 'package_preprocess_function' must be of "
"form 'module[.module.module...].funcname'. Package "
"preprocessing has not been applied.")
return None
name, funcname = dotted.rsplit('.', 1)
try:
module = __import__(name=name, fromlist=[funcname])
except Exception as e:
print_error("Failed to load preprocessing function '%s': %s"
% (dotted, str(e)))
return None
setattr(module, "InvalidPackageError", InvalidPackageError)
preprocess_func = getattr(module, funcname)
if not preprocess_func or not isfunction(preprocess_func):
print_error("Function '%s' not found" % dotted)
return None
print_info("Applying preprocess function %s" % dotted)
preprocessed_data = deepcopy(data)
# apply preprocessing
try:
preprocess_func(this=self, data=preprocessed_data)
except InvalidPackageError:
raise
except Exception as e:
print_error("Failed to apply preprocess: %s: %s"
% (e.__class__.__name__, str(e)))
return None
# if preprocess added functions, these may need to be converted to
# SourceCode instances
preprocessed_data = process_python_objects(preprocessed_data)
if preprocessed_data == data:
return None
# recreate package from modified package data
package = create_package(self.name, preprocessed_data,
package_cls=self.__class__)
# print summary of changed package attributes
txt = get_dict_diff_str(
data,
preprocessed_data,
title="Package attributes were changed in preprocessing:"
)
print_info(txt)
return package, preprocessed_data | Returns:
(DeveloperPackage, new_data) 2-tuple IFF the preprocess function
changed the package; otherwise None. | Below is the instruction that describes the task:
### Input:
Returns:
(DeveloperPackage, new_data) 2-tuple IFF the preprocess function
changed the package; otherwise None.
### Response:
def _get_preprocessed(self, data):
"""
Returns:
(DeveloperPackage, new_data) 2-tuple IFF the preprocess function
changed the package; otherwise None.
"""
from rez.serialise import process_python_objects
from rez.utils.data_utils import get_dict_diff_str
from copy import deepcopy
with add_sys_paths(config.package_definition_build_python_paths):
preprocess_func = getattr(self, "preprocess", None)
if preprocess_func:
print_info("Applying preprocess from package.py")
else:
# load globally configured preprocess function
dotted = self.config.package_preprocess_function
if not dotted:
return None
if '.' not in dotted:
print_error(
"Setting 'package_preprocess_function' must be of "
"form 'module[.module.module...].funcname'. Package "
"preprocessing has not been applied.")
return None
name, funcname = dotted.rsplit('.', 1)
try:
module = __import__(name=name, fromlist=[funcname])
except Exception as e:
print_error("Failed to load preprocessing function '%s': %s"
% (dotted, str(e)))
return None
setattr(module, "InvalidPackageError", InvalidPackageError)
preprocess_func = getattr(module, funcname)
if not preprocess_func or not isfunction(preprocess_func):
print_error("Function '%s' not found" % dotted)
return None
print_info("Applying preprocess function %s" % dotted)
preprocessed_data = deepcopy(data)
# apply preprocessing
try:
preprocess_func(this=self, data=preprocessed_data)
except InvalidPackageError:
raise
except Exception as e:
print_error("Failed to apply preprocess: %s: %s"
% (e.__class__.__name__, str(e)))
return None
# if preprocess added functions, these may need to be converted to
# SourceCode instances
preprocessed_data = process_python_objects(preprocessed_data)
if preprocessed_data == data:
return None
# recreate package from modified package data
package = create_package(self.name, preprocessed_data,
package_cls=self.__class__)
# print summary of changed package attributes
txt = get_dict_diff_str(
data,
preprocessed_data,
title="Package attributes were changed in preprocessing:"
)
print_info(txt)
return package, preprocessed_data |
def refitPrefixes(self):
"""
Refit namespace qualification by replacing prefixes
with explicit namespaces. Also purges prefix mapping table.
@return: self
@rtype: L{Element}
"""
for c in self.children:
c.refitPrefixes()
if self.prefix is not None:
ns = self.resolvePrefix(self.prefix)
if ns[1] is not None:
self.expns = ns[1]
self.prefix = None
self.nsprefixes = {}
return self | Refit namespace qualification by replacing prefixes
with explicit namespaces. Also purges prefix mapping table.
@return: self
@rtype: L{Element} | Below is the instruction that describes the task:
### Input:
Refit namespace qualification by replacing prefixes
with explicit namespaces. Also purges prefix mapping table.
@return: self
@rtype: L{Element}
### Response:
def refitPrefixes(self):
"""
Refit namespace qualification by replacing prefixes
with explicit namespaces. Also purges prefix mapping table.
@return: self
@rtype: L{Element}
"""
for c in self.children:
c.refitPrefixes()
if self.prefix is not None:
ns = self.resolvePrefix(self.prefix)
if ns[1] is not None:
self.expns = ns[1]
self.prefix = None
self.nsprefixes = {}
return self |
def is_closed_chunk(self, chk):
"""Check the chunk is free or not"""
cs = self.get_chunk_status(chk)
if cs & 0x2 != 0:
return True
return False | Check the chunk is free or not | Below is the instruction that describes the task:
### Input:
Check the chunk is free or not
### Response:
def is_closed_chunk(self, chk):
"""Check the chunk is free or not"""
cs = self.get_chunk_status(chk)
if cs & 0x2 != 0:
return True
return False |
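The status check above is a plain bitmask test: bit 1 (mask 0x2) of the chunk status word is treated as the closed flag. Illustrated with made-up status values:

```python
# bit 1 (0x2) set -> chunk counts as closed
for status in (0b000, 0b010, 0b011, 0b100):
    closed = (status & 0x2) != 0
    print(format(status, "#05b"), closed)
# 0b000 False
# 0b010 True
# 0b011 True
# 0b100 False
```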
def resizeColumnsToContents(self):
"""Resize the columns to its contents."""
self._autosized_cols = set()
self._resizeColumnsToContents(self.table_level,
self.table_index, self._max_autosize_ms)
self._update_layout() | Resize the columns to its contents. | Below is the instruction that describes the task:
### Input:
Resize the columns to its contents.
### Response:
def resizeColumnsToContents(self):
"""Resize the columns to its contents."""
self._autosized_cols = set()
self._resizeColumnsToContents(self.table_level,
self.table_index, self._max_autosize_ms)
self._update_layout() |
def get_article(doi, output_format='txt'):
"""Get the full body of an article from Elsevier.
Parameters
----------
doi : str
The doi for the desired article.
output_format : 'txt' or 'xml'
The desired format for the output. Selecting 'txt' (default) strips all
xml tags and joins the pieces of text in the main text, while 'xml'
simply takes the tag containing the body of the article and returns it
as is . In the latter case, downstream code needs to be able to
interpret Elsever's XML format.
Returns
-------
content : str
Either text content or xml, as described above, for the given doi.
"""
xml_string = download_article(doi)
if output_format == 'txt' and xml_string is not None:
text = extract_text(xml_string)
return text
return xml_string | Get the full body of an article from Elsevier.
Parameters
----------
doi : str
The doi for the desired article.
output_format : 'txt' or 'xml'
The desired format for the output. Selecting 'txt' (default) strips all
xml tags and joins the pieces of text in the main text, while 'xml'
simply takes the tag containing the body of the article and returns it
as is . In the latter case, downstream code needs to be able to
interpret Elsever's XML format.
Returns
-------
content : str
Either text content or xml, as described above, for the given doi. | Below is the instruction that describes the task:
### Input:
Get the full body of an article from Elsevier.
Parameters
----------
doi : str
The doi for the desired article.
output_format : 'txt' or 'xml'
The desired format for the output. Selecting 'txt' (default) strips all
xml tags and joins the pieces of text in the main text, while 'xml'
simply takes the tag containing the body of the article and returns it
as is . In the latter case, downstream code needs to be able to
interpret Elsever's XML format.
Returns
-------
content : str
Either text content or xml, as described above, for the given doi.
### Response:
def get_article(doi, output_format='txt'):
"""Get the full body of an article from Elsevier.
Parameters
----------
doi : str
The doi for the desired article.
output_format : 'txt' or 'xml'
The desired format for the output. Selecting 'txt' (default) strips all
xml tags and joins the pieces of text in the main text, while 'xml'
simply takes the tag containing the body of the article and returns it
as is . In the latter case, downstream code needs to be able to
interpret Elsever's XML format.
Returns
-------
content : str
Either text content or xml, as described above, for the given doi.
"""
xml_string = download_article(doi)
if output_format == 'txt' and xml_string is not None:
text = extract_text(xml_string)
return text
return xml_string |
def _setMetadata(self, text):
"""_setMetadata(self, text) -> PyObject *"""
if self.isClosed or self.isEncrypted:
raise ValueError("operation illegal for closed / encrypted doc")
return _fitz.Document__setMetadata(self, text) | _setMetadata(self, text) -> PyObject * | Below is the instruction that describes the task:
### Input:
_setMetadata(self, text) -> PyObject *
### Response:
def _setMetadata(self, text):
"""_setMetadata(self, text) -> PyObject *"""
if self.isClosed or self.isEncrypted:
raise ValueError("operation illegal for closed / encrypted doc")
return _fitz.Document__setMetadata(self, text) |
def save_model(self, file_name='model.cx'):
"""Save the assembled CX network in a file.
Parameters
----------
file_name : Optional[str]
The name of the file to save the CX network to. Default: model.cx
"""
with open(file_name, 'wt') as fh:
cx_str = self.print_cx()
fh.write(cx_str) | Save the assembled CX network in a file.
Parameters
----------
file_name : Optional[str]
The name of the file to save the CX network to. Default: model.cx | Below is the instruction that describes the task:
### Input:
Save the assembled CX network in a file.
Parameters
----------
file_name : Optional[str]
The name of the file to save the CX network to. Default: model.cx
### Response:
def save_model(self, file_name='model.cx'):
"""Save the assembled CX network in a file.
Parameters
----------
file_name : Optional[str]
The name of the file to save the CX network to. Default: model.cx
"""
with open(file_name, 'wt') as fh:
cx_str = self.print_cx()
fh.write(cx_str) |
def updateScreenshotProgress(self, screenshotHandle, flProgress):
"""
Call this if the application is taking the screen shot
will take more than a few ms processing. This will result
in an overlay being presented that shows a completion
bar.
"""
fn = self.function_table.updateScreenshotProgress
result = fn(screenshotHandle, flProgress)
return result | Call this if the application is taking the screen shot
will take more than a few ms processing. This will result
in an overlay being presented that shows a completion
bar. | Below is the instruction that describes the task:
### Input:
Call this if the application is taking the screen shot
will take more than a few ms processing. This will result
in an overlay being presented that shows a completion
bar.
### Response:
def updateScreenshotProgress(self, screenshotHandle, flProgress):
"""
Call this if the application is taking the screen shot
will take more than a few ms processing. This will result
in an overlay being presented that shows a completion
bar.
"""
fn = self.function_table.updateScreenshotProgress
result = fn(screenshotHandle, flProgress)
return result |
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
attrnsprefix=None):
'''Create an element & return it'''
# Deal with list of nsprefix by making namespacemap
namespacemap = None
if isinstance(nsprefix, list):
namespacemap = {}
for prefix in nsprefix:
namespacemap[prefix] = nsprefixes[prefix]
# FIXME: rest of code below expects a single prefix
nsprefix = nsprefix[0]
if nsprefix:
namespace = '{%s}' % nsprefixes[nsprefix]
else:
# For when namespace = None
namespace = ''
newelement = etree.Element(namespace+tagname, nsmap=namespacemap)
# Add attributes with namespaces
if attributes:
# If they haven't bothered setting attribute namespace, use an empty
# string (equivalent of no namespace)
if not attrnsprefix:
# Quick hack: it seems every element that has a 'w' nsprefix for
# its tag uses the same prefix for it's attributes
if nsprefix == 'w':
attributenamespace = namespace
else:
attributenamespace = ''
else:
attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'
for tagattribute in attributes:
newelement.set(attributenamespace+tagattribute,
attributes[tagattribute])
if tagtext:
newelement.text = tagtext
return newelement | Create an element & return it | Below is the instruction that describes the task:
### Input:
Create an element & return it
### Response:
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None,
attrnsprefix=None):
'''Create an element & return it'''
# Deal with list of nsprefix by making namespacemap
namespacemap = None
if isinstance(nsprefix, list):
namespacemap = {}
for prefix in nsprefix:
namespacemap[prefix] = nsprefixes[prefix]
# FIXME: rest of code below expects a single prefix
nsprefix = nsprefix[0]
if nsprefix:
namespace = '{%s}' % nsprefixes[nsprefix]
else:
# For when namespace = None
namespace = ''
newelement = etree.Element(namespace+tagname, nsmap=namespacemap)
# Add attributes with namespaces
if attributes:
# If they haven't bothered setting attribute namespace, use an empty
# string (equivalent of no namespace)
if not attrnsprefix:
# Quick hack: it seems every element that has a 'w' nsprefix for
# its tag uses the same prefix for it's attributes
if nsprefix == 'w':
attributenamespace = namespace
else:
attributenamespace = ''
else:
attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'
for tagattribute in attributes:
newelement.set(attributenamespace+tagattribute,
attributes[tagattribute])
if tagtext:
newelement.text = tagtext
return newelement |
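Under the hood the helper above builds lxml elements with Clark-notation names ('{namespace-uri}tag') plus an nsmap, which is what produces the w: prefixes on serialization. A minimal direct-lxml sketch of the same pattern (assuming lxml is installed; the URI is the standard WordprocessingML namespace):

```python
from lxml import etree

W_NS = "http://schemas.openxmlformats.org/wordprocessingml/2006/main"

p = etree.Element("{%s}p" % W_NS, nsmap={"w": W_NS})
r = etree.SubElement(p, "{%s}r" % W_NS)
t = etree.SubElement(r, "{%s}t" % W_NS)
t.text = "Hello"

# serializes as <w:p> / <w:r> / <w:t>Hello</w:t> with the w: prefix bound via nsmap
print(etree.tostring(p, pretty_print=True).decode())
```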
def putCallback(self,url,headers=""):
'''
Set the callback URL. To be used in place of LongPolling when deploying a webapp.
**note**: make sure you set up a callback URL in your web app
:param str url: complete url, including port, where the callback url is located
:param str headers: Optional - Headers to have Connector send back with all calls
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
payloadToSend = {"url":url}
if headers:
payloadToSend['headers'] = headers
data = self._putURL(url="/notification/callback",payload=payloadToSend, versioned=False)
if data.status_code == 204: #immediate success
result.error = False
result.result = data.content
else:
result.error = response_codes("put_callback_url",data.status_code)
result.raw_data = data.content
result.status_code = data.status_code
result.is_done = True
return result | Set the callback URL. To be used in place of LongPolling when deploying a webapp.
**note**: make sure you set up a callback URL in your web app
:param str url: complete url, including port, where the callback url is located
:param str headers: Optional - Headers to have Connector send back with all calls
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult | Below is the instruction that describes the task:
### Input:
Set the callback URL. To be used in place of LongPolling when deploying a webapp.
**note**: make sure you set up a callback URL in your web app
:param str url: complete url, including port, where the callback url is located
:param str headers: Optional - Headers to have Connector send back with all calls
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
### Response:
def putCallback(self,url,headers=""):
'''
Set the callback URL. To be used in place of LongPolling when deploying a webapp.
**note**: make sure you set up a callback URL in your web app
:param str url: complete url, including port, where the callback url is located
:param str headers: Optional - Headers to have Connector send back with all calls
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
payloadToSend = {"url":url}
if headers:
payloadToSend['headers'] = headers
data = self._putURL(url="/notification/callback",payload=payloadToSend, versioned=False)
if data.status_code == 204: #immediate success
result.error = False
result.result = data.content
else:
result.error = response_codes("put_callback_url",data.status_code)
result.raw_data = data.content
result.status_code = data.status_code
result.is_done = True
return result |
def output_after_run_set(self, runSet, cputime=None, walltime=None, energy={}):
"""
The method output_after_run_set() stores the times of a run set in XML.
@params cputime, walltime: accumulated times of the run set
"""
self.add_values_to_run_set_xml(runSet, cputime, walltime, energy)
# write results to files
self._write_pretty_result_xml_to_file(runSet.xml, runSet.xml_file_name)
if len(runSet.blocks) > 1:
for block in runSet.blocks:
blockFileName = self.get_filename(runSet.name, block.name + ".xml")
self._write_pretty_result_xml_to_file(
self.runs_to_xml(runSet, block.runs, block.name),
blockFileName)
self.txt_file.append(self.run_set_to_text(runSet, True, cputime, walltime, energy)) | The method output_after_run_set() stores the times of a run set in XML.
@params cputime, walltime: accumulated times of the run set | Below is the instruction that describes the task:
### Input:
The method output_after_run_set() stores the times of a run set in XML.
@params cputime, walltime: accumulated times of the run set
### Response:
def output_after_run_set(self, runSet, cputime=None, walltime=None, energy={}):
"""
The method output_after_run_set() stores the times of a run set in XML.
@params cputime, walltime: accumulated times of the run set
"""
self.add_values_to_run_set_xml(runSet, cputime, walltime, energy)
# write results to files
self._write_pretty_result_xml_to_file(runSet.xml, runSet.xml_file_name)
if len(runSet.blocks) > 1:
for block in runSet.blocks:
blockFileName = self.get_filename(runSet.name, block.name + ".xml")
self._write_pretty_result_xml_to_file(
self.runs_to_xml(runSet, block.runs, block.name),
blockFileName)
self.txt_file.append(self.run_set_to_text(runSet, True, cputime, walltime, energy)) |
def switch_delete_record_for_userid(self, userid):
"""Remove userid switch record from switch table."""
with get_network_conn() as conn:
conn.execute("DELETE FROM switch WHERE userid=?",
(userid,))
LOG.debug("Switch record for user %s is removed from "
"switch table" % userid) | Remove userid switch record from switch table. | Below is the the instruction that describes the task:
### Input:
Remove userid switch record from switch table.
### Response:
def switch_delete_record_for_userid(self, userid):
"""Remove userid switch record from switch table."""
with get_network_conn() as conn:
conn.execute("DELETE FROM switch WHERE userid=?",
(userid,))
LOG.debug("Switch record for user %s is removed from "
"switch table" % userid) |
async def main():
"""
Main code (synchronous requests)
"""
# Create Client from endpoint string in Duniter format
client = Client(BMAS_ENDPOINT)
# Get the node summary infos by dedicated method (with json schema validation)
print("\nCall bma.node.summary:")
response = await client(bma.node.summary)
print(response)
# Get the money parameters located in the first block
print("\nCall bma.blockchain.parameters:")
response = await client(bma.blockchain.parameters)
print(response)
# Get the current block
print("\nCall bma.blockchain.current:")
response = await client(bma.blockchain.current)
print(response)
# Get the block number 10
print("\nCall bma.blockchain.block(10):")
response = await client(bma.blockchain.block, 10)
print(response)
# jsonschema validator
summary_schema = {
"type": "object",
"properties": {
"duniter": {
"type": "object",
"properties": {
"software": {
"type": "string"
},
"version": {
"type": "string",
},
"forkWindowSize": {
"type": "number"
}
},
"required": ["software", "version"]
},
},
"required": ["duniter"]
}
# Get the node summary infos (direct REST GET request)
print("\nCall direct get on node/summary")
response = await client.get('node/summary', rtype=RESPONSE_AIOHTTP, schema=summary_schema)
print(response)
# Close client aiohttp session
await client.close() | Main code (synchronous requests) | Below is the instruction that describes the task:
### Input:
Main code (synchronous requests)
### Response:
async def main():
"""
Main code (synchronous requests)
"""
# Create Client from endpoint string in Duniter format
client = Client(BMAS_ENDPOINT)
# Get the node summary infos by dedicated method (with json schema validation)
print("\nCall bma.node.summary:")
response = await client(bma.node.summary)
print(response)
# Get the money parameters located in the first block
print("\nCall bma.blockchain.parameters:")
response = await client(bma.blockchain.parameters)
print(response)
# Get the current block
print("\nCall bma.blockchain.current:")
response = await client(bma.blockchain.current)
print(response)
# Get the block number 10
print("\nCall bma.blockchain.block(10):")
response = await client(bma.blockchain.block, 10)
print(response)
# jsonschema validator
summary_schema = {
"type": "object",
"properties": {
"duniter": {
"type": "object",
"properties": {
"software": {
"type": "string"
},
"version": {
"type": "string",
},
"forkWindowSize": {
"type": "number"
}
},
"required": ["software", "version"]
},
},
"required": ["duniter"]
}
# Get the node summary infos (direct REST GET request)
print("\nCall direct get on node/summary")
response = await client.get('node/summary', rtype=RESPONSE_AIOHTTP, schema=summary_schema)
print(response)
# Close client aiohttp session
await client.close() |
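The summary_schema dict above is ordinary JSON Schema, so the same check can be exercised offline with the jsonschema package; a sketch with a made-up payload (assuming jsonschema is installed):

```python
from jsonschema import ValidationError, validate

summary_schema = {
    "type": "object",
    "properties": {
        "duniter": {
            "type": "object",
            "properties": {
                "software": {"type": "string"},
                "version": {"type": "string"},
            },
            "required": ["software", "version"],
        },
    },
    "required": ["duniter"],
}

payload = {"duniter": {"software": "duniter", "version": "1.8.0"}}
try:
    validate(instance=payload, schema=summary_schema)
    print("payload matches the schema")
except ValidationError as exc:
    print("invalid payload:", exc.message)
```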
def term_count_buckets(self):
"""
Returns:
dict: A dictionary that maps occurrence counts to the terms that
appear that many times in the text.
"""
buckets = {}
for term, count in self.term_counts().items():
if count in buckets: buckets[count].append(term)
else: buckets[count] = [term]
return buckets | Returns:
dict: A dictionary that maps occurrence counts to the terms that
appear that many times in the text. | Below is the instruction that describes the task:
### Input:
Returns:
dict: A dictionary that maps occurrence counts to the terms that
appear that many times in the text.
### Response:
def term_count_buckets(self):
"""
Returns:
dict: A dictionary that maps occurrence counts to the terms that
appear that many times in the text.
"""
buckets = {}
for term, count in self.term_counts().items():
if count in buckets: buckets[count].append(term)
else: buckets[count] = [term]
return buckets |
def main() -> None:
"""
Command-line handler for the ``find_recovered_openxml`` tool.
Use the ``--help`` option for help.
"""
parser = ArgumentParser(
formatter_class=RawDescriptionHelpFormatter,
description="""
Tool to recognize and rescue Microsoft Office OpenXML files, even if they have
garbage appended to them.
- Rationale: when you have accidentally deleted files from an NTFS disk, and
they really matter, you should (a) stop what you're doing; (b) clone the disk
to an image file using "dd" under Linux; (c) perform all subsequent
operations on the cloned image (in read-only mode). Those steps might
include:
- ntfsundelete, to find files that the filesystem is still aware of;
- scalpel, to find files based on their contents.
- Scalpel is great at finding stuff efficiently, but it works best when files
can be defined by both a start (header) signature and an end (footer)
signature. However, the Microsoft Office OpenXML file format has a
recognizable header, but no standard footer. In these circumstances, Scalpel
reads up to a certain limit that you specify in its configuration file. (To
retrieve large Powerpoint files, this limit needs to be substantial, e.g.
50 Mb or more, depending on your ways of working with Powerpoint.)
- That means that files emerging from a Scalpel search for DOCX/PPTX/XLSX files
may be
- false positives, having nothing to do with Office;
- clean Office files (the least likely category!);
- Office files with garbage stuck on the end.
- The OpenXML file format is just a zip file. If you stick too much garbage on
the end of a zip file, zip readers will see it as corrupt.
- THIS TOOL detects (and optionally moves) potentially corrupted zipfiles based
on file contents, by unzipping the file and checking for "inner" files with
names like:
File type Contents filename signature (regular expression)
----------------------------------------------------------------
DOCX {DOCX_CONTENTS_REGEX_STR}
PPTX {PPTX_CONTENTS_REGEX_STR}
XLSX {XLSX_CONTENTS_REGEX_STR}
- WARNING: it's possible for an OpenXML file to contain more than one of these.
If so, they may be mis-classified.
- If a file is not immediately readable as a zip, it uses Linux's "zip -FF" to
repair zip files with corrupted ends, and tries again.
- Having found valid-looking files, you can elect to move them elsewhere.
- As an additional and VERY DANGEROUS operation, you can elect to delete files
that this tool doesn't recognize. (Why? Because a 450Gb disk might produce
well in excess of 1.7Tb of candidate files; many will be false positives and
even the true positives will all be expanded to your file size limit, e.g.
50 Mb. You may have a problem with available disk space, so running this tool
regularly allows you to clear up the junk. Use the --run_every option to help
with this.)
""".format(
DOCX_CONTENTS_REGEX_STR=DOCX_CONTENTS_REGEX_STR,
PPTX_CONTENTS_REGEX_STR=PPTX_CONTENTS_REGEX_STR,
XLSX_CONTENTS_REGEX_STR=XLSX_CONTENTS_REGEX_STR,
)
)
parser.add_argument(
"filename", nargs="+",
help="File(s) to check. You can also specify directores if you use "
"--recursive"
)
parser.add_argument(
"--recursive", action="store_true",
help="Allow search to descend recursively into any directories "
"encountered."
)
parser.add_argument(
"--skip_files", nargs="*", default=[],
help="File pattern(s) to skip. You can specify wildcards like '*.txt' "
"(but you will have to enclose that pattern in quotes under "
"UNIX-like operating systems). The basename of each file will be "
"tested against these filenames/patterns. Consider including "
"Scalpel's 'audit.txt'."
)
parser.add_argument(
"--filetypes", nargs="+", default=FILETYPES,
help="File types to check. Options: {}".format(FILETYPES)
)
parser.add_argument(
"--move_to",
help="If the file is recognized as one of the specified file types, "
"move it to the directory specified here."
)
parser.add_argument(
"--delete_if_not_specified_file_type", action="store_true",
help="If a file is NOT recognized as one of the specified file types, "
"delete it. VERY DANGEROUS."
)
parser.add_argument(
"--run_repeatedly", type=int,
help="Run the tool repeatedly with a pause of <run_repeatedly> "
"seconds between runs. (For this to work well with the move/"
"delete options, you should specify one or more DIRECTORIES in "
"the 'filename' arguments, not files, and you will need the "
"--recursive option.)"
)
parser.add_argument(
"--nprocesses", type=int, default=multiprocessing.cpu_count(),
help="Specify the number of processes to run in parallel."
)
parser.add_argument(
"--verbose", action="store_true",
help="Verbose output"
)
parser.add_argument(
"--show_zip_output", action="store_true",
help="Verbose output from the external 'zip' tool"
)
args = parser.parse_args()
main_only_quicksetup_rootlogger(
level=logging.DEBUG if args.verbose else logging.INFO,
with_process_id=True
)
# Further argument checks
if args.move_to:
if not os.path.isdir(args.move_to):
raise ValueError("Destination directory {!r} is not a "
"directory".format(args.move_to))
if not args.filetypes:
raise ValueError("No file type to scan for")
filetypes = [ft.lower() for ft in args.filetypes]
if any(ft not in FILETYPES for ft in filetypes):
raise ValueError("Invalid filetypes; choose from {}".format(FILETYPES))
assert shutil.which("zip"), "Need 'zip' tool!"
# Repeated scanning loop
while True:
log.info("Starting scan.")
log.info("- Looking for filetypes {}", filetypes)
log.info("- Scanning files/directories {!r}{}",
args.filename,
" recursively" if args.recursive else "")
log.info("- Skipping files matching {!r}", args.skip_files)
log.info("- Using {} simultaneous processes", args.nprocesses)
if args.move_to:
log.info("- Moving target files to " + args.move_to)
if args.delete_if_not_specified_file_type:
log.info("- Deleting non-target files.")
# Iterate through files
pool = multiprocessing.Pool(processes=args.nprocesses)
for filename in gen_filenames(starting_filenames=args.filename,
recursive=args.recursive):
src_basename = os.path.basename(filename)
if any(fnmatch.fnmatch(src_basename, pattern)
for pattern in args.skip_files):
log.info("Skipping file as ordered: " + filename)
continue
exists, locked = exists_locked(filename)
if locked or not exists:
log.info("Skipping currently inaccessible file: " + filename)
continue
kwargs = {
'filename': filename,
'filetypes': filetypes,
'move_to': args.move_to,
'delete_if_not_specified_file_type':
args.delete_if_not_specified_file_type,
'show_zip_output': args.show_zip_output,
}
# log.critical("start")
pool.apply_async(process_file, [], kwargs)
# result = pool.apply_async(process_file, [], kwargs)
# result.get() # will re-raise any child exceptions
# ... but it waits for the process to complete! That's no help.
# log.critical("next")
# ... https://stackoverflow.com/questions/22094852/how-to-catch-exceptions-in-workers-in-multiprocessing # noqa
pool.close()
pool.join()
log.info("Finished scan.")
if args.run_repeatedly is None:
break
log.info("Sleeping for {} s...", args.run_repeatedly)
sleep(args.run_repeatedly) | Command-line handler for the ``find_recovered_openxml`` tool.
Use the ``--help`` option for help. | Below is the instruction that describes the task:
### Input:
Command-line handler for the ``find_recovered_openxml`` tool.
Use the ``--help`` option for help.
### Response:
def main() -> None:
"""
Command-line handler for the ``find_recovered_openxml`` tool.
Use the ``--help`` option for help.
"""
parser = ArgumentParser(
formatter_class=RawDescriptionHelpFormatter,
description="""
Tool to recognize and rescue Microsoft Office OpenXML files, even if they have
garbage appended to them.
- Rationale: when you have accidentally deleted files from an NTFS disk, and
they really matter, you should (a) stop what you're doing; (b) clone the disk
to an image file using "dd" under Linux; (c) perform all subsequent
operations on the cloned image (in read-only mode). Those steps might
include:
- ntfsundelete, to find files that the filesystem is still aware of;
- scalpel, to find files based on their contents.
- Scalpel is great at finding stuff efficiently, but it works best when files
can be defined by both a start (header) signature and an end (footer)
signature. However, the Microsoft Office OpenXML file format has a
recognizable header, but no standard footer. In these circumstances, Scalpel
reads up to a certain limit that you specify in its configuration file. (To
retrieve large Powerpoint files, this limit needs to be substantial, e.g.
50 Mb or more, depending on your ways of working with Powerpoint.)
- That means that files emerging from a Scalpel search for DOCX/PPTX/XLSX files
may be
- false positives, having nothing to do with Office;
- clean Office files (the least likely category!);
- Office files with garbage stuck on the end.
- The OpenXML file format is just a zip file. If you stick too much garbage on
the end of a zip file, zip readers will see it as corrupt.
- THIS TOOL detects (and optionally moves) potentially corrupted zipfiles based
on file contents, by unzipping the file and checking for "inner" files with
names like:
File type Contents filename signature (regular expression)
----------------------------------------------------------------
DOCX {DOCX_CONTENTS_REGEX_STR}
PPTX {PPTX_CONTENTS_REGEX_STR}
XLSX {XLSX_CONTENTS_REGEX_STR}
- WARNING: it's possible for an OpenXML file to contain more than one of these.
If so, they may be mis-classified.
- If a file is not immediately readable as a zip, it uses Linux's "zip -FF" to
repair zip files with corrupted ends, and tries again.
- Having found valid-looking files, you can elect to move them elsewhere.
- As an additional and VERY DANGEROUS operation, you can elect to delete files
that this tool doesn't recognize. (Why? Because a 450Gb disk might produce
well in excess of 1.7Tb of candidate files; many will be false positives and
even the true positives will all be expanded to your file size limit, e.g.
50 Mb. You may have a problem with available disk space, so running this tool
regularly allows you to clear up the junk. Use the --run_every option to help
with this.)
""".format(
DOCX_CONTENTS_REGEX_STR=DOCX_CONTENTS_REGEX_STR,
PPTX_CONTENTS_REGEX_STR=PPTX_CONTENTS_REGEX_STR,
XLSX_CONTENTS_REGEX_STR=XLSX_CONTENTS_REGEX_STR,
)
)
parser.add_argument(
"filename", nargs="+",
help="File(s) to check. You can also specify directores if you use "
"--recursive"
)
parser.add_argument(
"--recursive", action="store_true",
help="Allow search to descend recursively into any directories "
"encountered."
)
parser.add_argument(
"--skip_files", nargs="*", default=[],
help="File pattern(s) to skip. You can specify wildcards like '*.txt' "
"(but you will have to enclose that pattern in quotes under "
"UNIX-like operating systems). The basename of each file will be "
"tested against these filenames/patterns. Consider including "
"Scalpel's 'audit.txt'."
)
parser.add_argument(
"--filetypes", nargs="+", default=FILETYPES,
help="File types to check. Options: {}".format(FILETYPES)
)
parser.add_argument(
"--move_to",
help="If the file is recognized as one of the specified file types, "
"move it to the directory specified here."
)
parser.add_argument(
"--delete_if_not_specified_file_type", action="store_true",
help="If a file is NOT recognized as one of the specified file types, "
"delete it. VERY DANGEROUS."
)
parser.add_argument(
"--run_repeatedly", type=int,
help="Run the tool repeatedly with a pause of <run_repeatedly> "
"seconds between runs. (For this to work well with the move/"
"delete options, you should specify one or more DIRECTORIES in "
"the 'filename' arguments, not files, and you will need the "
"--recursive option.)"
)
parser.add_argument(
"--nprocesses", type=int, default=multiprocessing.cpu_count(),
help="Specify the number of processes to run in parallel."
)
parser.add_argument(
"--verbose", action="store_true",
help="Verbose output"
)
parser.add_argument(
"--show_zip_output", action="store_true",
help="Verbose output from the external 'zip' tool"
)
args = parser.parse_args()
main_only_quicksetup_rootlogger(
level=logging.DEBUG if args.verbose else logging.INFO,
with_process_id=True
)
# Further argument checks
if args.move_to:
if not os.path.isdir(args.move_to):
raise ValueError("Destination directory {!r} is not a "
"directory".format(args.move_to))
if not args.filetypes:
raise ValueError("No file type to scan for")
filetypes = [ft.lower() for ft in args.filetypes]
if any(ft not in FILETYPES for ft in filetypes):
raise ValueError("Invalid filetypes; choose from {}".format(FILETYPES))
assert shutil.which("zip"), "Need 'zip' tool!"
# Repeated scanning loop
while True:
log.info("Starting scan.")
log.info("- Looking for filetypes {}", filetypes)
log.info("- Scanning files/directories {!r}{}",
args.filename,
" recursively" if args.recursive else "")
log.info("- Skipping files matching {!r}", args.skip_files)
log.info("- Using {} simultaneous processes", args.nprocesses)
if args.move_to:
log.info("- Moving target files to " + args.move_to)
if args.delete_if_not_specified_file_type:
log.info("- Deleting non-target files.")
# Iterate through files
pool = multiprocessing.Pool(processes=args.nprocesses)
for filename in gen_filenames(starting_filenames=args.filename,
recursive=args.recursive):
src_basename = os.path.basename(filename)
if any(fnmatch.fnmatch(src_basename, pattern)
for pattern in args.skip_files):
log.info("Skipping file as ordered: " + filename)
continue
exists, locked = exists_locked(filename)
if locked or not exists:
log.info("Skipping currently inaccessible file: " + filename)
continue
kwargs = {
'filename': filename,
'filetypes': filetypes,
'move_to': args.move_to,
'delete_if_not_specified_file_type':
args.delete_if_not_specified_file_type,
'show_zip_output': args.show_zip_output,
}
# log.critical("start")
pool.apply_async(process_file, [], kwargs)
# result = pool.apply_async(process_file, [], kwargs)
# result.get() # will re-raise any child exceptions
# ... but it waits for the process to complete! That's no help.
# log.critical("next")
# ... https://stackoverflow.com/questions/22094852/how-to-catch-exceptions-in-workers-in-multiprocessing # noqa
pool.close()
pool.join()
log.info("Finished scan.")
if args.run_repeatedly is None:
break
log.info("Sleeping for {} s...", args.run_repeatedly)
sleep(args.run_repeatedly) |
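The detection idea spelled out in the help text — an OpenXML document is a zip whose member names betray the Office type — can be sketched with the standard library alone. The regexes below are simplified stand-ins for the DOCX/PPTX/XLSX contents signatures referenced above, not the tool's real patterns:

```python
import re
import zipfile

# simplified stand-ins for the real contents-filename signatures
SIGNATURES = {
    "docx": re.compile(r"^word/"),
    "pptx": re.compile(r"^ppt/"),
    "xlsx": re.compile(r"^xl/"),
}

def guess_openxml_type(path):
    """Return 'docx'/'pptx'/'xlsx' when the zip member names match, else None."""
    try:
        with zipfile.ZipFile(path) as zf:
            names = zf.namelist()
    except zipfile.BadZipFile:
        # corrupt or garbage-padded file; the real tool re-tries after 'zip -FF'
        return None
    for filetype, regex in SIGNATURES.items():
        if any(regex.match(name) for name in names):
            return filetype
    return None
```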
def as_sql(self, *args, **kwargs):
"""
Overrides the :class:`SQLUpdateCompiler` method in order to remove any
CTE-related WHERE clauses, which are not necessary for UPDATE queries,
yet may have been added if this query was cloned from a CTEQuery.
:return:
:rtype:
"""
CTEQuery._remove_cte_where(self.query)
return super(self.__class__, self).as_sql(*args, **kwargs) | Overrides the :class:`SQLUpdateCompiler` method in order to remove any
CTE-related WHERE clauses, which are not necessary for UPDATE queries,
yet may have been added if this query was cloned from a CTEQuery.
:return:
:rtype: | Below is the instruction that describes the task:
### Input:
Overrides the :class:`SQLUpdateCompiler` method in order to remove any
CTE-related WHERE clauses, which are not necessary for UPDATE queries,
yet may have been added if this query was cloned from a CTEQuery.
:return:
:rtype:
### Response:
def as_sql(self, *args, **kwargs):
"""
Overrides the :class:`SQLUpdateCompiler` method in order to remove any
CTE-related WHERE clauses, which are not necessary for UPDATE queries,
yet may have been added if this query was cloned from a CTEQuery.
:return:
:rtype:
"""
CTEQuery._remove_cte_where(self.query)
return super(self.__class__, self).as_sql(*args, **kwargs) |
def _inherited_dashboard(dashboard, base_dashboards_from_pillar, ret):
'''Return a dashboard with properties from parents.'''
base_dashboards = []
for base_dashboard_from_pillar in base_dashboards_from_pillar:
base_dashboard = __salt__['pillar.get'](base_dashboard_from_pillar)
if base_dashboard:
base_dashboards.append(base_dashboard)
elif base_dashboard_from_pillar != _DEFAULT_DASHBOARD_PILLAR:
ret.setdefault('warnings', [])
warning_message = 'Cannot find dashboard pillar "{0}".'.format(
base_dashboard_from_pillar)
if warning_message not in ret['warnings']:
ret['warnings'].append(warning_message)
base_dashboards.append(dashboard)
result_dashboard = {}
tags = set()
for dashboard in base_dashboards:
tags.update(dashboard.get('tags', []))
result_dashboard.update(dashboard)
result_dashboard['tags'] = list(tags)
return result_dashboard | Return a dashboard with properties from parents. | Below is the instruction that describes the task:
### Input:
Return a dashboard with properties from parents.
### Response:
def _inherited_dashboard(dashboard, base_dashboards_from_pillar, ret):
'''Return a dashboard with properties from parents.'''
base_dashboards = []
for base_dashboard_from_pillar in base_dashboards_from_pillar:
base_dashboard = __salt__['pillar.get'](base_dashboard_from_pillar)
if base_dashboard:
base_dashboards.append(base_dashboard)
elif base_dashboard_from_pillar != _DEFAULT_DASHBOARD_PILLAR:
ret.setdefault('warnings', [])
warning_message = 'Cannot find dashboard pillar "{0}".'.format(
base_dashboard_from_pillar)
if warning_message not in ret['warnings']:
ret['warnings'].append(warning_message)
base_dashboards.append(dashboard)
result_dashboard = {}
tags = set()
for dashboard in base_dashboards:
tags.update(dashboard.get('tags', []))
result_dashboard.update(dashboard)
result_dashboard['tags'] = list(tags)
return result_dashboard |
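The merge semantics above reduce to "later dashboards override earlier ones, tags are unioned"; a standalone sketch of that ordering with made-up dicts instead of pillar data:

```python
base = {"title": "base", "rows": [], "tags": ["base"]}
child = {"title": "web servers", "tags": ["nginx"]}

result, tags = {}, set()
for dashboard in (base, child):   # parents first, so the child wins on conflicts
    tags.update(dashboard.get("tags", []))
    result.update(dashboard)
result["tags"] = sorted(tags)

print(result)
# {'title': 'web servers', 'rows': [], 'tags': ['base', 'nginx']}
```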
def numlistbetween(num1, num2, option='list', listoption='string'):
"""
List Or Count The Numbers Between Two Numbers
"""
if option == 'list':
if listoption == 'string':
output = ''
output += str(num1)
for currentnum in range(num1 + 1, num2 + 1):
output += ','
output += str(currentnum)
elif listoption == 'list':
output = []
for currentnum in range(num1, num2 + 1):
output.append(str(currentnum))
return output
elif option == 'count':
return num2 - num1 | List Or Count The Numbers Between Two Numbers | Below is the instruction that describes the task:
### Input:
List Or Count The Numbers Between Two Numbers
### Response:
def numlistbetween(num1, num2, option='list', listoption='string'):
"""
List Or Count The Numbers Between Two Numbers
"""
if option == 'list':
if listoption == 'string':
output = ''
output += str(num1)
for currentnum in range(num1 + 1, num2 + 1):
output += ','
output += str(currentnum)
elif listoption == 'list':
output = []
for currentnum in range(num1, num2 + 1):
output.append(str(currentnum))
return output
elif option == 'count':
return num2 - num1 |
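The same two behaviours fall out directly from range() and str.join(); a quick equivalent for comparison:

```python
num1, num2 = 3, 7

as_string = ",".join(str(n) for n in range(num1, num2 + 1))
as_list = [str(n) for n in range(num1, num2 + 1)]
count = num2 - num1

print(as_string)  # 3,4,5,6,7
print(as_list)    # ['3', '4', '5', '6', '7']
print(count)      # 4
```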
def configuration_get_default_folder():
"""
Return the default folder where user-specific data is stored.
This depends of the system on which Python is running,
:return: path to the user-specific configuration data folder
"""
system = platform.system()
if system == 'Linux':
# https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
sys_config_path = Path(os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")))
elif system == 'Windows':
sys_config_path = Path(os.getenv('APPDATA', ''))
else:
log.error('Unknown system: "{system}" (using default configuration path)'.format(system=system))
sys_config_path = Path()
log.debug('User-specific system configuration folder="{sys_config_path}"'.format(
sys_config_path=sys_config_path))
sys_config = sys_config_path / PROJECT_TITLE
log.debug('User-specific {project} configuration folder="{sys_config}"'.format(
project=PROJECT_TITLE, sys_config=sys_config))
return sys_config | Return the default folder where user-specific data is stored.
This depends of the system on which Python is running,
:return: path to the user-specific configuration data folder | Below is the instruction that describes the task:
### Input:
Return the default folder where user-specific data is stored.
This depends of the system on which Python is running,
:return: path to the user-specific configuration data folder
### Response:
def configuration_get_default_folder():
"""
Return the default folder where user-specific data is stored.
This depends of the system on which Python is running,
:return: path to the user-specific configuration data folder
"""
system = platform.system()
if system == 'Linux':
# https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
sys_config_path = Path(os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")))
elif system == 'Windows':
sys_config_path = Path(os.getenv('APPDATA', ''))
else:
log.error('Unknown system: "{system}" (using default configuration path)'.format(system=system))
sys_config_path = Path()
log.debug('User-specific system configuration folder="{sys_config_path}"'.format(
sys_config_path=sys_config_path))
sys_config = sys_config_path / PROJECT_TITLE
log.debug('User-specific {project} configuration folder="{sys_config}"'.format(
project=PROJECT_TITLE, sys_config=sys_config))
return sys_config |
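A stripped-down sketch of the same per-OS lookup using only the standard library (the application name here is a placeholder, not the real PROJECT_TITLE):

```python
import os
import platform
from pathlib import Path

APP_NAME = "example-app"  # placeholder application name

def user_config_dir(app_name=APP_NAME):
    system = platform.system()
    if system == "Linux":
        base = Path(os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config")))
    elif system == "Windows":
        base = Path(os.getenv("APPDATA", ""))
    else:  # e.g. Darwin: fall back to the current directory, as the original does
        base = Path()
    return base / app_name

print(user_config_dir())
```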
def default_capability(self):
"""Set capability name in md.
Every ResourceSync document should have the top-level
capability attributes.
"""
if ('capability' not in self.md and self.capability_name is not None):
self.md['capability'] = self.capability_name | Set capability name in md.
Every ResourceSync document should have the top-level
capability attributes. | Below is the instruction that describes the task:
### Input:
Set capability name in md.
Every ResourceSync document should have the top-level
capability attributes.
### Response:
def default_capability(self):
"""Set capability name in md.
Every ResourceSync document should have the top-level
capability attributes.
"""
if ('capability' not in self.md and self.capability_name is not None):
self.md['capability'] = self.capability_name |
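A minimal standalone sketch of how this guard behaves, assuming the default_capability function above is available at module level; ListDocument and 'resourcelist' are illustrative placeholders:

class ListDocument:
    capability_name = 'resourcelist'   # would normally be set by the concrete subclass
    def __init__(self):
        self.md = {}

ListDocument.default_capability = default_capability   # reuse the function above as a method

doc = ListDocument()
doc.default_capability()
print(doc.md)              # {'capability': 'resourcelist'}
doc.default_capability()   # a second call leaves the existing entry untouched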
def parse_get_list_response(content):
"""Parses of response content XML from WebDAV server and extract file and directory names.
:param content: the XML content of HTTP response from WebDAV server for getting list of files by remote path.
:return: list of extracted file or directory names.
"""
try:
tree = etree.fromstring(content)
hrees = [Urn.separate + unquote(urlsplit(hree.text).path) for hree in tree.findall('.//{DAV:}href')]
return [Urn(hree) for hree in hrees]
except etree.XMLSyntaxError:
        return list() | Parses response content XML from WebDAV server and extracts file and directory names.
:param content: the XML content of HTTP response from WebDAV server for getting list of files by remote path.
:return: list of extracted file or directory names. | Below is the instruction that describes the task:
### Input:
Parses response content XML from WebDAV server and extracts file and directory names.
:param content: the XML content of HTTP response from WebDAV server for getting list of files by remote path.
:return: list of extracted file or directory names.
### Response:
def parse_get_list_response(content):
"""Parses of response content XML from WebDAV server and extract file and directory names.
:param content: the XML content of HTTP response from WebDAV server for getting list of files by remote path.
:return: list of extracted file or directory names.
"""
try:
tree = etree.fromstring(content)
hrees = [Urn.separate + unquote(urlsplit(hree.text).path) for hree in tree.findall('.//{DAV:}href')]
return [Urn(hree) for hree in hrees]
except etree.XMLSyntaxError:
return list() |
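A runnable sketch of the href-extraction step using only the standard library; the real function additionally prefixes each path with Urn.separate and wraps it in the library's Urn helper:

from urllib.parse import unquote, urlsplit
from xml.etree import ElementTree as etree

# A minimal WebDAV PROPFIND (multistatus) response body.
content = b"""<?xml version="1.0"?>
<d:multistatus xmlns:d="DAV:">
  <d:response><d:href>/dav/docs/</d:href></d:response>
  <d:response><d:href>/dav/docs/report%201.txt</d:href></d:response>
</d:multistatus>"""

tree = etree.fromstring(content)
paths = [unquote(urlsplit(el.text).path) for el in tree.findall('.//{DAV:}href')]
print(paths)   # ['/dav/docs/', '/dav/docs/report 1.txt']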
def bar(self, x=None, y=None, **kwds):
"""
Vertical bar plot.
A bar plot is a plot that presents categorical data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another. If not specified,
the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
**kwds
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
An ndarray is returned with one :class:`matplotlib.axes.Axes`
per column when ``subplots=True``.
See Also
--------
DataFrame.plot.barh : Horizontal bar plot.
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.bar : Make a bar plot with matplotlib.
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
>>> ax = df.plot.bar(x='lab', y='val', rot=0)
Plot a whole dataframe to a bar plot. Each column is assigned a
distinct color, and each row is nested in a group along the
horizontal axis.
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.bar(rot=0)
Instead of nesting, the figure can be split by column with
``subplots=True``. In this case, a :class:`numpy.ndarray` of
:class:`matplotlib.axes.Axes` are returned.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(rot=0, subplots=True)
>>> axes[1].legend(loc=2) # doctest: +SKIP
Plot a single column.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(y='speed', rot=0)
Plot only selected categories for the DataFrame.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(x='lifespan', rot=0)
"""
return self(kind='bar', x=x, y=y, **kwds) | Vertical bar plot.
A bar plot is a plot that presents categorical data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another. If not specified,
the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
**kwds
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
An ndarray is returned with one :class:`matplotlib.axes.Axes`
per column when ``subplots=True``.
See Also
--------
DataFrame.plot.barh : Horizontal bar plot.
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.bar : Make a bar plot with matplotlib.
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
>>> ax = df.plot.bar(x='lab', y='val', rot=0)
Plot a whole dataframe to a bar plot. Each column is assigned a
distinct color, and each row is nested in a group along the
horizontal axis.
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.bar(rot=0)
Instead of nesting, the figure can be split by column with
``subplots=True``. In this case, a :class:`numpy.ndarray` of
:class:`matplotlib.axes.Axes` are returned.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(rot=0, subplots=True)
>>> axes[1].legend(loc=2) # doctest: +SKIP
Plot a single column.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(y='speed', rot=0)
Plot only selected categories for the DataFrame.
.. plot::
:context: close-figs
    >>> ax = df.plot.bar(x='lifespan', rot=0) | Below is the instruction that describes the task:
### Input:
Vertical bar plot.
A bar plot is a plot that presents categorical data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another. If not specified,
the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
**kwds
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
An ndarray is returned with one :class:`matplotlib.axes.Axes`
per column when ``subplots=True``.
See Also
--------
DataFrame.plot.barh : Horizontal bar plot.
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.bar : Make a bar plot with matplotlib.
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
>>> ax = df.plot.bar(x='lab', y='val', rot=0)
Plot a whole dataframe to a bar plot. Each column is assigned a
distinct color, and each row is nested in a group along the
horizontal axis.
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.bar(rot=0)
Instead of nesting, the figure can be split by column with
``subplots=True``. In this case, a :class:`numpy.ndarray` of
:class:`matplotlib.axes.Axes` are returned.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(rot=0, subplots=True)
>>> axes[1].legend(loc=2) # doctest: +SKIP
Plot a single column.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(y='speed', rot=0)
Plot only selected categories for the DataFrame.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(x='lifespan', rot=0)
### Response:
def bar(self, x=None, y=None, **kwds):
"""
Vertical bar plot.
A bar plot is a plot that presents categorical data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another. If not specified,
the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
**kwds
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
An ndarray is returned with one :class:`matplotlib.axes.Axes`
per column when ``subplots=True``.
See Also
--------
DataFrame.plot.barh : Horizontal bar plot.
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.bar : Make a bar plot with matplotlib.
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
>>> ax = df.plot.bar(x='lab', y='val', rot=0)
Plot a whole dataframe to a bar plot. Each column is assigned a
distinct color, and each row is nested in a group along the
horizontal axis.
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.bar(rot=0)
Instead of nesting, the figure can be split by column with
``subplots=True``. In this case, a :class:`numpy.ndarray` of
:class:`matplotlib.axes.Axes` are returned.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(rot=0, subplots=True)
>>> axes[1].legend(loc=2) # doctest: +SKIP
Plot a single column.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(y='speed', rot=0)
Plot only selected categories for the DataFrame.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(x='lifespan', rot=0)
"""
return self(kind='bar', x=x, y=y, **kwds) |
def remove_straddlers(events, time, s_freq, toler=0.1):
"""Reject an event if it straddles a stitch, by comparing its
duration to its timespan.
Parameters
----------
events : ndarray (dtype='int')
N x M matrix with start, ..., end samples
time : ndarray (dtype='float')
vector with time points
s_freq : float
sampling frequency
toler : float, def=0.1
maximum tolerated difference between event duration and timespan
Returns
-------
ndarray (dtype='int')
N x M matrix with start , ..., end samples
"""
dur = (events[:, -1] - 1 - events[:, 0]) / s_freq
continuous = time[events[:, -1] - 1] - time[events[:, 0]] - dur < toler
return events[continuous, :] | Reject an event if it straddles a stitch, by comparing its
duration to its timespan.
Parameters
----------
events : ndarray (dtype='int')
N x M matrix with start, ..., end samples
time : ndarray (dtype='float')
vector with time points
s_freq : float
sampling frequency
toler : float, def=0.1
maximum tolerated difference between event duration and timespan
Returns
-------
ndarray (dtype='int')
    N x M matrix with start , ..., end samples | Below is the instruction that describes the task:
### Input:
Reject an event if it straddles a stitch, by comparing its
duration to its timespan.
Parameters
----------
events : ndarray (dtype='int')
N x M matrix with start, ..., end samples
time : ndarray (dtype='float')
vector with time points
s_freq : float
sampling frequency
toler : float, def=0.1
maximum tolerated difference between event duration and timespan
Returns
-------
ndarray (dtype='int')
N x M matrix with start , ..., end samples
### Response:
def remove_straddlers(events, time, s_freq, toler=0.1):
"""Reject an event if it straddles a stitch, by comparing its
duration to its timespan.
Parameters
----------
events : ndarray (dtype='int')
N x M matrix with start, ..., end samples
time : ndarray (dtype='float')
vector with time points
s_freq : float
sampling frequency
toler : float, def=0.1
maximum tolerated difference between event duration and timespan
Returns
-------
ndarray (dtype='int')
N x M matrix with start , ..., end samples
"""
dur = (events[:, -1] - 1 - events[:, 0]) / s_freq
continuous = time[events[:, -1] - 1] - time[events[:, 0]] - dur < toler
return events[continuous, :] |
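A worked example, assuming the remove_straddlers definition above is in scope; the second event crosses the gap between two concatenated recording blocks and is therefore dropped:

import numpy as np

s_freq = 10
# Two recording blocks stitched together: 0.0-0.9 s and 5.0-5.9 s.
time = np.concatenate([np.arange(0, 1, 0.1), np.arange(5, 6, 0.1)])
events = np.array([[0, 10],    # lies entirely in the first block -> kept
                   [5, 15]])   # straddles the stitch             -> rejected
print(remove_straddlers(events, time, s_freq))   # [[ 0 10]]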
def _build_dictionary(self, models):
"""
Build a dictionary with the models.
:param models: The models
:type models: Collection
"""
for model in models:
key = getattr(model, self._morph_type, None)
if key:
foreign = getattr(model, self._foreign_key)
if key not in self._dictionary:
self._dictionary[key] = {}
if foreign not in self._dictionary[key]:
self._dictionary[key][foreign] = []
self._dictionary[key][foreign].append(model) | Build a dictionary with the models.
:param models: The models
:type models: Collection | Below is the instruction that describes the task:
### Input:
Build a dictionary with the models.
:param models: The models
:type models: Collection
### Response:
def _build_dictionary(self, models):
"""
Build a dictionary with the models.
:param models: The models
:type models: Collection
"""
for model in models:
key = getattr(model, self._morph_type, None)
if key:
foreign = getattr(model, self._foreign_key)
if key not in self._dictionary:
self._dictionary[key] = {}
if foreign not in self._dictionary[key]:
self._dictionary[key][foreign] = []
self._dictionary[key][foreign].append(model) |
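An illustration of the dictionary shape this builds, using stand-in models and hypothetical attribute names ('commentable_type' / 'commentable_id'); the real method reads the attribute names stored in self._morph_type and self._foreign_key:

from types import SimpleNamespace

models = [
    SimpleNamespace(commentable_type='Post',  commentable_id=1, body='first'),
    SimpleNamespace(commentable_type='Post',  commentable_id=1, body='second'),
    SimpleNamespace(commentable_type='Video', commentable_id=7, body='third'),
]

dictionary = {}
for model in models:
    key = getattr(model, 'commentable_type', None)
    if key:
        foreign = getattr(model, 'commentable_id')
        # Same grouping as _build_dictionary, written with setdefault.
        dictionary.setdefault(key, {}).setdefault(foreign, []).append(model)

print({k: list(v) for k, v in dictionary.items()})   # {'Post': [1], 'Video': [7]}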
def environ(self):
"""
Add path_info to the request's META dictionary.
"""
environ = dict(self._request.META)
environ['PATH_INFO'] = self._request.path_info
        return environ | Add path_info to the request's META dictionary. | Below is the instruction that describes the task:
### Input:
Add path_info to the request's META dictionary.
### Response:
def environ(self):
"""
Add path_info to the request's META dictionary.
"""
environ = dict(self._request.META)
environ['PATH_INFO'] = self._request.path_info
return environ |
def inversefunc(func,
y_values=None,
domain=None,
image=None,
open_domain=None,
args=(),
accuracy=2):
r"""Obtain the inverse of a function.
Returns the numerical inverse of the function `f`. It may return a callable
that can be used to calculate the inverse, or the inverse of certain points
depending on the `y_values` argument.
In order for the numerical inverse to exist in its domain, the
input function must have, continuous, strictly monotonic behavior i.e. be
purely decreasing or purely increasing in that domain. By default the
domain interval spans all the real numbers, however it can be restricted
with the `domain` and `open_domain` arguments. The image of the function
in the interval may be provided, for cases where the function is non
continuous right at the end of an open interval.
Parameters
----------
func : callable
Callable representing the function to be inverted, able to take a
ndarray or an scalar and return an object of the same kind with the
evaluation of the function. If `func` takes many arguments, it is
inverted along the axis corresponding to the first argument.
The function must not diverge and have a continuous strictly monotonic
behavior in the chosen interval.
y_values : float, ndarray, optional
Values for which calculate the inverse function. If set to None, then
a callable that can be used to calculate the inverse of values is
returned. Default None.
domain : float, ndarray, optional
Boundaries of the domain (`domain[0]`, `domain[1]`).
`domain[1]` must be larger than `domain[0]`.
None values are assumed to be no boundary in that direction.
A single scalar value will set it to [`domain`, None].
Default None (-Inf, Inf).
open_domain : bool, ndarray, optional
Whether the domain is an open interval at each of the ends.
A single scalar boolean will set it to [`open_domain`, `open_domain`].
Default None [False, False].
image : float, ndarray, optional
Image of the function in the domain (`image[0]`, `image[1]`).
`image[1]` must be larger than `image[0]`.
None values are assumed to be no boundary in that direction.
Default None, this is (-Inf, Inf) if domain is None, or the limits
set by func(domain[0]) and func(domain[1]).
args : tuple, optional
Extra arguments to pass to `func`. Default ().
accuracy : int, optional
Number of digits for the desired accuracy. It will give a warning
if the accuracy is worse than this.
Default 2.
Returns
-------
callable or ndarray
Inverse function of `func`. It can take scalars or ndarrays, and return
objects of the same kind with the calculated inverse values.
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
>>> from pynverse import inversefunc
>>> import numpy as np
>>> cube = (lambda x: x**3)
>>> invcube = inversefunc(cube)
>>> invcube(27) # Should give 3
array(3.0000000063797567)
>>> invsquare = inversefunc(np.power, args=(2), domain=0)
>>> invsquare([4, 16, 64]) # Should give [2, 4, 8]
array([ 2., 4., 8.])
>>> inversefunc(np.log10, y_values=-2, # Should give 0.01
... domain=0, open_domain=True)
array(0.0099999999882423)
>>> inversefunc(np.cos, y_values=[1, 0, -1], # Should give [0, pi / 2, pi]
... domain=[0, np.pi])
array([ 0. , 1.57079632, 3.14159265])
>>> invtan = inversefunc(np.tan,
... domain=[-np.pi / 2, np.pi / 2],
... open_domain=True)
>>> invtan([1, 0, -1]) # Should give [pi / 4, 0, -pi / 4]
array([ 7.85398163e-01, 1.29246971e-26, -7.85398163e-01])
"""
domain, image, open_domain, args = _normparams_inversefunc(domain,
image,
open_domain,
args)
ymin, ymax = image
xmin, xmax = domain
xmin_open, xmax_open = open_domain
# Calculating if the function is increasing or decreasing, using ref points
# anywhere in the valid range (Function has to be strictly monotonic)
ref1, ref2 = _get_valid_refpoints(xmin, xmax)
trend = np.sign(func(ref2, *args) - func(ref1, *args))
if trend == 0:
raise ValueError("Function is not strictly monotonic")
# Calculating the image by default
if ymin is None:
ymin = _auto_ymin(func, args, xmin, xmax, trend)
if ymax is None:
ymax = _auto_ymax(func, args, xmin, xmax, trend)
# Creating bounded function
def bounded_f(x):
if xmin is not None and (x < xmin or (x == xmin and xmin_open)):
val = -1 * np.inf * trend
elif xmax is not None and (x > xmax or (x == xmax and xmax_open)):
val = np.inf * trend
else:
val = func(x, *args)
return val
min_kwargs = {}
min_kwargs['bracket'] = (ref1, ref2)
min_kwargs['tol'] = 1.48e-08
min_kwargs['method'] = 'Brent'
def inv(yin):
yin = np.asarray(yin, dtype=np.float64)
shapein = yin.shape
yin = yin.flatten()
if ymin is not None:
if (xmin_open and trend == 1) or (xmax_open and trend == -1):
mask = yin <= ymin
else:
mask = yin < ymin
if yin[mask].size > 0:
raise ValueError("Requested values %s lower than the"
" lower limit %g of the image" %
(yin[mask], ymin))
if ymax is not None:
if (xmax_open and trend == 1) or (xmin_open and trend == -1):
mask = yin >= ymax
else:
mask = yin > ymax
if yin[mask].size > 0:
raise ValueError("Requested values %s higher than the"
" higher limit %g of the image" %
(yin[mask], ymax))
results = yin.copy() * np.nan
resultsmask = np.zeros(yin.shape, dtype=np.bool)
for j in range(yin.size):
if xmax is not None:
if bounded_f(xmax) == yin[j]:
results[j] = xmax
resultsmask[j] = True
continue
if xmin is not None:
if bounded_f(xmin) == yin[j]:
results[j] = xmin
resultsmask[j] = True
continue
optimizer = (lambda x, j=j,
bounded_f=bounded_f: (((bounded_f(x) - yin[j]))**2))
try:
with warnings.catch_warnings(record=True):
result = minimize_scalar(optimizer, **min_kwargs)
results[j] = result.x
resultsmask[j] = result.success
except:
resultsmask[j] = False
if any(~resultsmask):
warnings.warn("Trouble calculating inverse for values: "
"%s" % str(yin[~resultsmask]), RuntimeWarning)
try:
np.testing.assert_array_almost_equal(yin, func(results, *args),
decimal=accuracy)
except AssertionError:
warnings.warn("Results obtained with less than %g "
"decimal digits of accuracy"
% accuracy, RuntimeWarning)
return results.reshape(shapein)
if y_values is None:
return inv
else:
return inv(y_values) | r"""Obtain the inverse of a function.
Returns the numerical inverse of the function `f`. It may return a callable
that can be used to calculate the inverse, or the inverse of certain points
depending on the `y_values` argument.
In order for the numerical inverse to exist in its domain, the
input function must have, continuous, strictly monotonic behavior i.e. be
purely decreasing or purely increasing in that domain. By default the
domain interval spans all the real numbers, however it can be restricted
with the `domain` and `open_domain` arguments. The image of the function
in the interval may be provided, for cases where the function is non
continuous right at the end of an open interval.
Parameters
----------
func : callable
Callable representing the function to be inverted, able to take a
ndarray or an scalar and return an object of the same kind with the
evaluation of the function. If `func` takes many arguments, it is
inverted along the axis corresponding to the first argument.
The function must not diverge and have a continuous strictly monotonic
behavior in the chosen interval.
y_values : float, ndarray, optional
Values for which calculate the inverse function. If set to None, then
a callable that can be used to calculate the inverse of values is
returned. Default None.
domain : float, ndarray, optional
Boundaries of the domain (`domain[0]`, `domain[1]`).
`domain[1]` must be larger than `domain[0]`.
None values are assumed to be no boundary in that direction.
A single scalar value will set it to [`domain`, None].
Default None (-Inf, Inf).
open_domain : bool, ndarray, optional
Whether the domain is an open interval at each of the ends.
A single scalar boolean will set it to [`open_domain`, `open_domain`].
Default None [False, False].
image : float, ndarray, optional
Image of the function in the domain (`image[0]`, `image[1]`).
`image[1]` must be larger than `image[0]`.
None values are assumed to be no boundary in that direction.
Default None, this is (-Inf, Inf) if domain is None, or the limits
set by func(domain[0]) and func(domain[1]).
args : tuple, optional
Extra arguments to pass to `func`. Default ().
accuracy : int, optional
Number of digits for the desired accuracy. It will give a warning
if the accuracy is worse than this.
Default 2.
Returns
-------
callable or ndarray
Inverse function of `func`. It can take scalars or ndarrays, and return
objects of the same kind with the calculated inverse values.
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
>>> from pynverse import inversefunc
>>> import numpy as np
>>> cube = (lambda x: x**3)
>>> invcube = inversefunc(cube)
>>> invcube(27) # Should give 3
array(3.0000000063797567)
>>> invsquare = inversefunc(np.power, args=(2), domain=0)
>>> invsquare([4, 16, 64]) # Should give [2, 4, 8]
array([ 2., 4., 8.])
>>> inversefunc(np.log10, y_values=-2, # Should give 0.01
... domain=0, open_domain=True)
array(0.0099999999882423)
>>> inversefunc(np.cos, y_values=[1, 0, -1], # Should give [0, pi / 2, pi]
... domain=[0, np.pi])
array([ 0. , 1.57079632, 3.14159265])
>>> invtan = inversefunc(np.tan,
... domain=[-np.pi / 2, np.pi / 2],
... open_domain=True)
>>> invtan([1, 0, -1]) # Should give [pi / 4, 0, -pi / 4]
array([ 7.85398163e-01,  1.29246971e-26, -7.85398163e-01]) | Below is the instruction that describes the task:
### Input:
r"""Obtain the inverse of a function.
Returns the numerical inverse of the function `f`. It may return a callable
that can be used to calculate the inverse, or the inverse of certain points
depending on the `y_values` argument.
In order for the numerical inverse to exist in its domain, the
input function must have, continuous, strictly monotonic behavior i.e. be
purely decreasing or purely increasing in that domain. By default the
domain interval spans all the real numbers, however it can be restricted
with the `domain` and `open_domain` arguments. The image of the function
in the interval may be provided, for cases where the function is non
continuous right at the end of an open interval.
Parameters
----------
func : callable
Callable representing the function to be inverted, able to take a
ndarray or an scalar and return an object of the same kind with the
evaluation of the function. If `func` takes many arguments, it is
inverted along the axis corresponding to the first argument.
The function must not diverge and have a continuous strictly monotonic
behavior in the chosen interval.
y_values : float, ndarray, optional
Values for which calculate the inverse function. If set to None, then
a callable that can be used to calculate the inverse of values is
returned. Default None.
domain : float, ndarray, optional
Boundaries of the domain (`domain[0]`, `domain[1]`).
`domain[1]` must be larger than `domain[0]`.
None values are assumed to be no boundary in that direction.
A single scalar value will set it to [`domain`, None].
Default None (-Inf, Inf).
open_domain : bool, ndarray, optional
Whether the domain is an open interval at each of the ends.
A single scalar boolean will set it to [`open_domain`, `open_domain`].
Default None [False, False].
image : float, ndarray, optional
Image of the function in the domain (`image[0]`, `image[1]`).
`image[1]` must be larger than `image[0]`.
None values are assumed to be no boundary in that direction.
Default None, this is (-Inf, Inf) if domain is None, or the limits
set by func(domain[0]) and func(domain[1]).
args : tuple, optional
Extra arguments to pass to `func`. Default ().
accuracy : int, optional
Number of digits for the desired accuracy. It will give a warning
if the accuracy is worse than this.
Default 2.
Returns
-------
callable or ndarray
Inverse function of `func`. It can take scalars or ndarrays, and return
objects of the same kind with the calculated inverse values.
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
>>> from pynverse import inversefunc
>>> import numpy as np
>>> cube = (lambda x: x**3)
>>> invcube = inversefunc(cube)
>>> invcube(27) # Should give 3
array(3.0000000063797567)
>>> invsquare = inversefunc(np.power, args=(2), domain=0)
>>> invsquare([4, 16, 64]) # Should give [2, 4, 8]
array([ 2., 4., 8.])
>>> inversefunc(np.log10, y_values=-2, # Should give 0.01
... domain=0, open_domain=True)
array(0.0099999999882423)
>>> inversefunc(np.cos, y_values=[1, 0, -1], # Should give [0, pi / 2, pi]
... domain=[0, np.pi])
array([ 0. , 1.57079632, 3.14159265])
>>> invtan = inversefunc(np.tan,
... domain=[-np.pi / 2, np.pi / 2],
... open_domain=True)
>>> invtan([1, 0, -1]) # Should give [pi / 4, 0, -pi / 4]
array([ 7.85398163e-01, 1.29246971e-26, -7.85398163e-01])
### Response:
def inversefunc(func,
y_values=None,
domain=None,
image=None,
open_domain=None,
args=(),
accuracy=2):
r"""Obtain the inverse of a function.
Returns the numerical inverse of the function `f`. It may return a callable
that can be used to calculate the inverse, or the inverse of certain points
depending on the `y_values` argument.
In order for the numerical inverse to exist in its domain, the
input function must have, continuous, strictly monotonic behavior i.e. be
purely decreasing or purely increasing in that domain. By default the
domain interval spans all the real numbers, however it can be restricted
with the `domain` and `open_domain` arguments. The image of the function
in the interval may be provided, for cases where the function is non
continuous right at the end of an open interval.
Parameters
----------
func : callable
Callable representing the function to be inverted, able to take a
ndarray or an scalar and return an object of the same kind with the
evaluation of the function. If `func` takes many arguments, it is
inverted along the axis corresponding to the first argument.
The function must not diverge and have a continuous strictly monotonic
behavior in the chosen interval.
y_values : float, ndarray, optional
Values for which calculate the inverse function. If set to None, then
a callable that can be used to calculate the inverse of values is
returned. Default None.
domain : float, ndarray, optional
Boundaries of the domain (`domain[0]`, `domain[1]`).
`domain[1]` must be larger than `domain[0]`.
None values are assumed to be no boundary in that direction.
A single scalar value will set it to [`domain`, None].
Default None (-Inf, Inf).
open_domain : bool, ndarray, optional
Whether the domain is an open interval at each of the ends.
A single scalar boolean will set it to [`open_domain`, `open_domain`].
Default None [False, False].
image : float, ndarray, optional
Image of the function in the domain (`image[0]`, `image[1]`).
`image[1]` must be larger than `image[0]`.
None values are assumed to be no boundary in that direction.
Default None, this is (-Inf, Inf) if domain is None, or the limits
set by func(domain[0]) and func(domain[1]).
args : tuple, optional
Extra arguments to pass to `func`. Default ().
accuracy : int, optional
Number of digits for the desired accuracy. It will give a warning
if the accuracy is worse than this.
Default 2.
Returns
-------
callable or ndarray
Inverse function of `func`. It can take scalars or ndarrays, and return
objects of the same kind with the calculated inverse values.
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
>>> from pynverse import inversefunc
>>> import numpy as np
>>> cube = (lambda x: x**3)
>>> invcube = inversefunc(cube)
>>> invcube(27) # Should give 3
array(3.0000000063797567)
>>> invsquare = inversefunc(np.power, args=(2), domain=0)
>>> invsquare([4, 16, 64]) # Should give [2, 4, 8]
array([ 2., 4., 8.])
>>> inversefunc(np.log10, y_values=-2, # Should give 0.01
... domain=0, open_domain=True)
array(0.0099999999882423)
>>> inversefunc(np.cos, y_values=[1, 0, -1], # Should give [0, pi / 2, pi]
... domain=[0, np.pi])
array([ 0. , 1.57079632, 3.14159265])
>>> invtan = inversefunc(np.tan,
... domain=[-np.pi / 2, np.pi / 2],
... open_domain=True)
>>> invtan([1, 0, -1]) # Should give [pi / 4, 0, -pi / 4]
array([ 7.85398163e-01, 1.29246971e-26, -7.85398163e-01])
"""
domain, image, open_domain, args = _normparams_inversefunc(domain,
image,
open_domain,
args)
ymin, ymax = image
xmin, xmax = domain
xmin_open, xmax_open = open_domain
# Calculating if the function is increasing or decreasing, using ref points
# anywhere in the valid range (Function has to be strictly monotonic)
ref1, ref2 = _get_valid_refpoints(xmin, xmax)
trend = np.sign(func(ref2, *args) - func(ref1, *args))
if trend == 0:
raise ValueError("Function is not strictly monotonic")
# Calculating the image by default
if ymin is None:
ymin = _auto_ymin(func, args, xmin, xmax, trend)
if ymax is None:
ymax = _auto_ymax(func, args, xmin, xmax, trend)
# Creating bounded function
def bounded_f(x):
if xmin is not None and (x < xmin or (x == xmin and xmin_open)):
val = -1 * np.inf * trend
elif xmax is not None and (x > xmax or (x == xmax and xmax_open)):
val = np.inf * trend
else:
val = func(x, *args)
return val
min_kwargs = {}
min_kwargs['bracket'] = (ref1, ref2)
min_kwargs['tol'] = 1.48e-08
min_kwargs['method'] = 'Brent'
def inv(yin):
yin = np.asarray(yin, dtype=np.float64)
shapein = yin.shape
yin = yin.flatten()
if ymin is not None:
if (xmin_open and trend == 1) or (xmax_open and trend == -1):
mask = yin <= ymin
else:
mask = yin < ymin
if yin[mask].size > 0:
raise ValueError("Requested values %s lower than the"
" lower limit %g of the image" %
(yin[mask], ymin))
if ymax is not None:
if (xmax_open and trend == 1) or (xmin_open and trend == -1):
mask = yin >= ymax
else:
mask = yin > ymax
if yin[mask].size > 0:
raise ValueError("Requested values %s higher than the"
" higher limit %g of the image" %
(yin[mask], ymax))
results = yin.copy() * np.nan
resultsmask = np.zeros(yin.shape, dtype=np.bool)
for j in range(yin.size):
if xmax is not None:
if bounded_f(xmax) == yin[j]:
results[j] = xmax
resultsmask[j] = True
continue
if xmin is not None:
if bounded_f(xmin) == yin[j]:
results[j] = xmin
resultsmask[j] = True
continue
optimizer = (lambda x, j=j,
bounded_f=bounded_f: (((bounded_f(x) - yin[j]))**2))
try:
with warnings.catch_warnings(record=True):
result = minimize_scalar(optimizer, **min_kwargs)
results[j] = result.x
resultsmask[j] = result.success
except:
resultsmask[j] = False
if any(~resultsmask):
warnings.warn("Trouble calculating inverse for values: "
"%s" % str(yin[~resultsmask]), RuntimeWarning)
try:
np.testing.assert_array_almost_equal(yin, func(results, *args),
decimal=accuracy)
except AssertionError:
warnings.warn("Results obtained with less than %g "
"decimal digits of accuracy"
% accuracy, RuntimeWarning)
return results.reshape(shapein)
if y_values is None:
return inv
else:
return inv(y_values) |
def get_field_mapping(self):
"""Obtain metadata from current state of the widget.
Null or empty list will be removed.
:returns: Dictionary of values by type in this format:
{'fields': {}, 'values': {}}.
:rtype: dict
"""
field_mapping = self.field_mapping_widget.get_field_mapping()
for k, v in list(field_mapping['values'].items()):
if not v:
field_mapping['values'].pop(k)
for k, v in list(field_mapping['fields'].items()):
if not v:
field_mapping['fields'].pop(k)
return field_mapping | Obtain metadata from current state of the widget.
Null or empty list will be removed.
:returns: Dictionary of values by type in this format:
{'fields': {}, 'values': {}}.
    :rtype: dict | Below is the instruction that describes the task:
### Input:
Obtain metadata from current state of the widget.
Null or empty list will be removed.
:returns: Dictionary of values by type in this format:
{'fields': {}, 'values': {}}.
:rtype: dict
### Response:
def get_field_mapping(self):
"""Obtain metadata from current state of the widget.
Null or empty list will be removed.
:returns: Dictionary of values by type in this format:
{'fields': {}, 'values': {}}.
:rtype: dict
"""
field_mapping = self.field_mapping_widget.get_field_mapping()
for k, v in list(field_mapping['values'].items()):
if not v:
field_mapping['values'].pop(k)
for k, v in list(field_mapping['fields'].items()):
if not v:
field_mapping['fields'].pop(k)
return field_mapping |
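A standalone sketch of the pruning behaviour with hypothetical field names; entries whose value is empty or falsy are removed from both sub-dictionaries:

field_mapping = {
    'fields': {'population': 'pop_count', 'name': ''},
    'values': {'ratio': [0.5], 'threshold': []},
}
for section in ('fields', 'values'):
    for k, v in list(field_mapping[section].items()):
        if not v:
            field_mapping[section].pop(k)
print(field_mapping)   # {'fields': {'population': 'pop_count'}, 'values': {'ratio': [0.5]}}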
def burn(self):
"""
Process the template with the data and
config which has been set and return the resulting SVG.
Raises ValueError when no data set has
been added to the graph object.
"""
if not self.data:
raise ValueError("No data available")
if hasattr(self, 'calculations'):
self.calculations()
self.start_svg()
self.calculate_graph_dimensions()
self.foreground = etree.Element("g")
self.draw_graph()
self.draw_titles()
self.draw_legend()
self.draw_data()
self.graph.append(self.foreground)
self.render_inline_styles()
return self.render(self.root) | Process the template with the data and
config which has been set and return the resulting SVG.
Raises ValueError when no data set has
been added to the graph object. | Below is the instruction that describes the task:
### Input:
Process the template with the data and
config which has been set and return the resulting SVG.
Raises ValueError when no data set has
been added to the graph object.
### Response:
def burn(self):
"""
Process the template with the data and
config which has been set and return the resulting SVG.
Raises ValueError when no data set has
been added to the graph object.
"""
if not self.data:
raise ValueError("No data available")
if hasattr(self, 'calculations'):
self.calculations()
self.start_svg()
self.calculate_graph_dimensions()
self.foreground = etree.Element("g")
self.draw_graph()
self.draw_titles()
self.draw_legend()
self.draw_data()
self.graph.append(self.foreground)
self.render_inline_styles()
return self.render(self.root) |
def finalize_env(env):
"""
Produce a platform specific env for passing into subprocess.Popen
family of external process calling methods, and the supplied env
will be updated on top of it. Returns a new env.
"""
keys = _PLATFORM_ENV_KEYS.get(sys.platform, [])
if 'PATH' not in keys:
# this MUST be available due to Node.js (and others really)
# needing something to look for binary locations when it shells
# out to other binaries.
keys.append('PATH')
results = {
key: os.environ.get(key, '') for key in keys
}
results.update(env)
return results | Produce a platform specific env for passing into subprocess.Popen
family of external process calling methods, and the supplied env
will be updated on top of it. Returns a new env. | Below is the instruction that describes the task:
### Input:
Produce a platform specific env for passing into subprocess.Popen
family of external process calling methods, and the supplied env
will be updated on top of it. Returns a new env.
### Response:
def finalize_env(env):
"""
Produce a platform specific env for passing into subprocess.Popen
family of external process calling methods, and the supplied env
will be updated on top of it. Returns a new env.
"""
keys = _PLATFORM_ENV_KEYS.get(sys.platform, [])
if 'PATH' not in keys:
# this MUST be available due to Node.js (and others really)
# needing something to look for binary locations when it shells
# out to other binaries.
keys.append('PATH')
results = {
key: os.environ.get(key, '') for key in keys
}
results.update(env)
return results |
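A usage sketch, assuming finalize_env and its _PLATFORM_ENV_KEYS table are importable from the module that defines them; the caller's entries win, and PATH is always copied over from os.environ:

env = finalize_env({'NODE_ENV': 'production'})
print('PATH' in env)      # True - copied from os.environ so child processes can resolve binaries
print(env['NODE_ENV'])    # 'production' - the supplied value overrides any inherited one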
def audio_samples(self):
"""
The audio audio_samples, that is, an array of ``float64`` values,
each representing an audio sample in ``[-1.0, 1.0]``.
Note that this function returns a view into the
first ``self.__samples_length`` elements of ``self.__samples``.
If you want to clone the values,
you must use e.g. ``numpy.array(audiofile.audio_samples)``.
:rtype: :class:`numpy.ndarray` (1D, view)
:raises: :class:`~aeneas.audiofile.AudioFileNotInitializedError`: if the audio file is not initialized yet
"""
if self.__samples is None:
if self.file_path is None:
self.log_exc(u"AudioFile object not initialized", None, True, AudioFileNotInitializedError)
else:
self.read_samples_from_file()
return self.__samples[0:self.__samples_length] | The audio audio_samples, that is, an array of ``float64`` values,
each representing an audio sample in ``[-1.0, 1.0]``.
Note that this function returns a view into the
first ``self.__samples_length`` elements of ``self.__samples``.
If you want to clone the values,
you must use e.g. ``numpy.array(audiofile.audio_samples)``.
:rtype: :class:`numpy.ndarray` (1D, view)
:raises: :class:`~aeneas.audiofile.AudioFileNotInitializedError`: if the audio file is not initialized yet | Below is the instruction that describes the task:
### Input:
The audio audio_samples, that is, an array of ``float64`` values,
each representing an audio sample in ``[-1.0, 1.0]``.
Note that this function returns a view into the
first ``self.__samples_length`` elements of ``self.__samples``.
If you want to clone the values,
you must use e.g. ``numpy.array(audiofile.audio_samples)``.
:rtype: :class:`numpy.ndarray` (1D, view)
:raises: :class:`~aeneas.audiofile.AudioFileNotInitializedError`: if the audio file is not initialized yet
### Response:
def audio_samples(self):
"""
The audio audio_samples, that is, an array of ``float64`` values,
each representing an audio sample in ``[-1.0, 1.0]``.
Note that this function returns a view into the
first ``self.__samples_length`` elements of ``self.__samples``.
If you want to clone the values,
you must use e.g. ``numpy.array(audiofile.audio_samples)``.
:rtype: :class:`numpy.ndarray` (1D, view)
:raises: :class:`~aeneas.audiofile.AudioFileNotInitializedError`: if the audio file is not initialized yet
"""
if self.__samples is None:
if self.file_path is None:
self.log_exc(u"AudioFile object not initialized", None, True, AudioFileNotInitializedError)
else:
self.read_samples_from_file()
return self.__samples[0:self.__samples_length] |
def _appendComponent(self, baseGlyph, transformation=None, identifier=None, **kwargs):
"""
baseGlyph will be a valid glyph name.
The baseGlyph may or may not be in the layer.
offset will be a valid offset (x, y).
scale will be a valid scale (x, y).
identifier will be a valid, nonconflicting identifier.
This must return the new component.
Subclasses may override this method.
"""
pointPen = self.getPointPen()
pointPen.addComponent(baseGlyph, transformation=transformation, identifier=identifier)
return self.components[-1] | baseGlyph will be a valid glyph name.
The baseGlyph may or may not be in the layer.
offset will be a valid offset (x, y).
scale will be a valid scale (x, y).
identifier will be a valid, nonconflicting identifier.
This must return the new component.
Subclasses may override this method. | Below is the instruction that describes the task:
### Input:
baseGlyph will be a valid glyph name.
The baseGlyph may or may not be in the layer.
offset will be a valid offset (x, y).
scale will be a valid scale (x, y).
identifier will be a valid, nonconflicting identifier.
This must return the new component.
Subclasses may override this method.
### Response:
def _appendComponent(self, baseGlyph, transformation=None, identifier=None, **kwargs):
"""
baseGlyph will be a valid glyph name.
The baseGlyph may or may not be in the layer.
offset will be a valid offset (x, y).
scale will be a valid scale (x, y).
identifier will be a valid, nonconflicting identifier.
This must return the new component.
Subclasses may override this method.
"""
pointPen = self.getPointPen()
pointPen.addComponent(baseGlyph, transformation=transformation, identifier=identifier)
return self.components[-1] |
def add_policy_statements(self, statements):
"""Adds statements to the policy.
Args:
statements (:class:`awacs.aws.Statement` or list): Either a single
            Statement, or a list of statements.
"""
if isinstance(statements, Statement):
statements = [statements]
self._policy_statements.extend(statements) | Adds statements to the policy.
Args:
statements (:class:`awacs.aws.Statement` or list): Either a single
Statement, or a list of statements. | Below is the instruction that describes the task:
### Input:
Adds statements to the policy.
Args:
statements (:class:`awacs.aws.Statement` or list): Either a single
    Statement, or a list of statements.
### Response:
def add_policy_statements(self, statements):
"""Adds statements to the policy.
Args:
statements (:class:`awacs.aws.Statement` or list): Either a single
            Statement, or a list of statements.
"""
if isinstance(statements, Statement):
statements = [statements]
self._policy_statements.extend(statements) |
def make_embedded_push(value):
"""Returns a closure that pushed the given value onto a Machine's stack.
We use this to embed stack pushes in the VM code, so that the interpreter
can assume that all instructions are callable Python functions. This makes
dispatching much faster than checking if an instruction is a constant
(number, string, etc) or a Python function.
"""
push = lambda vm: vm.push(value)
push.tag = EMBEDDED_PUSH_TAG
    return push | Returns a closure that pushes the given value onto a Machine's stack.
We use this to embed stack pushes in the VM code, so that the interpreter
can assume that all instructions are callable Python functions. This makes
dispatching much faster than checking if an instruction is a constant
(number, string, etc) or a Python function. | Below is the instruction that describes the task:
### Input:
Returns a closure that pushes the given value onto a Machine's stack.
We use this to embed stack pushes in the VM code, so that the interpreter
can assume that all instructions are callable Python functions. This makes
dispatching much faster than checking if an instruction is a constant
(number, string, etc) or a Python function.
### Response:
def make_embedded_push(value):
"""Returns a closure that pushed the given value onto a Machine's stack.
We use this to embed stack pushes in the VM code, so that the interpreter
can assume that all instructions are callable Python functions. This makes
dispatching much faster than checking if an instruction is a constant
(number, string, etc) or a Python function.
"""
push = lambda vm: vm.push(value)
push.tag = EMBEDDED_PUSH_TAG
return push |
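A self-contained sketch of the dispatch idea, meant to sit alongside the definition above: constants become push-closures, so the interpreter loop only ever calls functions. EMBEDDED_PUSH_TAG is stubbed here as a stand-in for the real module-level sentinel, and TinyMachine is a hypothetical minimal Machine exposing push():

EMBEDDED_PUSH_TAG = object()   # stand-in for the real module-level sentinel

class TinyMachine:
    def __init__(self):
        self.stack = []
    def push(self, value):
        self.stack.append(value)

def add(vm):
    # A regular instruction: pops two values and pushes their sum.
    b, a = vm.stack.pop(), vm.stack.pop()
    vm.push(a + b)

vm = TinyMachine()
code = [make_embedded_push(2), make_embedded_push(3), add]   # every slot is callable
for instruction in code:
    instruction(vm)
print(vm.stack)   # [5]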
def update(name=None,
pkgs=None,
refresh=True,
skip_verify=False,
normalize=True,
minimal=False,
obsoletes=False,
**kwargs):
'''
.. versionadded:: 2019.2.0
Calls :py:func:`pkg.upgrade <salt.modules.yumpkg.upgrade>` with
``obsoletes=False``. Mirrors the CLI behavior of ``yum update``.
See :py:func:`pkg.upgrade <salt.modules.yumpkg.upgrade>` for
further documentation.
.. code-block:: bash
salt '*' pkg.update
'''
return upgrade(name, pkgs, refresh, skip_verify, normalize, minimal, obsoletes, **kwargs) | .. versionadded:: 2019.2.0
Calls :py:func:`pkg.upgrade <salt.modules.yumpkg.upgrade>` with
``obsoletes=False``. Mirrors the CLI behavior of ``yum update``.
See :py:func:`pkg.upgrade <salt.modules.yumpkg.upgrade>` for
further documentation.
.. code-block:: bash
    salt '*' pkg.update | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2019.2.0
Calls :py:func:`pkg.upgrade <salt.modules.yumpkg.upgrade>` with
``obsoletes=False``. Mirrors the CLI behavior of ``yum update``.
See :py:func:`pkg.upgrade <salt.modules.yumpkg.upgrade>` for
further documentation.
.. code-block:: bash
salt '*' pkg.update
### Response:
def update(name=None,
pkgs=None,
refresh=True,
skip_verify=False,
normalize=True,
minimal=False,
obsoletes=False,
**kwargs):
'''
.. versionadded:: 2019.2.0
Calls :py:func:`pkg.upgrade <salt.modules.yumpkg.upgrade>` with
``obsoletes=False``. Mirrors the CLI behavior of ``yum update``.
See :py:func:`pkg.upgrade <salt.modules.yumpkg.upgrade>` for
further documentation.
.. code-block:: bash
salt '*' pkg.update
'''
return upgrade(name, pkgs, refresh, skip_verify, normalize, minimal, obsoletes, **kwargs) |
def updateResults(self, forward=None):
"""
Reload search results or load another "page".
Parameters
----------
forward : bool or None, optional
    Whether to move forwards or backwards (``True`` or ``False``). If ``None``, then first page is loaded.
"""
# this choses data we need.
files = lambda x: {
i['snippet']['title'].replace('/', '\\'): YTStor(
{'yid': i['id']['videoId'], 'pub_date': i['snippet']['publishedAt']},
opts=self.yts_opts) for i in x['items']
}
descs = lambda x: {
(i['snippet']['title'].replace('/', '\\') + '.txt'): YTMetaStor(
{
'title': i['snippet']['title'],
'yid': i['id']['videoId'],
'desc': i['snippet']['description'],
'channel': i['snippet']['channelTitle'],
'pub_date': i['snippet']['publishedAt']
},
opts=dict()
) for i in x['items']
}
thumbs = lambda x: {
(i['snippet']['title'].replace('/', '\\') + '.jpg'): YTMetaStor(
{'url': i['snippet']['thumbnails']['high']['url'], 'pub_date': i['snippet']['publishedAt']}, opts=dict()
) for i in x['items']
}
try:
if self.adj_tokens[forward] is None: # in case someone would somehow cross boundary.
forward = None
except KeyError:
pass
recv = None
try:
try:
data = self.avail_files[ self.adj_tokens[forward] ] # maybe data is already available locally.
except KeyError:
recv = self.__search( self.adj_tokens[forward] ) # nope, we have to search.
except KeyError: # wrong index in adj_tokens
if forward is None:
recv = self.__search()
else:
raise ValueError("Valid values for forward are True, False or None (default).")
if recv is not None:
_d = files(recv)
if self.preferences['metadata']['desc']: _d.update(descs(recv))
if self.preferences['metadata']['thumb']: _d.update(thumbs(recv))
data = (None, _d) # little format unification.
if len(self.avail_files) > 4:
pop = self.avail_files.popitem(False) # get rid of the oldest data.
for s in pop[1][1].values(): s.clean()
adj_t = deepcopy(self.adj_tokens) # this will we write to avail_files, now we update self.adj_tokens.
if data[0] is None: # get tokens from obtained results.
try:
self.adj_tokens[False] = recv['prevPageToken']
except KeyError:
self.adj_tokens[False] = None
try:
self.adj_tokens[True] = recv['nextPageToken']
except KeyError:
self.adj_tokens[True] = None
else: # already in avail_files.
self.adj_tokens = data[0]
if forward is not None:
# backup last results in avail_files:
self.avail_files[ self.adj_tokens[not forward] ] = (adj_t, self.visible_files)
self.visible_files = data[1] | Reload search results or load another "page".
Parameters
----------
forward : bool or None, optional
    Whether to move forwards or backwards (``True`` or ``False``). If ``None``, then first page is loaded. | Below is the instruction that describes the task:
### Input:
Reload search results or load another "page".
Parameters
----------
forward : bool or None, optional
    Whether to move forwards or backwards (``True`` or ``False``). If ``None``, then first page is loaded.
### Response:
def updateResults(self, forward=None):
"""
Reload search results or load another "page".
Parameters
----------
forward : bool or None, optional
    Whether to move forwards or backwards (``True`` or ``False``). If ``None``, then first page is loaded.
"""
# this choses data we need.
files = lambda x: {
i['snippet']['title'].replace('/', '\\'): YTStor(
{'yid': i['id']['videoId'], 'pub_date': i['snippet']['publishedAt']},
opts=self.yts_opts) for i in x['items']
}
descs = lambda x: {
(i['snippet']['title'].replace('/', '\\') + '.txt'): YTMetaStor(
{
'title': i['snippet']['title'],
'yid': i['id']['videoId'],
'desc': i['snippet']['description'],
'channel': i['snippet']['channelTitle'],
'pub_date': i['snippet']['publishedAt']
},
opts=dict()
) for i in x['items']
}
thumbs = lambda x: {
(i['snippet']['title'].replace('/', '\\') + '.jpg'): YTMetaStor(
{'url': i['snippet']['thumbnails']['high']['url'], 'pub_date': i['snippet']['publishedAt']}, opts=dict()
) for i in x['items']
}
try:
if self.adj_tokens[forward] is None: # in case someone would somehow cross boundary.
forward = None
except KeyError:
pass
recv = None
try:
try:
data = self.avail_files[ self.adj_tokens[forward] ] # maybe data is already available locally.
except KeyError:
recv = self.__search( self.adj_tokens[forward] ) # nope, we have to search.
except KeyError: # wrong index in adj_tokens
if forward is None:
recv = self.__search()
else:
raise ValueError("Valid values for forward are True, False or None (default).")
if recv is not None:
_d = files(recv)
if self.preferences['metadata']['desc']: _d.update(descs(recv))
if self.preferences['metadata']['thumb']: _d.update(thumbs(recv))
data = (None, _d) # little format unification.
if len(self.avail_files) > 4:
pop = self.avail_files.popitem(False) # get rid of the oldest data.
for s in pop[1][1].values(): s.clean()
adj_t = deepcopy(self.adj_tokens) # this will we write to avail_files, now we update self.adj_tokens.
if data[0] is None: # get tokens from obtained results.
try:
self.adj_tokens[False] = recv['prevPageToken']
except KeyError:
self.adj_tokens[False] = None
try:
self.adj_tokens[True] = recv['nextPageToken']
except KeyError:
self.adj_tokens[True] = None
else: # already in avail_files.
self.adj_tokens = data[0]
if forward is not None:
# backup last results in avail_files:
self.avail_files[ self.adj_tokens[not forward] ] = (adj_t, self.visible_files)
self.visible_files = data[1] |
def get_go_server(settings=None):
"""Returns a `gocd.Server` configured by the `settings`
object.
Args:
settings: a `gocd_cli.settings.Settings` object.
Default: if falsey calls `get_settings`.
Returns:
gocd.Server: a configured gocd.Server instance
"""
if not settings:
settings = get_settings()
return gocd.Server(
settings.get('server'),
user=settings.get('user'),
password=settings.get('password'),
) | Returns a `gocd.Server` configured by the `settings`
object.
Args:
settings: a `gocd_cli.settings.Settings` object.
Default: if falsey calls `get_settings`.
Returns:
    gocd.Server: a configured gocd.Server instance | Below is the instruction that describes the task:
### Input:
Returns a `gocd.Server` configured by the `settings`
object.
Args:
settings: a `gocd_cli.settings.Settings` object.
Default: if falsey calls `get_settings`.
Returns:
gocd.Server: a configured gocd.Server instance
### Response:
def get_go_server(settings=None):
"""Returns a `gocd.Server` configured by the `settings`
object.
Args:
settings: a `gocd_cli.settings.Settings` object.
Default: if falsey calls `get_settings`.
Returns:
gocd.Server: a configured gocd.Server instance
"""
if not settings:
settings = get_settings()
return gocd.Server(
settings.get('server'),
user=settings.get('user'),
password=settings.get('password'),
) |
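A usage sketch with a minimal stand-in for the settings object (the real one comes from gocd_cli.settings); only .get(key) is required, the connection values are placeholders, and the gocd package must be installed for the construction to succeed:

class StubSettings:
    def __init__(self, values):
        self.values = values
    def get(self, key):
        return self.values.get(key)

server = get_go_server(StubSettings({
    'server': 'http://localhost:8153',
    'user': 'admin',
    'password': 'secret',
}))
print(server)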
def demo(delay=1, interactive=True):
"""
Play a demo script showing most of the oct2py api features.
Parameters
==========
delay : float
Time between each command in seconds.
"""
script = """
#########################
# Oct2Py demo
#########################
import numpy as np
from oct2py import Oct2Py
oc = Oct2Py()
# basic commands
print(oc.abs(-1))
print(oc.upper('xyz'))
# plotting
oc.plot([1,2,3],'-o', 'linewidth', 2)
raw_input('Press Enter to continue...')
oc.close()
xx = np.arange(-2*np.pi, 2*np.pi, 0.2)
oc.surf(np.subtract.outer(np.sin(xx), np.cos(xx)))
raw_input('Press Enter to continue...')
oc.close()
# getting help
help(oc.svd)
# single vs. multiple return values
print(oc.svd(np.array([[1,2], [1,3]])))
U, S, V = oc.svd([[1,2], [1,3]], nout=3)
print(U, S, V)
# low level constructs
oc.eval("y=ones(3,3)")
print(oc.pull("y"))
oc.eval("x=zeros(3,3)", verbose=True)
t = oc.eval('rand(1, 2)', verbose=True)
y = np.zeros((3,3))
oc.push('y', y)
print(oc.pull('y'))
from oct2py import Struct
y = Struct()
y.b = 'spam'
y.c.d = 'eggs'
print(y.c['d'])
print(y)
#########################
# Demo Complete!
#########################
"""
if not PY2:
script = script.replace('raw_input', 'input')
for line in script.strip().split('\n'):
line = line.strip()
if not 'input(' in line:
time.sleep(delay)
print(">>> {0}".format(line))
time.sleep(delay)
if not interactive:
if 'plot' in line or 'surf' in line or 'input(' in line:
line = 'print()'
exec(line) | Play a demo script showing most of the oct2py api features.
Parameters
==========
delay : float
    Time between each command in seconds. | Below is the instruction that describes the task:
### Input:
Play a demo script showing most of the oct2py api features.
Parameters
==========
delay : float
Time between each command in seconds.
### Response:
def demo(delay=1, interactive=True):
"""
Play a demo script showing most of the oct2py api features.
Parameters
==========
delay : float
Time between each command in seconds.
"""
script = """
#########################
# Oct2Py demo
#########################
import numpy as np
from oct2py import Oct2Py
oc = Oct2Py()
# basic commands
print(oc.abs(-1))
print(oc.upper('xyz'))
# plotting
oc.plot([1,2,3],'-o', 'linewidth', 2)
raw_input('Press Enter to continue...')
oc.close()
xx = np.arange(-2*np.pi, 2*np.pi, 0.2)
oc.surf(np.subtract.outer(np.sin(xx), np.cos(xx)))
raw_input('Press Enter to continue...')
oc.close()
# getting help
help(oc.svd)
# single vs. multiple return values
print(oc.svd(np.array([[1,2], [1,3]])))
U, S, V = oc.svd([[1,2], [1,3]], nout=3)
print(U, S, V)
# low level constructs
oc.eval("y=ones(3,3)")
print(oc.pull("y"))
oc.eval("x=zeros(3,3)", verbose=True)
t = oc.eval('rand(1, 2)', verbose=True)
y = np.zeros((3,3))
oc.push('y', y)
print(oc.pull('y'))
from oct2py import Struct
y = Struct()
y.b = 'spam'
y.c.d = 'eggs'
print(y.c['d'])
print(y)
#########################
# Demo Complete!
#########################
"""
if not PY2:
script = script.replace('raw_input', 'input')
for line in script.strip().split('\n'):
line = line.strip()
if not 'input(' in line:
time.sleep(delay)
print(">>> {0}".format(line))
time.sleep(delay)
if not interactive:
if 'plot' in line or 'surf' in line or 'input(' in line:
line = 'print()'
exec(line) |
def dependencies(self, deps_dict):
"""Generate graph file with depenndencies map tree
"""
try:
import pygraphviz as pgv
except ImportError:
graph_easy, comma = "", ""
if (self.image == "ascii" and
not os.path.isfile("/usr/bin/graph-easy")):
comma = ","
graph_easy = " graph-easy"
print("Require 'pygraphviz{0}{1}': Install with 'slpkg -s sbo "
"pygraphviz{1}'".format(comma, graph_easy))
raise SystemExit()
if self.image != "ascii":
self.check_file()
try:
G = pgv.AGraph(deps_dict)
G.layout(prog="fdp")
if self.image == "ascii":
G.write("{0}.dot".format(self.image))
self.graph_easy()
G.draw(self.image)
except IOError:
raise SystemExit()
if os.path.isfile(self.image):
print("Graph image file '{0}' created".format(self.image))
raise SystemExit() | Generate graph file with dependencies map tree | Below is the the instruction that describes the task:
### Input:
Generate graph file with dependencies map tree
### Response:
def dependencies(self, deps_dict):
"""Generate graph file with depenndencies map tree
"""
try:
import pygraphviz as pgv
except ImportError:
graph_easy, comma = "", ""
if (self.image == "ascii" and
not os.path.isfile("/usr/bin/graph-easy")):
comma = ","
graph_easy = " graph-easy"
print("Require 'pygraphviz{0}{1}': Install with 'slpkg -s sbo "
"pygraphviz{1}'".format(comma, graph_easy))
raise SystemExit()
if self.image != "ascii":
self.check_file()
try:
G = pgv.AGraph(deps_dict)
G.layout(prog="fdp")
if self.image == "ascii":
G.write("{0}.dot".format(self.image))
self.graph_easy()
G.draw(self.image)
except IOError:
raise SystemExit()
if os.path.isfile(self.image):
print("Graph image file '{0}' created".format(self.image))
raise SystemExit() |
def reset(self):
"""
Empties all internal storage containers
"""
super(MorseComplex, self).reset()
self.base_partitions = {}
self.merge_sequence = {}
self.persistences = []
self.max_indices = []
# State properties
self.persistence = 0. | Empties all internal storage containers | Below is the the instruction that describes the task:
### Input:
Empties all internal storage containers
### Response:
def reset(self):
"""
Empties all internal storage containers
"""
super(MorseComplex, self).reset()
self.base_partitions = {}
self.merge_sequence = {}
self.persistences = []
self.max_indices = []
# State properties
self.persistence = 0. |
def get_instance(self, payload):
"""
Build an instance of AuthTypesInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesInstance
"""
return AuthTypesInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
domain_sid=self._solution['domain_sid'],
) | Build an instance of AuthTypesInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesInstance | Below is the the instruction that describes the task:
### Input:
Build an instance of AuthTypesInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesInstance
### Response:
def get_instance(self, payload):
"""
Build an instance of AuthTypesInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesInstance
"""
return AuthTypesInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
domain_sid=self._solution['domain_sid'],
) |
def boundary_maximum_power(graph, xxx_todo_changeme7):
"""
Boundary term processing adjacent voxels maximum value using a power relationship.
An implementation of a boundary term, suitable to be used with the
`~medpy.graphcut.generate.graph_from_voxels` function.
The same as `boundary_difference_power`, but working on the gradient image instead
of the original. See there for details.
Parameters
----------
graph : GCGraph
The graph to add the weights to.
gradient_image : ndarray
The gradient image.
sigma : float
The sigma parameter to use in the boundary term.
spacing : sequence of float or False
A sequence containing the slice spacing used for weighting the
computed neighbourhood weight value for different dimensions. If
`False`, no distance based weighting of the graph edges is performed.
Notes
-----
This function requires the gradient image to be passed along. That means that
`~medpy.graphcut.generate.graph_from_voxels` has to be called with ``boundary_term_args`` set to the
gradient image.
"""
(gradient_image, sigma, spacing) = xxx_todo_changeme7
gradient_image = scipy.asarray(gradient_image)
def boundary_term_power(intensities):
"""
Implementation of a power boundary term computation over an array.
"""
# apply (1 / (1 + x))^sigma
intensities = 1. / (intensities + 1)
intensities = scipy.power(intensities, sigma)
intensities[intensities <= 0] = sys.float_info.min
return intensities
__skeleton_maximum(graph, gradient_image, boundary_term_power, spacing) | Boundary term processing adjacent voxels maximum value using a power relationship.
An implementation of a boundary term, suitable to be used with the
`~medpy.graphcut.generate.graph_from_voxels` function.
The same as `boundary_difference_power`, but working on the gradient image instead
of the original. See there for details.
Parameters
----------
graph : GCGraph
The graph to add the weights to.
gradient_image : ndarray
The gradient image.
sigma : float
The sigma parameter to use in the boundary term.
spacing : sequence of float or False
A sequence containing the slice spacing used for weighting the
computed neighbourhood weight value for different dimensions. If
`False`, no distance based weighting of the graph edges is performed.
Notes
-----
This function requires the gradient image to be passed along. That means that
`~medpy.graphcut.generate.graph_from_voxels` has to be called with ``boundary_term_args`` set to the
gradient image. | Below is the the instruction that describes the task:
### Input:
Boundary term processing adjacent voxels maximum value using a power relationship.
An implementation of a boundary term, suitable to be used with the
`~medpy.graphcut.generate.graph_from_voxels` function.
The same as `boundary_difference_power`, but working on the gradient image instead
of the original. See there for details.
Parameters
----------
graph : GCGraph
The graph to add the weights to.
gradient_image : ndarray
The gradient image.
sigma : float
The sigma parameter to use in the boundary term.
spacing : sequence of float or False
A sequence containing the slice spacing used for weighting the
computed neighbourhood weight value for different dimensions. If
`False`, no distance based weighting of the graph edges is performed.
Notes
-----
This function requires the gradient image to be passed along. That means that
`~medpy.graphcut.generate.graph_from_voxels` has to be called with ``boundary_term_args`` set to the
gradient image.
### Response:
def boundary_maximum_power(graph, xxx_todo_changeme7):
"""
Boundary term processing adjacent voxels maximum value using a power relationship.
An implementation of a boundary term, suitable to be used with the
`~medpy.graphcut.generate.graph_from_voxels` function.
The same as `boundary_difference_power`, but working on the gradient image instead
of the original. See there for details.
Parameters
----------
graph : GCGraph
The graph to add the weights to.
gradient_image : ndarray
The gradient image.
sigma : float
The sigma parameter to use in the boundary term.
spacing : sequence of float or False
A sequence containing the slice spacing used for weighting the
computed neighbourhood weight value for different dimensions. If
`False`, no distance based weighting of the graph edges is performed.
Notes
-----
This function requires the gradient image to be passed along. That means that
`~medpy.graphcut.generate.graph_from_voxels` has to be called with ``boundary_term_args`` set to the
gradient image.
"""
(gradient_image, sigma, spacing) = xxx_todo_changeme7
gradient_image = scipy.asarray(gradient_image)
def boundary_term_power(intensities):
"""
Implementation of a power boundary term computation over an array.
"""
# apply (1 / (1 + x))^sigma
intensities = 1. / (intensities + 1)
intensities = scipy.power(intensities, sigma)
intensities[intensities <= 0] = sys.float_info.min
return intensities
__skeleton_maximum(graph, gradient_image, boundary_term_power, spacing) |
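A quick way to sanity-check the weighting used above is to run the power term on its own. The snippet below is a standalone sketch, not part of medpy; the sigma value and the sample gradient magnitudes are illustrative assumptions.
import sys
import numpy as np

def boundary_term_power(intensities, sigma=2.0):
    # apply (1 / (1 + x))^sigma and clamp zeros to the smallest positive float
    weights = 1.0 / (intensities + 1.0)
    weights = np.power(weights, sigma)
    weights[weights <= 0] = sys.float_info.min
    return weights

print(boundary_term_power(np.array([0.0, 0.5, 4.0])))
# [1.       0.44444  0.04]  -- larger gradients map to smaller edge weights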
def _load_w2v(model_file=_f_model, binary=True):
'''
load word2vec model
'''
if not os.path.exists(model_file):
print("os.path : ", os.path)
raise Exception("Model file [%s] does not exist." % model_file)
return KeyedVectors.load_word2vec_format(
model_file, binary=binary, unicode_errors='ignore') | load word2vec model | Below is the the instruction that describes the task:
### Input:
load word2vec model
### Response:
def _load_w2v(model_file=_f_model, binary=True):
'''
load word2vec model
'''
if not os.path.exists(model_file):
print("os.path : ", os.path)
raise Exception("Model file [%s] does not exist." % model_file)
return KeyedVectors.load_word2vec_format(
model_file, binary=binary, unicode_errors='ignore') |
def set_threshold_overrides(self, override_dict):
"""
Set manual overrides on the threshold (used for determining
warning/critical status) a dict of limits. See
:py:class:`~.AwsLimitChecker` for information on Warning and
Critical thresholds.
Dict is composed of service name keys (string) to dict of
limit names (string), to dict of threshold specifications.
Each threshold specification dict can contain keys 'warning'
or 'critical', each having a value of a dict containing
keys 'percent' or 'count', to an integer value.
Example:
::
{
'EC2': {
'SomeLimit': {
'warning': {
'percent': 80,
'count': 8,
},
'critical': {
'percent': 90,
'count': 9,
}
}
}
}
See :py:meth:`.AwsLimit.set_threshold_override`.
:param override_dict: nested dict of threshold overrides
:type override_dict: dict
"""
for svc_name in sorted(override_dict):
for lim_name in sorted(override_dict[svc_name]):
d = override_dict[svc_name][lim_name]
kwargs = {}
if 'warning' in d:
if 'percent' in d['warning']:
kwargs['warn_percent'] = d['warning']['percent']
if 'count' in d['warning']:
kwargs['warn_count'] = d['warning']['count']
if 'critical' in d:
if 'percent' in d['critical']:
kwargs['crit_percent'] = d['critical']['percent']
if 'count' in d['critical']:
kwargs['crit_count'] = d['critical']['count']
self.services[svc_name].set_threshold_override(
lim_name,
**kwargs
) | Set manual overrides on the threshold (used for determining
warning/critical status) a dict of limits. See
:py:class:`~.AwsLimitChecker` for information on Warning and
Critical thresholds.
Dict is composed of service name keys (string) to dict of
limit names (string), to dict of threshold specifications.
Each threshold specification dict can contain keys 'warning'
or 'critical', each having a value of a dict containing
keys 'percent' or 'count', to an integer value.
Example:
::
{
'EC2': {
'SomeLimit': {
'warning': {
'percent': 80,
'count': 8,
},
'critical': {
'percent': 90,
'count': 9,
}
}
}
}
See :py:meth:`.AwsLimit.set_threshold_override`.
:param override_dict: nested dict of threshold overrides
:type override_dict: dict | Below is the the instruction that describes the task:
### Input:
Set manual overrides on the threshold (used for determining
warning/critical status) a dict of limits. See
:py:class:`~.AwsLimitChecker` for information on Warning and
Critical thresholds.
Dict is composed of service name keys (string) to dict of
limit names (string), to dict of threshold specifications.
Each threshold specification dict can contain keys 'warning'
or 'critical', each having a value of a dict containing
keys 'percent' or 'count', to an integer value.
Example:
::
{
'EC2': {
'SomeLimit': {
'warning': {
'percent': 80,
'count': 8,
},
'critical': {
'percent': 90,
'count': 9,
}
}
}
}
See :py:meth:`.AwsLimit.set_threshold_override`.
:param override_dict: nested dict of threshold overrides
:type override_dict: dict
### Response:
def set_threshold_overrides(self, override_dict):
"""
Set manual overrides on the threshold (used for determining
warning/critical status) a dict of limits. See
:py:class:`~.AwsLimitChecker` for information on Warning and
Critical thresholds.
Dict is composed of service name keys (string) to dict of
limit names (string), to dict of threshold specifications.
Each threshold specification dict can contain keys 'warning'
or 'critical', each having a value of a dict containing
keys 'percent' or 'count', to an integer value.
Example:
::
{
'EC2': {
'SomeLimit': {
'warning': {
'percent': 80,
'count': 8,
},
'critical': {
'percent': 90,
'count': 9,
}
}
}
}
See :py:meth:`.AwsLimit.set_threshold_override`.
:param override_dict: nested dict of threshold overrides
:type override_dict: dict
"""
for svc_name in sorted(override_dict):
for lim_name in sorted(override_dict[svc_name]):
d = override_dict[svc_name][lim_name]
kwargs = {}
if 'warning' in d:
if 'percent' in d['warning']:
kwargs['warn_percent'] = d['warning']['percent']
if 'count' in d['warning']:
kwargs['warn_count'] = d['warning']['count']
if 'critical' in d:
if 'percent' in d['critical']:
kwargs['crit_percent'] = d['critical']['percent']
if 'count' in d['critical']:
kwargs['crit_count'] = d['critical']['count']
self.services[svc_name].set_threshold_override(
lim_name,
**kwargs
) |
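The nested override structure above is easier to follow when the parsing loop is pulled out on its own. This is a hedged, standalone sketch of that flattening step; flatten_threshold_overrides is a hypothetical helper name, not part of awslimitchecker.
def flatten_threshold_overrides(override_dict):
    # turn {service: {limit: {'warning'/'critical': {'percent'/'count': N}}}} into kwargs per limit
    flattened = {}
    for svc_name in sorted(override_dict):
        for lim_name in sorted(override_dict[svc_name]):
            d = override_dict[svc_name][lim_name]
            kwargs = {}
            if 'warning' in d:
                if 'percent' in d['warning']:
                    kwargs['warn_percent'] = d['warning']['percent']
                if 'count' in d['warning']:
                    kwargs['warn_count'] = d['warning']['count']
            if 'critical' in d:
                if 'percent' in d['critical']:
                    kwargs['crit_percent'] = d['critical']['percent']
                if 'count' in d['critical']:
                    kwargs['crit_count'] = d['critical']['count']
            flattened[(svc_name, lim_name)] = kwargs
    return flattened

overrides = {'EC2': {'SomeLimit': {'warning': {'percent': 80}, 'critical': {'count': 9}}}}
print(flatten_threshold_overrides(overrides))
# {('EC2', 'SomeLimit'): {'warn_percent': 80, 'crit_count': 9}}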
def commit_account_debit(self, opcode, account_payment_info, current_block_number, current_vtxindex, current_txid):
"""
Given the account info set by a state-create or state-transition or a token-operation,
debit the relevant account.
Do not call this directly.
Return True on success
Abort on error
"""
account_address = account_payment_info['address']
account_payment = account_payment_info['amount']
account_token_type = account_payment_info['type']
if account_address is not None and account_payment is not None and account_token_type is not None:
# sanity check
try:
assert account_payment >= 0, 'Negative account payment {}'.format(account_payment)
assert self.is_token_type_supported(account_token_type), 'Unsupported token type {}'.format(account_token_type)
except Exception as e:
log.exception(e)
log.fatal("Sanity check failed")
os.abort()
# have to debit this account
cur = self.db.cursor()
rc = namedb_account_debit(cur, account_address, account_token_type, account_payment, current_block_number, current_vtxindex, current_txid)
if not rc:
traceback.print_stack()
log.fatal("Failed to debit address {} {} {}".format(account_address, account_payment, account_token_type))
os.abort()
log.debug("COMMIT DEBIT ACCOUNT {} for {} units of {}(s) for {}".format(account_address, account_payment, account_token_type, opcode))
return True | Given the account info set by a state-create or state-transition or a token-operation,
debit the relevant account.
Do not call this directly.
Return True on success
Abort on error | Below is the the instruction that describes the task:
### Input:
Given the account info set by a state-create or state-transition or a token-operation,
debit the relevant account.
Do not call this directly.
Return True on success
Abort on error
### Response:
def commit_account_debit(self, opcode, account_payment_info, current_block_number, current_vtxindex, current_txid):
"""
Given the account info set by a state-create or state-transition or a token-operation,
debit the relevant account.
Do not call this directly.
Return True on success
Abort on error
"""
account_address = account_payment_info['address']
account_payment = account_payment_info['amount']
account_token_type = account_payment_info['type']
if account_address is not None and account_payment is not None and account_token_type is not None:
# sanity check
try:
assert account_payment >= 0, 'Negative account payment {}'.format(account_payment)
assert self.is_token_type_supported(account_token_type), 'Unsupported token type {}'.format(account_token_type)
except Exception as e:
log.exception(e)
log.fatal("Sanity check failed")
os.abort()
# have to debit this account
cur = self.db.cursor()
rc = namedb_account_debit(cur, account_address, account_token_type, account_payment, current_block_number, current_vtxindex, current_txid)
if not rc:
traceback.print_stack()
log.fatal("Failed to debit address {} {} {}".format(account_address, account_payment, account_token_type))
os.abort()
log.debug("COMMIT DEBIT ACCOUNT {} for {} units of {}(s) for {}".format(account_address, account_payment, account_token_type, opcode))
return True |
def __cache_covered_data(self):
"""!
@brief Cache covered data.
"""
self.__cache_points = True
self.__points = []
for index_point in range(len(self.__data)):
if self.__data[index_point] in self.__spatial_block:
self.__cache_point(index_point) | !
@brief Cache covered data. | Below is the the instruction that describes the task:
### Input:
!
@brief Cache covered data.
### Response:
def __cache_covered_data(self):
"""!
@brief Cache covered data.
"""
self.__cache_points = True
self.__points = []
for index_point in range(len(self.__data)):
if self.__data[index_point] in self.__spatial_block:
self.__cache_point(index_point) |
def _update_pypi_version(self):
"""Get the latest PyPI version (as a string) via the RESTful JSON API"""
logger.debug("Get latest Glances version from the PyPI RESTful API ({})".format(PYPI_API_URL))
# Update the current time
self.data[u'refresh_date'] = datetime.now()
try:
res = urlopen(PYPI_API_URL, timeout=3).read()
except (HTTPError, URLError, CertificateError) as e:
logger.debug("Cannot get Glances version from the PyPI RESTful API ({})".format(e))
else:
self.data[u'latest_version'] = json.loads(nativestr(res))['info']['version']
logger.debug("Save Glances version to the cache file")
# Save result to the cache file
# Note: also saved if the Glances PyPI version cannot be grabbed
self._save_cache()
return self.data | Get the latest PyPI version (as a string) via the RESTful JSON API | Below is the the instruction that describes the task:
### Input:
Get the latest PyPI version (as a string) via the RESTful JSON API
### Response:
def _update_pypi_version(self):
"""Get the latest PyPI version (as a string) via the RESTful JSON API"""
logger.debug("Get latest Glances version from the PyPI RESTful API ({})".format(PYPI_API_URL))
# Update the current time
self.data[u'refresh_date'] = datetime.now()
try:
res = urlopen(PYPI_API_URL, timeout=3).read()
except (HTTPError, URLError, CertificateError) as e:
logger.debug("Cannot get Glances version from the PyPI RESTful API ({})".format(e))
else:
self.data[u'latest_version'] = json.loads(nativestr(res))['info']['version']
logger.debug("Save Glances version to the cache file")
# Save result to the cache file
# Note: also saved if the Glances PyPI version cannot be grabbed
self._save_cache()
return self.data |
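The version lookup itself is a small, reusable pattern: query the PyPI JSON API and read info.version. The sketch below is a standalone, assumption-laden version of it; the URL is the public pypi.org endpoint, which may differ from the PYPI_API_URL constant Glances uses, and the call needs network access.
import json
from urllib.request import urlopen

url = 'https://pypi.org/pypi/glances/json'
try:
    with urlopen(url, timeout=3) as res:
        latest_version = json.loads(res.read().decode('utf-8'))['info']['version']
except OSError:
    latest_version = None   # offline or endpoint unreachable
print(latest_version)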
def features_extraction(windowed_signal, functions):
"""
-----
Brief
-----
Function to extract features given in functions, that returns an array with the set of features
for each time window.
-----------
Description
-----------
Machine learning pipelines usually use features to represent each sample of the input signals.
Those features should be relevant in the sense that they should be useful to distinguish between the classes
of each specific problem, and not redundant, as the usage of redundant features usually improves the complexity
of the problem without improving its results.
This function allows to extract features from the various windows provided in windowed_signal.
----------
Parameters
----------
windowed_signal: list or numpy.array
Input windowed signal (if your signal is not windowed, you can use the function windowing of this module).
functions: list
List of functions that will be applied to each window. For example: [numpy.mean, numpy.std]
Returns
-------
Array of features with shape (n_windows, n_features).
"""
features = np.zeros(shape=(windowed_signal.shape[0], len(functions)))
for i, window in enumerate(windowed_signal):
fea = []
for f in functions:
fea.append(f(window))
features[i] = fea
return features | -----
Brief
-----
Function to extract features given in functions, that returns an array with the set of features
for each time window.
-----------
Description
-----------
Machine learning pipelines usually use features to represent each sample of the input signals.
Those features should be relevant in the sense that they should be useful to distinguish between the classes
of each specific problem, and not redundant, as the usage of redundant features usually improves the complexity
of the problem without improving its results.
This function allows to extract features from the various windows provided in windowed_signal.
----------
Parameters
----------
windowed_signal: list or numpy.array
Input windowed signal (if your signal is not windowed, you can use the function windowing of this module).
functions: list
List of functions that will be applied to each window. For example: [numpy.mean, numpy.std]
Returns
-------
Array of features with shape (n_windows, n_features). | Below is the the instruction that describes the task:
### Input:
-----
Brief
-----
Function to extract features given in functions, that returns an array with the set of features
for each time window.
-----------
Description
-----------
Machine learning pipelines usually use features to represent each sample of the input signals.
Those features should be relevant in the sense that they should be useful to distinguish between the classes
of each specific problem, and not redundant, as the usage of redundant features usually improves the complexity
of the problem without improving its results.
This function allows to extract features from the various windows provided in windowed_signal.
----------
Parameters
----------
windowed_signal: list or numpy.array
Input windowed signal (if your signal is not windowed, you can use the function windowing of this module).
functions: list
List of functions that will be applied to each window. For example: [numpy.mean, numpy.std]
Returns
-------
Array of features with shape (n_windows, n_features).
### Response:
def features_extraction(windowed_signal, functions):
"""
-----
Brief
-----
Function to extract features given in functions, that returns an array with the set of features
for each time window.
-----------
Description
-----------
Machine learning pipelines usually use features to represent each sample of the input signals.
Those features should be relevant in the sense that they should be useful to distinguish between the classes
of each specific problem, and not redundant, as the usage of redundant features usually improves the complexity
of the problem without improving its results.
This function allows to extract features from the various windows provided in windowed_signal.
----------
Parameters
----------
windowed_signal: list or numpy.array
Input windowed signal (if your signal is not windowed, you can use the function windowing of this module).
functions: list
List of functions that will be applied to each window. For example: [numpy.mean, numpy.std]
Returns
-------
Array of features with shape (n_windows, n_features).
"""
features = np.zeros(shape=(windowed_signal.shape[0], len(functions)))
for i, window in enumerate(windowed_signal):
fea = []
for f in functions:
fea.append(f(window))
features[i] = fea
return features |
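Because the function is pure NumPy, it is easy to exercise on a toy signal. The following standalone sketch repeats the function body only so the snippet runs by itself; the window length and the choice of feature functions are illustrative assumptions.
import numpy as np

def features_extraction(windowed_signal, functions):
    features = np.zeros(shape=(windowed_signal.shape[0], len(functions)))
    for i, window in enumerate(windowed_signal):
        features[i] = [f(window) for f in functions]
    return features

signal = np.sin(np.linspace(0, 10 * np.pi, 1000))
windowed = signal.reshape(-1, 250)              # four non-overlapping 250-sample windows
feats = features_extraction(windowed, [np.mean, np.std, np.ptp])
print(feats.shape)                              # (4, 3): one row per window, one column per feature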
def import_image_tags(self, name, stream_import, tags, repository, insecure):
"""
Import image tags from a Docker registry into an ImageStream
:return: bool, whether tags were imported
"""
# Get the JSON for the ImageStream
imagestream_json = self.get_image_stream(name).json()
logger.debug("imagestream: %r", imagestream_json)
changed = False
# existence of dockerImageRepository is limiting how many tags are updated
if 'dockerImageRepository' in imagestream_json.get('spec', {}):
logger.debug("Removing 'dockerImageRepository' from ImageStream %s", name)
imagestream_json['spec'].pop('dockerImageRepository')
changed = True
all_annotations = imagestream_json.get('metadata', {}).get('annotations', {})
# remove annotations about registry, since method will get them as arguments
for annotation in ANNOTATION_SOURCE_REPO, ANNOTATION_INSECURE_REPO:
if annotation in all_annotations:
imagestream_json['metadata']['annotations'].pop(annotation)
changed = True
if changed:
imagestream_json = self.update_image_stream(name, imagestream_json).json()
# Note the tags before import
oldtags = imagestream_json.get('status', {}).get('tags', [])
logger.debug("tags before import: %r", oldtags)
stream_import['metadata']['name'] = name
stream_import['spec']['images'] = []
tags_set = set(tags) if tags else set()
if not tags_set:
logger.debug('No tags to import')
return False
for tag in tags_set:
image_import = {
'from': {"kind": "DockerImage",
"name": '{}:{}'.format(repository, tag)},
'to': {'name': tag},
'importPolicy': {'insecure': insecure},
# referencePolicy will default to "type: source"
# so we don't have to explicitly set it
}
stream_import['spec']['images'].append(image_import)
import_url = self._build_url("imagestreamimports/")
import_response = self._post(import_url, data=json.dumps(stream_import),
use_json=True)
self._check_import_image_response(import_response)
new_tags = [
image['tag']
for image in import_response.json().get('status', {}).get('images', [])]
logger.debug("tags after import: %r", new_tags)
return True | Import image tags from a Docker registry into an ImageStream
:return: bool, whether tags were imported | Below is the the instruction that describes the task:
### Input:
Import image tags from a Docker registry into an ImageStream
:return: bool, whether tags were imported
### Response:
def import_image_tags(self, name, stream_import, tags, repository, insecure):
"""
Import image tags from a Docker registry into an ImageStream
:return: bool, whether tags were imported
"""
# Get the JSON for the ImageStream
imagestream_json = self.get_image_stream(name).json()
logger.debug("imagestream: %r", imagestream_json)
changed = False
# existence of dockerImageRepository is limiting how many tags are updated
if 'dockerImageRepository' in imagestream_json.get('spec', {}):
logger.debug("Removing 'dockerImageRepository' from ImageStream %s", name)
imagestream_json['spec'].pop('dockerImageRepository')
changed = True
all_annotations = imagestream_json.get('metadata', {}).get('annotations', {})
# remove annotations about registry, since method will get them as arguments
for annotation in ANNOTATION_SOURCE_REPO, ANNOTATION_INSECURE_REPO:
if annotation in all_annotations:
imagestream_json['metadata']['annotations'].pop(annotation)
changed = True
if changed:
imagestream_json = self.update_image_stream(name, imagestream_json).json()
# Note the tags before import
oldtags = imagestream_json.get('status', {}).get('tags', [])
logger.debug("tags before import: %r", oldtags)
stream_import['metadata']['name'] = name
stream_import['spec']['images'] = []
tags_set = set(tags) if tags else set()
if not tags_set:
logger.debug('No tags to import')
return False
for tag in tags_set:
image_import = {
'from': {"kind": "DockerImage",
"name": '{}:{}'.format(repository, tag)},
'to': {'name': tag},
'importPolicy': {'insecure': insecure},
# referencePolicy will default to "type: source"
# so we don't have to explicitly set it
}
stream_import['spec']['images'].append(image_import)
import_url = self._build_url("imagestreamimports/")
import_response = self._post(import_url, data=json.dumps(stream_import),
use_json=True)
self._check_import_image_response(import_response)
new_tags = [
image['tag']
for image in import_response.json().get('status', {}).get('images', [])]
logger.debug("tags after import: %r", new_tags)
return True |
def from_ad_date(cls, date):
""" Gets a NepDate object from gregorian calendar date """
functions.check_valid_ad_range(date)
days = values.START_EN_DATE - date
# Add the required number of days to the start nepali date
start_date = NepDate(values.START_NP_YEAR, 1, 1)
# No need to update as addition already calls update
return start_date + (date - values.START_EN_DATE) | Gets a NepDate object from gregorian calendar date | Below is the the instruction that describes the task:
### Input:
Gets a NepDate object from gregorian calendar date
### Response:
def from_ad_date(cls, date):
""" Gets a NepDate object from gregorian calendar date """
functions.check_valid_ad_range(date)
days = values.START_EN_DATE - date
# Add the required number of days to the start nepali date
start_date = NepDate(values.START_NP_YEAR, 1, 1)
# No need to update as addition already calls update
return start_date + (date - values.START_EN_DATE) |
def _load_tmp_fact(filepath):
"""
Load an 'ongoing fact' from a given location.
Args:
filepath: Full path to the tmpfile location.
Returns:
hamster_lib.Fact: ``Fact`` representing the 'ongoing fact'. Returns ``False``
if no file was found.
Raises:
TypeError: If for some reason our stored instance is no instance of
``hamster_lib.Fact``.
"""
from hamster_lib import Fact
try:
with open(filepath, 'rb') as fobj:
fact = pickle.load(fobj)
except IOError:
fact = False
else:
if not isinstance(fact, Fact):
raise TypeError(_(
"Something went wrong. It seems our pickled file does not contain"
" valid Fact instance. [Content: '{content}'; Type: {type}".format(
content=fact, type=type(fact))
))
return fact | Load an 'ongoing fact' from a given location.
Args:
filepath: Full path to the tmpfile location.
Returns:
hamster_lib.Fact: ``Fact`` representing the 'ongoing fact'. Returns ``False``
if no file was found.
Raises:
TypeError: If for some reason our stored instance is no instance of
``hamster_lib.Fact``. | Below is the the instruction that describes the task:
### Input:
Load an 'ongoing fact' from a given location.
Args:
filepath: Full path to the tmpfile location.
Returns:
hamster_lib.Fact: ``Fact`` representing the 'ongoing fact'. Returns ``False``
if no file was found.
Raises:
TypeError: If for some reason our stored instance is no instance of
``hamster_lib.Fact``.
### Response:
def _load_tmp_fact(filepath):
"""
Load an 'ongoing fact' from a given location.
Args:
filepath: Full path to the tmpfile location.
Returns:
hamster_lib.Fact: ``Fact`` representing the 'ongoing fact'. Returns ``False``
if no file was found.
Raises:
TypeError: If for some reason our stored instance is no instance of
``hamster_lib.Fact``.
"""
from hamster_lib import Fact
try:
with open(filepath, 'rb') as fobj:
fact = pickle.load(fobj)
except IOError:
fact = False
else:
if not isinstance(fact, Fact):
raise TypeError(_(
"Something went wrong. It seems our pickled file does not contain"
" valid Fact instance. [Content: '{content}'; Type: {type}".format(
content=fact, type=type(fact))
))
return fact |
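The load half shown above pairs with an equally small save step. Below is a hedged round-trip sketch of the same pickle pattern; the Fact class here is a local stand-in, since hamster_lib may not be installed, and the file path is an arbitrary temporary location.
import os
import pickle
import tempfile

class Fact:                                   # stand-in for hamster_lib.Fact
    def __init__(self, activity):
        self.activity = activity

filepath = os.path.join(tempfile.gettempdir(), 'ongoing_fact.pickle')
with open(filepath, 'wb') as fobj:            # save the 'ongoing fact'
    pickle.dump(Fact('writing docs'), fobj)

try:
    with open(filepath, 'rb') as fobj:        # load it back, as in _load_tmp_fact
        fact = pickle.load(fobj)
except IOError:
    fact = False
print(fact.activity if fact else fact)        # writing docs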
def _to_zipfile(self, file_generator):
"""Convert files to zip archive.
:return: None
:rtype: :py:obj:`None`
"""
with zipfile.ZipFile(file_generator.to_path, mode="w", compression=zipfile.ZIP_DEFLATED) as outfile:
for f in file_generator:
outpath = self._output_path(f.source, file_generator.to_format, archive=True)
outfile.writestr(outpath, f.writestr(file_generator.to_format)) | Convert files to zip archive.
:return: None
:rtype: :py:obj:`None` | Below is the the instruction that describes the task:
### Input:
Convert files to zip archive.
:return: None
:rtype: :py:obj:`None`
### Response:
def _to_zipfile(self, file_generator):
"""Convert files to zip archive.
:return: None
:rtype: :py:obj:`None`
"""
with zipfile.ZipFile(file_generator.to_path, mode="w", compression=zipfile.ZIP_DEFLATED) as outfile:
for f in file_generator:
outpath = self._output_path(f.source, file_generator.to_format, archive=True)
outfile.writestr(outpath, f.writestr(file_generator.to_format)) |
def subset(args):
"""
%prog subset pairsfile ksfile1 ksfile2 ... -o pairs.ks
Subset some pre-calculated ks ka values (in ksfile) according to pairs
in tab delimited pairsfile/anchorfile.
"""
p = OptionParser(subset.__doc__)
p.add_option("--noheader", action="store_true",
help="don't write ksfile header line [default: %default]")
p.add_option("--block", action="store_true",
help="preserve block structure in input [default: %default]")
p.set_stripnames()
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
pairsfile, ksfiles = args[0], args[1:]
noheader = opts.noheader
block = opts.block
if block:
noheader = True
outfile = opts.outfile
ksvals = {}
for ksfile in ksfiles:
ksvals.update(dict((line.name, line) for line in \
KsFile(ksfile, strip_names=opts.strip_names)))
fp = open(pairsfile)
fw = must_open(outfile, "w")
if not noheader:
print(fields, file=fw)
i = j = 0
for row in fp:
if row[0] == '#':
if block:
print(row.strip(), file=fw)
continue
a, b = row.split()[:2]
name = ";".join((a, b))
if name not in ksvals:
name = ";".join((b, a))
if name not in ksvals:
j += 1
print("\t".join((a, b, ".", ".")), file=fw)
continue
ksline = ksvals[name]
if block:
print("\t".join(str(x) for x in (a, b, ksline.ks)), file=fw)
else:
ksline.name = ";".join((a, b))
print(ksline, file=fw)
i += 1
fw.close()
logging.debug("{0} pairs not found in ksfiles".format(j))
logging.debug("{0} ks records written to `{1}`".format(i, outfile))
return outfile | %prog subset pairsfile ksfile1 ksfile2 ... -o pairs.ks
Subset some pre-calculated ks ka values (in ksfile) according to pairs
in tab delimited pairsfile/anchorfile. | Below is the the instruction that describes the task:
### Input:
%prog subset pairsfile ksfile1 ksfile2 ... -o pairs.ks
Subset some pre-calculated ks ka values (in ksfile) according to pairs
in tab delimited pairsfile/anchorfile.
### Response:
def subset(args):
"""
%prog subset pairsfile ksfile1 ksfile2 ... -o pairs.ks
Subset some pre-calculated ks ka values (in ksfile) according to pairs
in tab delimited pairsfile/anchorfile.
"""
p = OptionParser(subset.__doc__)
p.add_option("--noheader", action="store_true",
help="don't write ksfile header line [default: %default]")
p.add_option("--block", action="store_true",
help="preserve block structure in input [default: %default]")
p.set_stripnames()
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
pairsfile, ksfiles = args[0], args[1:]
noheader = opts.noheader
block = opts.block
if block:
noheader = True
outfile = opts.outfile
ksvals = {}
for ksfile in ksfiles:
ksvals.update(dict((line.name, line) for line in \
KsFile(ksfile, strip_names=opts.strip_names)))
fp = open(pairsfile)
fw = must_open(outfile, "w")
if not noheader:
print(fields, file=fw)
i = j = 0
for row in fp:
if row[0] == '#':
if block:
print(row.strip(), file=fw)
continue
a, b = row.split()[:2]
name = ";".join((a, b))
if name not in ksvals:
name = ";".join((b, a))
if name not in ksvals:
j += 1
print("\t".join((a, b, ".", ".")), file=fw)
continue
ksline = ksvals[name]
if block:
print("\t".join(str(x) for x in (a, b, ksline.ks)), file=fw)
else:
ksline.name = ";".join((a, b))
print(ksline, file=fw)
i += 1
fw.close()
logging.debug("{0} pairs not found in ksfiles".format(j))
logging.debug("{0} ks records written to `{1}`".format(i, outfile))
return outfile |
def copy(self):
"""Return a copy"""
mapping = OrderedDict(self.mapping.items())
return self.__class__(self.key, self.value, mapping) | Return a copy | Below is the the instruction that describes the task:
### Input:
Return a copy
### Response:
def copy(self):
"""Return a copy"""
mapping = OrderedDict(self.mapping.items())
return self.__class__(self.key, self.value, mapping) |
def rename(self, old_key, new_key):
"""Rename a file.
:param old_key: Old key that holds the object.
:param new_key: New key that will hold the object.
:returns: The object that has been renamed.
"""
assert new_key not in self
assert old_key != new_key
file_ = self[old_key]
old_data = self.filesmap[old_key]
# Create a new version with the new name
obj = ObjectVersion.create(
bucket=self.bucket, key=new_key,
_file_id=file_.obj.file_id
)
# Delete old key
self.filesmap[new_key] = self.file_cls(obj, old_data).dumps()
del self[old_key]
return obj | Rename a file.
:param old_key: Old key that holds the object.
:param new_key: New key that will hold the object.
:returns: The object that has been renamed. | Below is the the instruction that describes the task:
### Input:
Rename a file.
:param old_key: Old key that holds the object.
:param new_key: New key that will hold the object.
:returns: The object that has been renamed.
### Response:
def rename(self, old_key, new_key):
"""Rename a file.
:param old_key: Old key that holds the object.
:param new_key: New key that will hold the object.
:returns: The object that has been renamed.
"""
assert new_key not in self
assert old_key != new_key
file_ = self[old_key]
old_data = self.filesmap[old_key]
# Create a new version with the new name
obj = ObjectVersion.create(
bucket=self.bucket, key=new_key,
_file_id=file_.obj.file_id
)
# Delete old key
self.filesmap[new_key] = self.file_cls(obj, old_data).dumps()
del self[old_key]
return obj |
def ows_security_tween_factory(handler, registry):
"""A tween factory which produces a tween which raises an exception
if access to OWS service is not allowed."""
security = owssecurity_factory(registry)
def ows_security_tween(request):
try:
security.check_request(request)
return handler(request)
except OWSException as err:
logger.exception("security check failed.")
return err
except Exception as err:
logger.exception("unknown error")
return OWSNoApplicableCode("{}".format(err))
return ows_security_tween | A tween factory which produces a tween which raises an exception
if access to OWS service is not allowed. | Below is the the instruction that describes the task:
### Input:
A tween factory which produces a tween which raises an exception
if access to OWS service is not allowed.
### Response:
def ows_security_tween_factory(handler, registry):
"""A tween factory which produces a tween which raises an exception
if access to OWS service is not allowed."""
security = owssecurity_factory(registry)
def ows_security_tween(request):
try:
security.check_request(request)
return handler(request)
except OWSException as err:
logger.exception("security check failed.")
return err
except Exception as err:
logger.exception("unknown error")
return OWSNoApplicableCode("{}".format(err))
return ows_security_tween |
def __potential_connection_failure(self, e):
""" OperationalError's are emitted by the _mysql library for
almost every error code emitted by MySQL. Because of this we
verify that the error is actually a connection error before
terminating the connection and firing off a PoolConnectionException
"""
try:
self._conn.query('SELECT 1')
except (IOError, _mysql.OperationalError):
# ok, it's actually an issue.
self.__handle_connection_failure(e)
else:
# seems ok, probably programmer error
raise _mysql.DatabaseError(*e.args) | OperationalError's are emitted by the _mysql library for
almost every error code emitted by MySQL. Because of this we
verify that the error is actually a connection error before
terminating the connection and firing off a PoolConnectionException | Below is the the instruction that describes the task:
### Input:
OperationalError's are emitted by the _mysql library for
almost every error code emitted by MySQL. Because of this we
verify that the error is actually a connection error before
terminating the connection and firing off a PoolConnectionException
### Response:
def __potential_connection_failure(self, e):
""" OperationalError's are emitted by the _mysql library for
almost every error code emitted by MySQL. Because of this we
verify that the error is actually a connection error before
terminating the connection and firing off a PoolConnectionException
"""
try:
self._conn.query('SELECT 1')
except (IOError, _mysql.OperationalError):
# ok, it's actually an issue.
self.__handle_connection_failure(e)
else:
# seems ok, probably programmer error
raise _mysql.DatabaseError(*e.args) |
def current(config, **kwargs):
"""
Display the current revision for a database.
"""
with alembic_lock(
config.registry["sqlalchemy.engine"], config.alembic_config()
) as alembic_config:
alembic.command.current(alembic_config, **kwargs) | Display the current revision for a database. | Below is the the instruction that describes the task:
### Input:
Display the current revision for a database.
### Response:
def current(config, **kwargs):
"""
Display the current revision for a database.
"""
with alembic_lock(
config.registry["sqlalchemy.engine"], config.alembic_config()
) as alembic_config:
alembic.command.current(alembic_config, **kwargs) |
def get_encoded_url_to_dict(string):
"""
Converts an encoded URL to a dict.
Example: given string = 'a=1&b=2' it returns {'a': 1, 'b': 2}
"""
data = urllib.parse.parse_qsl(string, keep_blank_values=True)
data = dict(data)
return data | Converts an encoded URL to a dict.
Example: given string = 'a=1&b=2' it returns {'a': 1, 'b': 2} | Below is the the instruction that describes the task:
### Input:
Converts an encoded URL to a dict.
Example: given string = 'a=1&b=2' it returns {'a': 1, 'b': 2}
### Response:
def get_encoded_url_to_dict(string):
"""
Converts an encoded URL to a dict.
Example: given string = 'a=1&b=2' it returns {'a': 1, 'b': 2}
"""
data = urllib.parse.parse_qsl(string, keep_blank_values=True)
data = dict(data)
return data |
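Since this wraps the standard library directly, the behaviour is easy to confirm. A minimal check (note that parse_qsl returns string values, so 'a=1&b=2' yields {'a': '1', 'b': '2'} rather than integers):
import urllib.parse

query = 'a=1&b=2&flag='
print(dict(urllib.parse.parse_qsl(query, keep_blank_values=True)))
# {'a': '1', 'b': '2', 'flag': ''}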
def ccx(self, ctl1, ctl2, tgt):
"""Apply Toffoli to from ctl1 and ctl2 to tgt."""
return self.append(ToffoliGate(), [ctl1, ctl2, tgt], []) | Apply Toffoli from ctl1 and ctl2 to tgt. | Below is the the instruction that describes the task:
### Input:
Apply Toffoli from ctl1 and ctl2 to tgt.
### Response:
def ccx(self, ctl1, ctl2, tgt):
"""Apply Toffoli to from ctl1 and ctl2 to tgt."""
return self.append(ToffoliGate(), [ctl1, ctl2, tgt], []) |
def handle_combo(self,combo,symbol,modifiers,release=False,mod=True):
"""
Handles a key combination and dispatches associated events.
First, all keybind handlers registered via :py:meth:`add` will be handled,
then the pyglet event :peng3d:pgevent:`on_key_combo` with params ``(combo,symbol,modifiers,release,mod)`` is sent to the :py:class:`Peng()` instance.
Also sends the events :peng3d:event:`peng3d:keybind.combo`\, :peng3d:event:`peng3d:keybind.combo.press` and :peng3d:event`peng3d:keybind.combo.release`\ .
:params str combo: Key combination pressed
:params int symbol: Key pressed, passed from the same argument within pyglet
:params int modifiers: Modifiers held while the key was pressed
:params bool release: If the combo was released
:params bool mod: If the combo was sent without mods
"""
if self.peng.cfg["controls.keybinds.debug"]:
print("combo: nm=%s %s"%(mod,combo))
if mod:
for kbname in self.keybinds.get(combo,[]):
self.kbname[kbname](symbol,modifiers,release)
else:
for kbname in self.keybinds_nm.get(combo,[]):
self.kbname[kbname](symbol,modifiers,release)
self.peng.sendPygletEvent("on_key_combo",(combo,symbol,modifiers,release,mod))
self.peng.sendEvent("peng3d:keybind.combo",{"peng":self.peng,"combo":combo,"symbol":symbol,"modifiers":modifiers,"release":release,"mod":mod})
if release:
self.peng.sendEvent("peng3d:keybind.combo.release",{"peng":self.peng,"combo":combo,"symbol":symbol,"modifiers":modifiers,"release":release,"mod":mod})
else:
self.peng.sendEvent("peng3d:keybind.combo.press",{"peng":self.peng,"combo":combo,"symbol":symbol,"modifiers":modifiers,"release":release,"mod":mod}) | Handles a key combination and dispatches associated events.
First, all keybind handlers registered via :py:meth:`add` will be handled,
then the pyglet event :peng3d:pgevent:`on_key_combo` with params ``(combo,symbol,modifiers,release,mod)`` is sent to the :py:class:`Peng()` instance.
Also sends the events :peng3d:event:`peng3d:keybind.combo`\, :peng3d:event:`peng3d:keybind.combo.press` and :peng3d:event`peng3d:keybind.combo.release`\ .
:params str combo: Key combination pressed
:params int symbol: Key pressed, passed from the same argument within pyglet
:params int modifiers: Modifiers held while the key was pressed
:params bool release: If the combo was released
:params bool mod: If the combo was sent without mods | Below is the the instruction that describes the task:
### Input:
Handles a key combination and dispatches associated events.
First, all keybind handlers registered via :py:meth:`add` will be handled,
then the pyglet event :peng3d:pgevent:`on_key_combo` with params ``(combo,symbol,modifiers,release,mod)`` is sent to the :py:class:`Peng()` instance.
Also sends the events :peng3d:event:`peng3d:keybind.combo`\, :peng3d:event:`peng3d:keybind.combo.press` and :peng3d:event`peng3d:keybind.combo.release`\ .
:params str combo: Key combination pressed
:params int symbol: Key pressed, passed from the same argument within pyglet
:params int modifiers: Modifiers held while the key was pressed
:params bool release: If the combo was released
:params bool mod: If the combo was sent without mods
### Response:
def handle_combo(self,combo,symbol,modifiers,release=False,mod=True):
"""
Handles a key combination and dispatches associated events.
First, all keybind handlers registered via :py:meth:`add` will be handled,
then the pyglet event :peng3d:pgevent:`on_key_combo` with params ``(combo,symbol,modifiers,release,mod)`` is sent to the :py:class:`Peng()` instance.
Also sends the events :peng3d:event:`peng3d:keybind.combo`\, :peng3d:event:`peng3d:keybind.combo.press` and :peng3d:event`peng3d:keybind.combo.release`\ .
:params str combo: Key combination pressed
:params int symbol: Key pressed, passed from the same argument within pyglet
:params int modifiers: Modifiers held while the key was pressed
:params bool release: If the combo was released
:params bool mod: If the combo was sent without mods
"""
if self.peng.cfg["controls.keybinds.debug"]:
print("combo: nm=%s %s"%(mod,combo))
if mod:
for kbname in self.keybinds.get(combo,[]):
self.kbname[kbname](symbol,modifiers,release)
else:
for kbname in self.keybinds_nm.get(combo,[]):
self.kbname[kbname](symbol,modifiers,release)
self.peng.sendPygletEvent("on_key_combo",(combo,symbol,modifiers,release,mod))
self.peng.sendEvent("peng3d:keybind.combo",{"peng":self.peng,"combo":combo,"symbol":symbol,"modifiers":modifiers,"release":release,"mod":mod})
if release:
self.peng.sendEvent("peng3d:keybind.combo.release",{"peng":self.peng,"combo":combo,"symbol":symbol,"modifiers":modifiers,"release":release,"mod":mod})
else:
self.peng.sendEvent("peng3d:keybind.combo.press",{"peng":self.peng,"combo":combo,"symbol":symbol,"modifiers":modifiers,"release":release,"mod":mod}) |
def main():
"""
This is the main body of the process that does the work.
Summary:
- load the raw data
- read in rules list
- create log events for AIKIF according to rules [map]
- create new facts / reports based on rules [report]
OUTPUT =
AIKIF mapping : Date_of_transaction => event
AIKIF mapping : Amount => fact
AIKIF mapping : Details => location
New column : trans_type = DB WHERE amount > 0 ELSE CR
summing : details contains "CALTEX" into Travel Expense
Done
"""
print('AIKIF example: Processing Finance data\n')
data = read_bank_statements('your_statement.csv')
print(data)
maps = load_column_maps()
rules = load_rules()
for m in maps:
print('AIKIF mapping : ' + m[0] + ' => ' + m[1])
for rule in rules:
#print(rule)
if rule[0] == 'agg':
print('summing : ' + rule[1] + ' into ' + rule[2] )
elif rule[0] == 'derive':
print('New column : ' + rule[1] + ' = ' + rule[2] + ' WHERE ' + rule[1] + ' ELSE ' + rule[3] )
print('Done\n') | This is the main body of the process that does the work.
Summary:
- load the raw data
- read in rules list
- create log events for AIKIF according to rules [map]
- create new facts / reports based on rules [report]
OUTPUT =
AIKIF mapping : Date_of_transaction => event
AIKIF mapping : Amount => fact
AIKIF mapping : Details => location
New column : trans_type = DB WHERE amount > 0 ELSE CR
summing : details contains "CALTEX" into Travel Expense
Done | Below is the the instruction that describes the task:
### Input:
This is the main body of the process that does the work.
Summary:
- load the raw data
- read in rules list
- create log events for AIKIF according to rules [map]
- create new facts / reports based on rules [report]
OUTPUT =
AIKIF mapping : Date_of_transaction => event
AIKIF mapping : Amount => fact
AIKIF mapping : Details => location
New column : trans_type = DB WHERE amount > 0 ELSE CR
summing : details contains "CALTEX" into Travel Expense
Done
### Response:
def main():
"""
This is the main body of the process that does the work.
Summary:
- load the raw data
- read in rules list
- create log events for AIKIF according to rules [map]
- create new facts / reports based on rules [report]
OUTPUT =
AIKIF mapping : Date_of_transaction => event
AIKIF mapping : Amount => fact
AIKIF mapping : Details => location
New column : trans_type = DB WHERE amount > 0 ELSE CR
summing : details contains "CALTEX" into Travel Expense
Done
"""
print('AIKIF example: Processing Finance data\n')
data = read_bank_statements('your_statement.csv')
print(data)
maps = load_column_maps()
rules = load_rules()
for m in maps:
print('AIKIF mapping : ' + m[0] + ' => ' + m[1])
for rule in rules:
#print(rule)
if rule[0] == 'agg':
print('summing : ' + rule[1] + ' into ' + rule[2] )
elif rule[0] == 'derive':
print('New column : ' + rule[1] + ' = ' + rule[2] + ' WHERE ' + rule[1] + ' ELSE ' + rule[3] )
print('Done\n') |
def get_datetime_now():
"""
Returns datetime object with current point in time.
In Django 1.4+ it uses Django's django.utils.timezone.now() which returns
an aware or naive datetime that represents the current point in time
when ``USE_TZ`` in project's settings is True or False respectively.
In older versions of Django it uses datetime.datetime.now().
"""
try:
from django.utils import timezone
return timezone.now()
except ImportError:
return datetime.datetime.now() | Returns datetime object with current point in time.
In Django 1.4+ it uses Django's django.utils.timezone.now() which returns
an aware or naive datetime that represents the current point in time
when ``USE_TZ`` in project's settings is True or False respectively.
In older versions of Django it uses datetime.datetime.now(). | Below is the the instruction that describes the task:
### Input:
Returns datetime object with current point in time.
In Django 1.4+ it uses Django's django.utils.timezone.now() which returns
an aware or naive datetime that represents the current point in time
when ``USE_TZ`` in project's settings is True or False respectively.
In older versions of Django it uses datetime.datetime.now().
### Response:
def get_datetime_now():
"""
Returns datetime object with current point in time.
In Django 1.4+ it uses Django's django.utils.timezone.now() which returns
an aware or naive datetime that represents the current point in time
when ``USE_TZ`` in project's settings is True or False respectively.
In older versions of Django it uses datetime.datetime.now().
"""
try:
from django.utils import timezone
return timezone.now()
except ImportError:
return datetime.datetime.now() |
def triangle_areas(p1,p2,p3):
"""Compute an array of triangle areas given three arrays of triangle pts
p1,p2,p3 - three Nx2 arrays of points
"""
v1 = (p2 - p1).astype(np.float)
v2 = (p3 - p1).astype(np.float)
# Original:
# cross1 = v1[:,1] * v2[:,0]
# cross2 = v2[:,1] * v1[:,0]
# a = (cross1-cross2) / 2
# Memory reduced:
cross1 = v1[:, 1]
cross1 *= v2[:, 0]
cross2 = v2[:, 1]
cross2 *= v1[:, 0]
a = cross1
a -= cross2
a /= 2.0
del v1, v2, cross1, cross2
a = a.copy() # a is a view on v1; shed one dimension.
a = np.abs(a)
#
# Handle small round-off errors
#
a[a<np.finfo(np.float32).eps] = 0
return a | Compute an array of triangle areas given three arrays of triangle pts
p1,p2,p3 - three Nx2 arrays of points | Below is the the instruction that describes the task:
### Input:
Compute an array of triangle areas given three arrays of triangle pts
p1,p2,p3 - three Nx2 arrays of points
### Response:
def triangle_areas(p1,p2,p3):
"""Compute an array of triangle areas given three arrays of triangle pts
p1,p2,p3 - three Nx2 arrays of points
"""
v1 = (p2 - p1).astype(np.float)
v2 = (p3 - p1).astype(np.float)
# Original:
# cross1 = v1[:,1] * v2[:,0]
# cross2 = v2[:,1] * v1[:,0]
# a = (cross1-cross2) / 2
# Memory reduced:
cross1 = v1[:, 1]
cross1 *= v2[:, 0]
cross2 = v2[:, 1]
cross2 *= v1[:, 0]
a = cross1
a -= cross2
a /= 2.0
del v1, v2, cross1, cross2
a = a.copy() # a is a view on v1; shed one dimension.
a = np.abs(a)
#
# Handle small round-off errors
#
a[a<np.finfo(np.float32).eps] = 0
return a |
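The core of the computation is half the magnitude of the 2-D cross product of two edge vectors. A small standalone check with a 3-4-5 right triangle, using the same formula:
import numpy as np

p1 = np.array([[0.0, 0.0]])
p2 = np.array([[4.0, 0.0]])
p3 = np.array([[0.0, 3.0]])
v1, v2 = p2 - p1, p3 - p1
areas = np.abs(v1[:, 1] * v2[:, 0] - v2[:, 1] * v1[:, 0]) / 2.0
print(areas)   # [6.]  -- legs of length 4 and 3 give area 6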
def when_i_send_the_request(context, method):
"""
:type method: str
:type context: behave.runner.Context
"""
data = context.apiRequestData
context.apiRequest = context.apiClient.generic(
method,
data['url'],
data=json.dumps(data['params']),
content_type=data['content-type'],
format=data['format'],
) | :type method: str
:type context: behave.runner.Context | Below is the the instruction that describes the task:
### Input:
:type method: str
:type context: behave.runner.Context
### Response:
def when_i_send_the_request(context, method):
"""
:type method: str
:type context: behave.runner.Context
"""
data = context.apiRequestData
context.apiRequest = context.apiClient.generic(
method,
data['url'],
data=json.dumps(data['params']),
content_type=data['content-type'],
format=data['format'],
) |
def sanitize_word(word):
"""
sanitize a word by removing its accents, special characters, etc
"""
# use an unicode string for `unidecode`
if type(word) == str:
try:
word = word.decode()
except AttributeError:
pass # Python3
# remove trailing spaces
word = word.strip()
# remove stuff between parentheses
word = re.sub(r'\([^)]*\)', '', word)
# remove accents, hyphens & other special chars
word = re.sub(r'[ "\'-;.]+', '', unidecode(word))
# only lowercase
return word.lower() | sanitize a word by removing its accents, special characters, etc | Below is the the instruction that describes the task:
### Input:
sanitize a word by removing its accents, special characters, etc
### Response:
def sanitize_word(word):
"""
sanitize a word by removing its accents, special characters, etc
"""
# use an unicode string for `unidecode`
if type(word) == str:
try:
word = word.decode()
except AttributeError:
pass # Python3
# remove trailing spaces
word = word.strip()
# remove stuff between parentheses
word = re.sub(r'\([^)]*\)', '', word)
# remove accents, hyphens & other special chars
word = re.sub(r'[ "\'-;.]+', '', unidecode(word))
# only lowercase
return word.lower() |
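A quick standalone run of the same pipeline shows the effect of each step; the input string is an arbitrary example and the Unidecode package must be installed (pip install Unidecode).
import re
from unidecode import unidecode

word = '  Crème Brûlée (dessert) '
word = word.strip()
word = re.sub(r'\([^)]*\)', '', word)               # drop the parenthesised part
word = re.sub(r'[ "\'-;.]+', '', unidecode(word))   # transliterate accents, strip separators
print(word.lower())                                 # cremebrulee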