code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k) |
---|---|---|
def exec_command(self, cmd, args='', vdglobals=None, keystrokes=None):
"Execute `cmd` tuple with `vdglobals` as globals and this sheet's attributes as locals. Returns True if user cancelled."
global sheet
sheet = vd.sheets[0]
if not cmd:
debug('no command "%s"' % keystrokes)
return True
if isinstance(cmd, CommandLog):
cmd.replay()
return False
escaped = False
err = ''
if vdglobals is None:
vdglobals = getGlobals()
if not self.vd:
self.vd = vd()
self.sheet = self
try:
self.vd.callHook('preexec', self, cmd, '', keystrokes)
exec(cmd.execstr, vdglobals, LazyMap(self))
except EscapeException as e: # user aborted
status('aborted')
escaped = True
except Exception as e:
debug(cmd.execstr)
err = self.vd.exceptionCaught(e)
escaped = True
try:
self.vd.callHook('postexec', self.vd.sheets[0] if self.vd.sheets else None, escaped, err)
except Exception as e:
self.vd.exceptionCaught(e)
catchapply(self.checkCursor)
self.vd.refresh()
return escaped | Execute `cmd` tuple with `vdglobals` as globals and this sheet's attributes as locals. Returns True if user cancelled. | Below is the instruction that describes the task:
### Input:
Execute `cmd` tuple with `vdglobals` as globals and this sheet's attributes as locals. Returns True if user cancelled.
### Response:
def exec_command(self, cmd, args='', vdglobals=None, keystrokes=None):
"Execute `cmd` tuple with `vdglobals` as globals and this sheet's attributes as locals. Returns True if user cancelled."
global sheet
sheet = vd.sheets[0]
if not cmd:
debug('no command "%s"' % keystrokes)
return True
if isinstance(cmd, CommandLog):
cmd.replay()
return False
escaped = False
err = ''
if vdglobals is None:
vdglobals = getGlobals()
if not self.vd:
self.vd = vd()
self.sheet = self
try:
self.vd.callHook('preexec', self, cmd, '', keystrokes)
exec(cmd.execstr, vdglobals, LazyMap(self))
except EscapeException as e: # user aborted
status('aborted')
escaped = True
except Exception as e:
debug(cmd.execstr)
err = self.vd.exceptionCaught(e)
escaped = True
try:
self.vd.callHook('postexec', self.vd.sheets[0] if self.vd.sheets else None, escaped, err)
except Exception as e:
self.vd.exceptionCaught(e)
catchapply(self.checkCursor)
self.vd.refresh()
return escaped |
def find_systemjs_location():
"""
Figure out where `jspm_packages/system.js` will be put by JSPM.
"""
location = os.path.abspath(os.path.dirname(locate_package_json()))
conf = parse_package_json()
if 'jspm' in conf:
conf = conf['jspm']
try:
conf = conf['directories']
except TypeError:
raise ImproperlyConfigured("`package.json` doesn't appear to be a valid json object. "
"Location: %s" % location)
except KeyError:
raise ImproperlyConfigured("The `directories` configuarion was not found in package.json. "
"Please check your jspm install and/or configuarion. `package.json` "
"location: %s" % location)
# check for explicit location, else fall back to the default as jspm does
jspm_packages = conf['packages'] if 'packages' in conf else 'jspm_packages'
base = conf['baseURL'] if 'baseURL' in conf else '.'
return os.path.join(location, base, jspm_packages, 'system.js') | Figure out where `jspm_packages/system.js` will be put by JSPM. | Below is the instruction that describes the task:
### Input:
Figure out where `jspm_packages/system.js` will be put by JSPM.
### Response:
def find_systemjs_location():
"""
Figure out where `jspm_packages/system.js` will be put by JSPM.
"""
location = os.path.abspath(os.path.dirname(locate_package_json()))
conf = parse_package_json()
if 'jspm' in conf:
conf = conf['jspm']
try:
conf = conf['directories']
except TypeError:
raise ImproperlyConfigured("`package.json` doesn't appear to be a valid json object. "
"Location: %s" % location)
except KeyError:
raise ImproperlyConfigured("The `directories` configuarion was not found in package.json. "
"Please check your jspm install and/or configuarion. `package.json` "
"location: %s" % location)
# check for explicit location, else fall back to the default as jspm does
jspm_packages = conf['packages'] if 'packages' in conf else 'jspm_packages'
base = conf['baseURL'] if 'baseURL' in conf else '.'
return os.path.join(location, base, jspm_packages, 'system.js') |
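A worked example of the fallback path assembly above, independent of JSPM itself: when package.json carries no explicit directories entry, system.js resolves next to it. The /srv/project location is made up for illustration.

import os

location = '/srv/project'  # hypothetical dirname of package.json
conf = {}                  # no explicit jspm directories configured
jspm_packages = conf['packages'] if 'packages' in conf else 'jspm_packages'
base = conf['baseURL'] if 'baseURL' in conf else '.'
print(os.path.join(location, base, jspm_packages, 'system.js'))
# -> /srv/project/./jspm_packages/system.js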
def function(self, func_name):
"""
Returns the Function instance associated with the given func_name, or raises a
RpcException if no function matches.
"""
if func_name in self.functions:
return self.functions[func_name]
else:
raise RpcException(ERR_METHOD_NOT_FOUND,
"%s: Unknown function: '%s'", self.name, func_name) | Returns the Function instance associated with the given func_name, or raises a
RpcException if no function matches. | Below is the instruction that describes the task:
### Input:
Returns the Function instance associated with the given func_name, or raises a
RpcException if no function matches.
### Response:
def function(self, func_name):
"""
Returns the Function instance associated with the given func_name, or raises a
RpcException if no function matches.
"""
if func_name in self.functions:
return self.functions[func_name]
else:
raise RpcException(ERR_METHOD_NOT_FOUND,
"%s: Unknown function: '%s'", self.name, func_name) |
def schema_remove(dbname, name,
user=None,
db_user=None, db_password=None,
db_host=None, db_port=None):
'''
Removes a schema from the Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.schema_remove dbname schemaname
dbname
Database name we work on
schemaname
The schema's name we'll remove
user
System user all operations should be performed on behalf of
db_user
database username if different from config or default
db_password
user password if any password for a specified user
db_host
Database host if different from config or default
db_port
Database port if different from config or default
'''
# check if schema exists
if not schema_exists(dbname, name, user=user,
db_user=db_user, db_password=db_password,
db_host=db_host, db_port=db_port):
log.info('Schema \'%s\' does not exist in \'%s\'', name, dbname)
return False
# schema exists, proceed
sub_cmd = 'DROP SCHEMA "{0}"'.format(name)
_psql_prepare_and_run(
['-c', sub_cmd],
runas=user,
maintenance_db=dbname,
host=db_host, user=db_user, port=db_port, password=db_password)
if not schema_exists(dbname, name, user,
db_user=db_user, db_password=db_password,
db_host=db_host, db_port=db_port):
return True
else:
log.info('Failed to delete schema \'%s\'.', name)
return False | Removes a schema from the Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.schema_remove dbname schemaname
dbname
Database name we work on
schemaname
The schema's name we'll remove
user
System user all operations should be performed on behalf of
db_user
database username if different from config or default
db_password
user password if any password for a specified user
db_host
Database host if different from config or default
db_port
Database port if different from config or default | Below is the instruction that describes the task:
### Input:
Removes a schema from the Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.schema_remove dbname schemaname
dbname
Database name we work on
schemaname
The schema's name we'll remove
user
System user all operations should be performed on behalf of
db_user
database username if different from config or default
db_password
user password if any password for a specified user
db_host
Database host if different from config or default
db_port
Database port if different from config or default
### Response:
def schema_remove(dbname, name,
user=None,
db_user=None, db_password=None,
db_host=None, db_port=None):
'''
Removes a schema from the Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.schema_remove dbname schemaname
dbname
Database name we work on
schemaname
The schema's name we'll remove
user
System user all operations should be performed on behalf of
db_user
database username if different from config or default
db_password
user password if any password for a specified user
db_host
Database host if different from config or default
db_port
Database port if different from config or default
'''
# check if schema exists
if not schema_exists(dbname, name, user=user,
db_user=db_user, db_password=db_password,
db_host=db_host, db_port=db_port):
log.info('Schema \'%s\' does not exist in \'%s\'', name, dbname)
return False
# schema exists, proceed
sub_cmd = 'DROP SCHEMA "{0}"'.format(name)
_psql_prepare_and_run(
['-c', sub_cmd],
runas=user,
maintenance_db=dbname,
host=db_host, user=db_user, port=db_port, password=db_password)
if not schema_exists(dbname, name, user,
db_user=db_user, db_password=db_password,
db_host=db_host, db_port=db_port):
return True
else:
log.info('Failed to delete schema \'%s\'.', name)
return False |
def get_table_rate_shipping_by_id(cls, table_rate_shipping_id, **kwargs):
"""Find TableRateShipping
Return single instance of TableRateShipping by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_table_rate_shipping_by_id(table_rate_shipping_id, async=True)
>>> result = thread.get()
:param async bool
:param str table_rate_shipping_id: ID of tableRateShipping to return (required)
:return: TableRateShipping
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_table_rate_shipping_by_id_with_http_info(table_rate_shipping_id, **kwargs)
else:
(data) = cls._get_table_rate_shipping_by_id_with_http_info(table_rate_shipping_id, **kwargs)
return data | Find TableRateShipping
Return single instance of TableRateShipping by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_table_rate_shipping_by_id(table_rate_shipping_id, async=True)
>>> result = thread.get()
:param async bool
:param str table_rate_shipping_id: ID of tableRateShipping to return (required)
:return: TableRateShipping
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Find TableRateShipping
Return single instance of TableRateShipping by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_table_rate_shipping_by_id(table_rate_shipping_id, async=True)
>>> result = thread.get()
:param async bool
:param str table_rate_shipping_id: ID of tableRateShipping to return (required)
:return: TableRateShipping
If the method is called asynchronously,
returns the request thread.
### Response:
def get_table_rate_shipping_by_id(cls, table_rate_shipping_id, **kwargs):
"""Find TableRateShipping
Return single instance of TableRateShipping by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_table_rate_shipping_by_id(table_rate_shipping_id, async=True)
>>> result = thread.get()
:param async bool
:param str table_rate_shipping_id: ID of tableRateShipping to return (required)
:return: TableRateShipping
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_table_rate_shipping_by_id_with_http_info(table_rate_shipping_id, **kwargs)
else:
(data) = cls._get_table_rate_shipping_by_id_with_http_info(table_rate_shipping_id, **kwargs)
return data |
def reconnect(self):
"""
Handle reconnect logic if connection
to crossbar is lost
"""
connect_attempt = 0
max_retries = self.config['max_reconnect_retries']
logging.info('attempting to reconnect to crossbar')
runner = self.setup_runner()
while True:
if connect_attempt == max_retries:
logging.info('max retries reached; stopping service')
sys.exit(1)
self.check_event_loop()
try:
logging.info('waiting 5 seconds')
time.sleep(5)
if self.check_transport_host():
logging.info('waiting 10 seconds to ensure that crossbar has initialized before reconnecting')
time.sleep(10)
runner.run(Component)
else:
logging.error('crossbar host port 8080 not available...')
except RuntimeError as error:
logging.error(error)
except ConnectionRefusedError as error:
logging.error(error)
except ConnectionError as error:
logging.error(error)
except KeyboardInterrupt:
logging.info('User initiated shutdown')
loop = asyncio.get_event_loop()
loop.stop()
sys.exit(1)
connect_attempt += 1 | Handle reconnect logic if connection
to crossbar is lost | Below is the instruction that describes the task:
### Input:
Handle reconnect logic if connection
to crossbar is lost
### Response:
def reconnect(self):
"""
Handle reconnect logic if connection
to crossbar is lost
"""
connect_attempt = 0
max_retries = self.config['max_reconnect_retries']
logging.info('attempting to reconnect to crossbar')
runner = self.setup_runner()
while True:
if connect_attempt == max_retries:
logging.info('max retries reached; stopping service')
sys.exit(1)
self.check_event_loop()
try:
logging.info('waiting 5 seconds')
time.sleep(5)
if self.check_transport_host():
logging.info('waiting 10 seconds to ensure that crossbar has initialized before reconnecting')
time.sleep(10)
runner.run(Component)
else:
logging.error('crossbar host port 8080 not available...')
except RuntimeError as error:
logging.error(error)
except ConnectionRefusedError as error:
logging.error(error)
except ConnectionError as error:
logging.error(error)
except KeyboardInterrupt:
logging.info('User initiated shutdown')
loop = asyncio.get_event_loop()
loop.stop()
sys.exit(1)
connect_attempt += 1 |
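The loop above relies on self.check_transport_host(), which is not shown in this snippet; judging from the log messages it probes port 8080 on the crossbar host. A minimal sketch of such a probe, with hypothetical host/port defaults:

import socket

def check_transport_host(host='localhost', port=8080, timeout=2.0):
    # True when a TCP connection to the crossbar port can be opened.
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False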
def create_from_string(self, string, context=EMPTY_CONTEXT, *args, **kwargs):
"""
Deserializes a new instance from a string.
This is a convenience method that creates a StringIO object and calls create_instance_from_stream().
"""
if not PY2 and not isinstance(string, bytes):
raise TypeError("string should be an instance of bytes in Python 3")
io = StringIO(string)
instance = self.create_from_stream(io, context, *args, **kwargs)
io.close()
return instance | Deserializes a new instance from a string.
This is a convenience method that creates a StringIO object and calls create_instance_from_stream(). | Below is the instruction that describes the task:
### Input:
Deserializes a new instance from a string.
This is a convenience method that creates a StringIO object and calls create_instance_from_stream().
### Response:
def create_from_string(self, string, context=EMPTY_CONTEXT, *args, **kwargs):
"""
Deserializes a new instance from a string.
This is a convenience method that creates a StringIO object and calls create_instance_from_stream().
"""
if not PY2 and not isinstance(string, bytes):
raise TypeError("string should be an instance of bytes in Python 3")
io = StringIO(string)
instance = self.create_from_stream(io, context, *args, **kwargs)
io.close()
return instance |
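The same wrap-read-close pattern in isolation. Because the payload is required to be bytes under Python 3, the StringIO name used above must resolve to a bytes-capable stream; io.BytesIO below is a stand-in, not necessarily the library's actual alias.

import io

payload = b'\x01\x02\x03'
stream = io.BytesIO(payload)
data = stream.read()   # stands in for create_from_stream(stream, ...)
stream.close()
print(data)            # b'\x01\x02\x03'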
def disconnect(self):
'''
Disconnect from the serial port
'''
if self._poll_stop_event:
self._poll_stop_event.set()
if self._driver:
if self.status != 'idle':
self.deactivate()
self._driver.disconnect() | Disconnect from the serial port | Below is the instruction that describes the task:
### Input:
Disconnect from the serial port
### Response:
def disconnect(self):
'''
Disconnect from the serial port
'''
if self._poll_stop_event:
self._poll_stop_event.set()
if self._driver:
if self.status != 'idle':
self.deactivate()
self._driver.disconnect() |
def move(self, from_path, to_path, **kwargs):
"""移动单个文件或目录.
:param from_path: 源文件/目录在网盘中的路径(包括文件名)。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param to_path: 目标文件/目录在网盘中的路径(包括文件名)。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:return: Response 对象
"""
data = {
'from': from_path,
'to': to_path,
}
return self._request('file', 'move', data=data, **kwargs) | Move a single file or directory.
:param from_path: Path of the source file/directory on the cloud drive (including the file name).
.. warning::
* The path length is limited to 1000 characters;
* The path must not contain any of these characters: ``\\\\ ? | " > < : *``;
* A file or path name must not begin or end with ``.``
or a whitespace character; whitespace characters include:
``\\r, \\n, \\t, space, \\0, \\x0B``.
:param to_path: Path of the destination file/directory on the cloud drive (including the file name).
.. warning::
* The path length is limited to 1000 characters;
* The path must not contain any of these characters: ``\\\\ ? | " > < : *``;
* A file or path name must not begin or end with ``.``
or a whitespace character; whitespace characters include:
``\\r, \\n, \\t, space, \\0, \\x0B``.
:return: Response object | Below is the instruction that describes the task:
### Input:
Move a single file or directory.
:param from_path: Path of the source file/directory on the cloud drive (including the file name).
.. warning::
* The path length is limited to 1000 characters;
* The path must not contain any of these characters: ``\\\\ ? | " > < : *``;
* A file or path name must not begin or end with ``.``
or a whitespace character; whitespace characters include:
``\\r, \\n, \\t, space, \\0, \\x0B``.
:param to_path: Path of the destination file/directory on the cloud drive (including the file name).
.. warning::
* The path length is limited to 1000 characters;
* The path must not contain any of these characters: ``\\\\ ? | " > < : *``;
* A file or path name must not begin or end with ``.``
or a whitespace character; whitespace characters include:
``\\r, \\n, \\t, space, \\0, \\x0B``.
:return: Response object
### Response:
def move(self, from_path, to_path, **kwargs):
"""移动单个文件或目录.
:param from_path: 源文件/目录在网盘中的路径(包括文件名)。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param to_path: 目标文件/目录在网盘中的路径(包括文件名)。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:return: Response 对象
"""
data = {
'from': from_path,
'to': to_path,
}
return self._request('file', 'move', data=data, **kwargs) |
def handle_cmd(self, command, application):
"""Handle running a given dot command from a user.
:type command: str
:param command: The full dot command string, e.g. ``.edit``,
or ``.profile prod``.
:type application: AWSShell
:param application: The application object.
"""
parts = command.split()
cmd_name = parts[0][1:]
if cmd_name not in self.HANDLER_CLASSES:
self._unknown_cmd(parts, application)
else:
# Note we expect the class to support no-arg
# instantiation.
return self.HANDLER_CLASSES[cmd_name]().run(parts, application) | Handle running a given dot command from a user.
:type command: str
:param command: The full dot command string, e.g. ``.edit``,
or ``.profile prod``.
:type application: AWSShell
:param application: The application object. | Below is the instruction that describes the task:
### Input:
Handle running a given dot command from a user.
:type command: str
:param command: The full dot command string, e.g. ``.edit``,
or ``.profile prod``.
:type application: AWSShell
:param application: The application object.
### Response:
def handle_cmd(self, command, application):
"""Handle running a given dot command from a user.
:type command: str
:param command: The full dot command string, e.g. ``.edit``,
or ``.profile prod``.
:type application: AWSShell
:param application: The application object.
"""
parts = command.split()
cmd_name = parts[0][1:]
if cmd_name not in self.HANDLER_CLASSES:
self._unknown_cmd(parts, application)
else:
# Note we expect the class to support no-arg
# instantiation.
return self.HANDLER_CLASSES[cmd_name]().run(parts, application) |
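A self-contained sketch of the dispatch convention: strip the leading dot, look up a no-arg-constructible handler class, and run it. ProfileHandler here is a made-up stand-in for the real handler classes.

class ProfileHandler:
    def run(self, parts, application):
        return 'switching profile to %s' % parts[1]

HANDLER_CLASSES = {'profile': ProfileHandler}

command = '.profile prod'
parts = command.split()      # ['.profile', 'prod']
cmd_name = parts[0][1:]      # 'profile'
print(HANDLER_CLASSES[cmd_name]().run(parts, application=None))
# -> switching profile to prod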
def put_str(content, path, share='C$', conn=None, host=None, username=None, password=None):
'''
Wrapper around impacket.smbconnection.putFile() that allows a string to be
uploaded, without first writing it as a local file
'''
if HAS_SMBPROTOCOL:
return _put_str_smbprotocol(
content, path, share, conn=conn, host=host,
username=username, password=password
)
elif HAS_IMPACKET:
return _put_str_impacket(
content, path, share, conn=conn, host=host, username=username, password=password
)
raise MissingSmb("SMB library required (impacket or smbprotocol)") | Wrapper around impacket.smbconnection.putFile() that allows a string to be
uploaded, without first writing it as a local file | Below is the instruction that describes the task:
### Input:
Wrapper around impacket.smbconnection.putFile() that allows a string to be
uploaded, without first writing it as a local file
### Response:
def put_str(content, path, share='C$', conn=None, host=None, username=None, password=None):
'''
Wrapper around impacket.smbconnection.putFile() that allows a string to be
uploaded, without first writing it as a local file
'''
if HAS_SMBPROTOCOL:
return _put_str_smbprotocol(
content, path, share, conn=conn, host=host,
username=username, password=password
)
elif HAS_IMPACKET:
return _put_str_impacket(
content, path, share, conn=conn, host=host, username=username, password=password
)
raise MissingSmb("SMB library required (impacket or smbprotocol)") |
def touch(self):
"""
Touch all of the related models for the relationship.
"""
column = self.get_related().get_updated_at_column()
self.raw_update({column: self.get_related().fresh_timestamp()}) | Touch all of the related models for the relationship. | Below is the instruction that describes the task:
### Input:
Touch all of the related models for the relationship.
### Response:
def touch(self):
"""
Touch all of the related models for the relationship.
"""
column = self.get_related().get_updated_at_column()
self.raw_update({column: self.get_related().fresh_timestamp()}) |
def remove_technique(self, tech):
"""
Remove an exploration technique from a list of active techniques.
:param tech: An ExplorationTechnique object.
:type tech: ExplorationTechnique
"""
if not isinstance(tech, ExplorationTechnique):
raise SimulationManagerError
def _is_overriden(name):
return getattr(tech, name).__code__ is not getattr(ExplorationTechnique, name).__code__
overriden = filter(_is_overriden, ('step', 'filter', 'selector', 'step_state', 'successors'))
hooks = {name: getattr(tech, name) for name in overriden}
HookSet.remove_hooks(self, **hooks)
self._techniques.remove(tech)
return tech | Remove an exploration technique from a list of active techniques.
:param tech: An ExplorationTechnique object.
:type tech: ExplorationTechnique | Below is the instruction that describes the task:
### Input:
Remove an exploration technique from a list of active techniques.
:param tech: An ExplorationTechnique object.
:type tech: ExplorationTechnique
### Response:
def remove_technique(self, tech):
"""
Remove an exploration technique from a list of active techniques.
:param tech: An ExplorationTechnique object.
:type tech: ExplorationTechnique
"""
if not isinstance(tech, ExplorationTechnique):
raise SimulationManagerError
def _is_overriden(name):
return getattr(tech, name).__code__ is not getattr(ExplorationTechnique, name).__code__
overriden = filter(_is_overriden, ('step', 'filter', 'selector', 'step_state', 'successors'))
hooks = {name: getattr(tech, name) for name in overriden}
HookSet.remove_hooks(self, **hooks)
self._techniques.remove(tech)
return tech |
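The override test above never calls the methods; it compares function code objects. A standalone illustration of the same check:

class Base:
    def step(self): pass
    def filter(self): pass

class Tech(Base):
    def step(self): return 'custom'

def _is_overriden(obj, name):
    # An overridden method carries a code object different from the base's.
    return getattr(obj, name).__code__ is not getattr(Base, name).__code__

tech = Tech()
print(_is_overriden(tech, 'step'))    # True
print(_is_overriden(tech, 'filter'))  # False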
def cmp(cls, v1: 'VersionBase', v2: 'VersionBase') -> int:
""" Compares two instances. """
# TODO: type checking
if v1._version > v2._version:
return 1
elif v1._version == v2._version:
return 0
else:
return -1 | Compares two instances. | Below is the instruction that describes the task:
### Input:
Compares two instances.
### Response:
def cmp(cls, v1: 'VersionBase', v2: 'VersionBase') -> int:
""" Compares two instances. """
# TODO: type checking
if v1._version > v2._version:
return 1
elif v1._version == v2._version:
return 0
else:
return -1 |
def _is_file(filename):
"""Check that the size of the thing which is supposed to be a filename has
size greater than zero, without following symbolic links or using
:func:`os.path.isfile`.
:param filename: An object to check.
:rtype: bool
:returns: True if **filename** is file-like, False otherwise.
"""
try:
statinfo = os.lstat(filename)
log.debug("lstat(%r) with type=%s gave us %r"
% (repr(filename), type(filename), repr(statinfo)))
if not (statinfo.st_size > 0):
raise ValueError("'%s' appears to be an empty file!" % filename)
except OSError as oserr:
log.error(oserr)
if filename == '-':
log.debug("Got '-' for filename, assuming sys.stdin...")
return True
except (ValueError, TypeError, IOError) as err:
log.error(err)
else:
return True
return False | Check that the size of the thing which is supposed to be a filename has
size greater than zero, without following symbolic links or using
:func:`os.path.isfile`.
:param filename: An object to check.
:rtype: bool
:returns: True if **filename** is file-like, False otherwise. | Below is the instruction that describes the task:
### Input:
Check that the size of the thing which is supposed to be a filename has
size greater than zero, without following symbolic links or using
:func:`os.path.isfile`.
:param filename: An object to check.
:rtype: bool
:returns: True if **filename** is file-like, False otherwise.
### Response:
def _is_file(filename):
"""Check that the size of the thing which is supposed to be a filename has
size greater than zero, without following symbolic links or using
:func:`os.path.isfile`.
:param filename: An object to check.
:rtype: bool
:returns: True if **filename** is file-like, False otherwise.
"""
try:
statinfo = os.lstat(filename)
log.debug("lstat(%r) with type=%s gave us %r"
% (repr(filename), type(filename), repr(statinfo)))
if not (statinfo.st_size > 0):
raise ValueError("'%s' appears to be an empty file!" % filename)
except OSError as oserr:
log.error(oserr)
if filename == '-':
log.debug("Got '-' for filename, assuming sys.stdin...")
return True
except (ValueError, TypeError, IOError) as err:
log.error(err)
else:
return True
return False |
def _parse_uptime_string(self, uptime):
""" Parse u' 16:53:48 up 1 day, 21:34, 3 users, load average: 0.04, 0.14, 0.19\n' """
uptime = uptime.strip()
load_averages = uptime[uptime.find('load average:') :].split(':')[1].strip().split(',')
load_averages = [float(load_avg) for load_avg in load_averages]
return load_averages | Parse u' 16:53:48 up 1 day, 21:34, 3 users, load average: 0.04, 0.14, 0.19\n' | Below is the instruction that describes the task:
### Input:
Parse u' 16:53:48 up 1 day, 21:34, 3 users, load average: 0.04, 0.14, 0.19\n'
### Response:
def _parse_uptime_string(self, uptime):
""" Parse u' 16:53:48 up 1 day, 21:34, 3 users, load average: 0.04, 0.14, 0.19\n' """
uptime = uptime.strip()
load_averages = uptime[uptime.find('load average:') :].split(':')[1].strip().split(',')
load_averages = [float(load_avg) for load_avg in load_averages]
return load_averages |
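The same slicing logic as a standalone, runnable example against the sample string from the docstring:

uptime = ' 16:53:48 up 1 day, 21:34, 3 users, load average: 0.04, 0.14, 0.19\n'
uptime = uptime.strip()
tail = uptime[uptime.find('load average:'):]   # 'load average: 0.04, 0.14, 0.19'
load_averages = [float(v) for v in tail.split(':')[1].strip().split(',')]
print(load_averages)   # [0.04, 0.14, 0.19]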
def find_near_matches_substitutions(subsequence, sequence, max_substitutions):
"""Find near-matches of the subsequence in the sequence.
This chooses a suitable fuzzy search implementation according to the given
parameters.
Returns a list of fuzzysearch.Match objects describing the matching parts
of the sequence.
"""
_check_arguments(subsequence, sequence, max_substitutions)
if max_substitutions == 0:
return [
Match(start_index, start_index + len(subsequence), 0)
for start_index in search_exact(subsequence, sequence)
]
elif len(subsequence) // (max_substitutions + 1) >= 3:
return find_near_matches_substitutions_ngrams(
subsequence, sequence, max_substitutions,
)
else:
return find_near_matches_substitutions_lp(
subsequence, sequence, max_substitutions,
) | Find near-matches of the subsequence in the sequence.
This chooses a suitable fuzzy search implementation according to the given
parameters.
Returns a list of fuzzysearch.Match objects describing the matching parts
of the sequence. | Below is the instruction that describes the task:
### Input:
Find near-matches of the subsequence in the sequence.
This chooses a suitable fuzzy search implementation according to the given
parameters.
Returns a list of fuzzysearch.Match objects describing the matching parts
of the sequence.
### Response:
def find_near_matches_substitutions(subsequence, sequence, max_substitutions):
"""Find near-matches of the subsequence in the sequence.
This chooses a suitable fuzzy search implementation according to the given
parameters.
Returns a list of fuzzysearch.Match objects describing the matching parts
of the sequence.
"""
_check_arguments(subsequence, sequence, max_substitutions)
if max_substitutions == 0:
return [
Match(start_index, start_index + len(subsequence), 0)
for start_index in search_exact(subsequence, sequence)
]
elif len(subsequence) // (max_substitutions + 1) >= 3:
return find_near_matches_substitutions_ngrams(
subsequence, sequence, max_substitutions,
)
else:
return find_near_matches_substitutions_lp(
subsequence, sequence, max_substitutions,
) |
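The branch condition encodes a pigeonhole argument: with at most max_substitutions errors, cutting the subsequence into max_substitutions + 1 pieces guarantees at least one piece matches exactly, and the ngram variant only pays off when each piece is at least 3 characters long. A small illustration of the dispatch rule (not the library itself):

def uses_ngram_search(subsequence, max_substitutions):
    return len(subsequence) // (max_substitutions + 1) >= 3

print(uses_ngram_search('PATTERN', 1))   # True:  7 // 2 == 3
print(uses_ngram_search('PATTERN', 2))   # False: 7 // 3 == 2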
def split_dae_alg(eqs: SYM, dx: SYM) -> Dict[str, SYM]:
"""Split equations into differential algebraic and algebraic only"""
dae = []
alg = []
for eq in ca.vertsplit(eqs):
if ca.depends_on(eq, dx):
dae.append(eq)
else:
alg.append(eq)
return {
'dae': ca.vertcat(*dae),
'alg': ca.vertcat(*alg)
} | Split equations into differential algebraic and algebraic only | Below is the instruction that describes the task:
### Input:
Split equations into differential algebraic and algebraic only
### Response:
def split_dae_alg(eqs: SYM, dx: SYM) -> Dict[str, SYM]:
"""Split equations into differential algebraic and algebraic only"""
dae = []
alg = []
for eq in ca.vertsplit(eqs):
if ca.depends_on(eq, dx):
dae.append(eq)
else:
alg.append(eq)
return {
'dae': ca.vertcat(*dae),
'alg': ca.vertcat(*alg)
} |
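A usage sketch with CasADi SX symbols, assuming casadi is importable as ca: any equation that mentions dx lands under 'dae', the rest under 'alg'.

import casadi as ca

x = ca.SX.sym('x')
dx = ca.SX.sym('dx')
y = ca.SX.sym('y')
eqs = ca.vertcat(dx - x, y + x)   # first row depends on dx, second does not
result = split_dae_alg(eqs, dx)
print(result['dae'])   # the dx-dependent row
print(result['alg'])   # the purely algebraic row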
def get_view_names(engine: Engine) -> List[str]:
"""
Returns a list of database view names from the :class:`Engine`.
"""
insp = Inspector.from_engine(engine)
return insp.get_view_names() | Returns a list of database view names from the :class:`Engine`. | Below is the instruction that describes the task:
### Input:
Returns a list of database view names from the :class:`Engine`.
### Response:
def get_view_names(engine: Engine) -> List[str]:
"""
Returns a list of database view names from the :class:`Engine`.
"""
insp = Inspector.from_engine(engine)
return insp.get_view_names() |
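An assumed usage example with an in-memory SQLite engine; Inspector.from_engine is SQLAlchemy 1.x API, so the sketch assumes that major version (where Engine.execute still exists):

from sqlalchemy import create_engine

engine = create_engine('sqlite://')
engine.execute('CREATE TABLE t (x INTEGER)')
engine.execute('CREATE VIEW v AS SELECT x FROM t')
print(get_view_names(engine))   # ['v']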
def reverse(self):
'S.reverse() -- reverse *IN PLACE*'
n = len(self)
for i in range(n//2):
self[i], self[n-i-1] = self[n-i-1], self[i] | S.reverse() -- reverse *IN PLACE* | Below is the instruction that describes the task:
### Input:
S.reverse() -- reverse *IN PLACE*
### Response:
def reverse(self):
'S.reverse() -- reverse *IN PLACE*'
n = len(self)
for i in range(n//2):
self[i], self[n-i-1] = self[n-i-1], self[i] |
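The method is written in MutableSequence-mixin style: it only needs __len__, __getitem__ and __setitem__. A minimal container exercising the same pairwise-swap reversal:

from collections.abc import MutableSequence

class IntList(MutableSequence):
    def __init__(self, data=()):
        self._data = list(data)
    def __getitem__(self, i): return self._data[i]
    def __setitem__(self, i, v): self._data[i] = v
    def __delitem__(self, i): del self._data[i]
    def __len__(self): return len(self._data)
    def insert(self, i, v): self._data.insert(i, v)

xs = IntList([1, 2, 3, 4])
xs.reverse()       # in-place pairwise swaps, as in the loop above
print(list(xs))    # [4, 3, 2, 1]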
def _send_to_timeseries(self, message):
"""
Establish or reuse socket connection and send
the given message to the timeseries service.
"""
logging.debug("MESSAGE=" + str(message))
result = None
try:
ws = self._get_websocket()
ws.send(json.dumps(message))
result = ws.recv()
except (websocket.WebSocketConnectionClosedException, Exception) as e:
logging.debug("Connection failed, will try again.")
logging.debug(e)
ws = self._get_websocket(reuse=False)
ws.send(json.dumps(message))
result = ws.recv()
logging.debug("RESULT=" + str(result))
return result | Establish or reuse socket connection and send
the given message to the timeseries service. | Below is the instruction that describes the task:
### Input:
Establish or reuse socket connection and send
the given message to the timeseries service.
### Response:
def _send_to_timeseries(self, message):
"""
Establish or reuse socket connection and send
the given message to the timeseries service.
"""
logging.debug("MESSAGE=" + str(message))
result = None
try:
ws = self._get_websocket()
ws.send(json.dumps(message))
result = ws.recv()
except (websocket.WebSocketConnectionClosedException, Exception) as e:
logging.debug("Connection failed, will try again.")
logging.debug(e)
ws = self._get_websocket(reuse=False)
ws.send(json.dumps(message))
result = ws.recv()
logging.debug("RESULT=" + str(result))
return result |
def exists(cls, query=None, path=None, **kwargs):
"""
Like __init__ but simply returns a boolean as to whether or not the
object exists, rather than returning the whole object.
NOTE: if you pass in a single argument to exists, this will
match against ID_KEY.
@param query: a dictionary specifying key-value pairs that the result
must match. If query is None, use kwargs in its place
@param path: the path of the database to query, in the form
"database.collection"; pass None to use the value of the
PATH property of the object
@param **kwargs: used as query parameters if query is None
@raise Exception: if path and self.PATH are None; the database path
must be defined in at least one of these
"""
if query is None and len(kwargs) > 0:
query = kwargs
if query is None:
return False
return cls.db(path).find_one(query) is not None | Like __init__ but simply returns a boolean as to whether or not the
object exists, rather than returning the whole object.
NOTE: if you pass in a single argument to exists, this will
match against ID_KEY.
@param query: a dictionary specifying key-value pairs that the result
must match. If query is None, use kwargs in its place
@param path: the path of the database to query, in the form
"database.collection"; pass None to use the value of the
PATH property of the object
@param **kwargs: used as query parameters if query is None
@raise Exception: if path and self.PATH are None; the database path
must be defined in at least one of these | Below is the instruction that describes the task:
### Input:
Like __init__ but simply returns a boolean as to whether or not the
object exists, rather than returning the whole object.
NOTE: if you pass in a single argument to exists, this will
match against ID_KEY.
@param query: a dictionary specifying key-value pairs that the result
must match. If query is None, use kwargs in its place
@param path: the path of the database to query, in the form
"database.collection"; pass None to use the value of the
PATH property of the object
@param **kwargs: used as query parameters if query is None
@raise Exception: if path and self.PATH are None; the database path
must be defined in at least one of these
### Response:
def exists(cls, query=None, path=None, **kwargs):
"""
Like __init__ but simply returns a boolean as to whether or not the
object exists, rather than returning the whole object.
NOTE: if you pass in a single argument to exists, this will
match against ID_KEY.
@param query: a dictionary specifying key-value pairs that the result
must match. If query is None, use kwargs in its place
@param path: the path of the database to query, in the form
"database.collection"; pass None to use the value of the
PATH property of the object
@param **kwargs: used as query parameters if query is None
@raise Exception: if path and self.PATH are None; the database path
must be defined in at least one of these
"""
if query is None and len(kwargs) > 0:
query = kwargs
if query is None:
return False
return cls.db(path).find_one(query) is not None |
def random_date_between(start_date, end_date):
"""Return random date between start/end"""
assert isinstance(start_date, datetime.date)
delta_secs = int((end_date - start_date).total_seconds())
delta = datetime.timedelta(seconds=random.randint(0, delta_secs))
return (start_date + delta) | Return random date between start/end | Below is the instruction that describes the task:
### Input:
Return random date between start/end
### Response:
def random_date_between(start_date, end_date):
"""Return random date between start/end"""
assert isinstance(start_date, datetime.date)
delta_secs = int((end_date - start_date).total_seconds())
delta = datetime.timedelta(seconds=random.randint(0, delta_secs))
return (start_date + delta) |
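Usage sketch; note that adding a seconds-granularity timedelta to a datetime.date keeps only whole days, so the result is always a plain date within [start_date, end_date].

import datetime
import random

random.seed(0)   # only to make the example deterministic
start = datetime.date(2020, 1, 1)
end = datetime.date(2020, 12, 31)
print(random_date_between(start, end))   # some date between start and end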
def snmp_server_user_ipv6_acl(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp")
user = ET.SubElement(snmp_server, "user")
username_key = ET.SubElement(user, "username")
username_key.text = kwargs.pop('username')
ipv6_acl = ET.SubElement(user, "ipv6-acl")
ipv6_acl.text = kwargs.pop('ipv6_acl')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def snmp_server_user_ipv6_acl(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp")
user = ET.SubElement(snmp_server, "user")
username_key = ET.SubElement(user, "username")
username_key.text = kwargs.pop('username')
ipv6_acl = ET.SubElement(user, "ipv6-acl")
ipv6_acl.text = kwargs.pop('ipv6_acl')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def add_data(self, path, time_info, data, exprs):
"""
Stores data before it can be put into a time series
"""
# Don't add if empty
if not nonempty(data):
for d in self.data[path]:
if nonempty(d['values']):
return
# Add data to path
for expr in exprs:
self.paths[expr].add(path)
self.data[path].append({
'time_info': time_info,
'values': data
}) | Stores data before it can be put into a time series | Below is the instruction that describes the task:
### Input:
Stores data before it can be put into a time series
### Response:
def add_data(self, path, time_info, data, exprs):
"""
Stores data before it can be put into a time series
"""
# Don't add if empty
if not nonempty(data):
for d in self.data[path]:
if nonempty(d['values']):
return
# Add data to path
for expr in exprs:
self.paths[expr].add(path)
self.data[path].append({
'time_info': time_info,
'values': data
}) |
def list_nodes_full(call=None, for_output=True):
'''
Return a list of the VMs that are on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
return _list_nodes(full=True, for_output=for_output) | Return a list of the VMs that are on the provider | Below is the instruction that describes the task:
### Input:
Return a list of the VMs that are on the provider
### Response:
def list_nodes_full(call=None, for_output=True):
'''
Return a list of the VMs that are on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
return _list_nodes(full=True, for_output=for_output) |
def create(client, _type, **kwargs):
"""Create a suds object of the requested _type."""
obj = client.factory.create("ns0:%s" % _type)
for key, value in kwargs.items():
setattr(obj, key, value)
return obj | Create a suds object of the requested _type. | Below is the instruction that describes the task:
### Input:
Create a suds object of the requested _type.
### Response:
def create(client, _type, **kwargs):
"""Create a suds object of the requested _type."""
obj = client.factory.create("ns0:%s" % _type)
for key, value in kwargs.items():
setattr(obj, key, value)
return obj |
def get_current_time(self):
"""
returns current time of simulation
"""
current_time = c_double()
self.library.get_current_time.argtypes = [POINTER(c_double)]
self.library.get_current_time.restype = None
self.library.get_current_time(byref(current_time))
return current_time.value | returns current time of simulation | Below is the instruction that describes the task:
### Input:
returns current time of simulation
### Response:
def get_current_time(self):
"""
returns current time of simulation
"""
current_time = c_double()
self.library.get_current_time.argtypes = [POINTER(c_double)]
self.library.get_current_time.restype = None
self.library.get_current_time(byref(current_time))
return current_time.value |
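The ctypes out-parameter pattern in isolation: the C routine writes through a double*, and Python reads .value afterwards. pointer() is used instead of byref() so the pure-Python stand-in can dereference the argument; no shared library is needed for this sketch.

from ctypes import c_double, pointer

def fake_get_current_time(out):
    # Stand-in for library.get_current_time: write through the pointer.
    out.contents.value = 42.5

current_time = c_double()
fake_get_current_time(pointer(current_time))
print(current_time.value)   # 42.5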
def _build(self, inputs, prev_state, is_training=None, test_local_stats=True):
"""Connects the LSTM module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as inputs and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
inputs: Tensor of size `[batch_size, input_size]`.
prev_state: Tuple (prev_hidden, prev_cell), or if batch norm is enabled
and `max_unique_stats > 1`, then (prev_hidden, prev_cell, time_step).
Here, prev_hidden and prev_cell are tensors of size
`[batch_size, hidden_size]`, and time_step is used to indicate the
current RNN step.
is_training: Boolean indicating whether we are in training mode (as
opposed to testing mode), passed to the batch norm
modules. Note to use this you must wrap the cell via the
`with_batch_norm_control` function.
test_local_stats: Boolean indicating whether to use local batch statistics
in test mode. See the `BatchNorm` documentation for more on this.
Returns:
A tuple (output, next_state) where 'output' is a Tensor of size
`[batch_size, hidden_size]` and 'next_state' is a tuple
(next_hidden, next_cell) or (next_hidden, next_cell, time_step + 1),
where next_hidden and next_cell have size `[batch_size, hidden_size]`.
Raises:
ValueError: If connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations.
"""
if is_training is None:
raise ValueError("Boolean is_training flag must be explicitly specified "
"when using batch normalization.")
if self._max_unique_stats == 1:
prev_hidden, prev_cell = prev_state
time_step = None
else:
prev_hidden, prev_cell, time_step = prev_state
# pylint: disable=invalid-unary-operand-type
if self._hidden_clip_value is not None:
prev_hidden = tf.clip_by_value(
prev_hidden, -self._hidden_clip_value, self._hidden_clip_value)
if self._cell_clip_value is not None:
prev_cell = tf.clip_by_value(
prev_cell, -self._cell_clip_value, self._cell_clip_value)
# pylint: enable=invalid-unary-operand-type
self._create_gate_variables(inputs.get_shape(), inputs.dtype)
self._create_batch_norm_variables(inputs.dtype)
# pylint false positive: calling module of same file;
# pylint: disable=not-callable
if self._use_batch_norm_h or self._use_batch_norm_x:
gates_h = tf.matmul(prev_hidden, self._w_h)
gates_x = tf.matmul(inputs, self._w_x)
if self._use_batch_norm_h:
gates_h = self._gamma_h * self._batch_norm_h(gates_h,
time_step,
is_training,
test_local_stats)
if self._use_batch_norm_x:
gates_x = self._gamma_x * self._batch_norm_x(gates_x,
time_step,
is_training,
test_local_stats)
gates = gates_h + gates_x
else:
# Parameters of gates are concatenated into one multiply for efficiency.
inputs_and_hidden = tf.concat([inputs, prev_hidden], 1)
gates = tf.matmul(inputs_and_hidden, self._w_xh)
gates += self._b
# i = input_gate, j = next_input, f = forget_gate, o = output_gate
i, j, f, o = tf.split(value=gates, num_or_size_splits=4, axis=1)
if self._use_peepholes: # diagonal connections
self._create_peephole_variables(inputs.dtype)
f += self._w_f_diag * prev_cell
i += self._w_i_diag * prev_cell
forget_mask = tf.sigmoid(f + self._forget_bias)
next_cell = forget_mask * prev_cell + tf.sigmoid(i) * tf.tanh(j)
cell_output = next_cell
if self._use_batch_norm_c:
cell_output = (self._beta_c
+ self._gamma_c * self._batch_norm_c(cell_output,
time_step,
is_training,
test_local_stats))
if self._use_peepholes:
cell_output += self._w_o_diag * cell_output
next_hidden = tf.tanh(cell_output) * tf.sigmoid(o)
if self._max_unique_stats == 1:
return next_hidden, (next_hidden, next_cell)
else:
return next_hidden, (next_hidden, next_cell, time_step + 1) | Connects the LSTM module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as inputs and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
inputs: Tensor of size `[batch_size, input_size]`.
prev_state: Tuple (prev_hidden, prev_cell), or if batch norm is enabled
and `max_unique_stats > 1`, then (prev_hidden, prev_cell, time_step).
Here, prev_hidden and prev_cell are tensors of size
`[batch_size, hidden_size]`, and time_step is used to indicate the
current RNN step.
is_training: Boolean indicating whether we are in training mode (as
opposed to testing mode), passed to the batch norm
modules. Note to use this you must wrap the cell via the
`with_batch_norm_control` function.
test_local_stats: Boolean indicating whether to use local batch statistics
in test mode. See the `BatchNorm` documentation for more on this.
Returns:
A tuple (output, next_state) where 'output' is a Tensor of size
`[batch_size, hidden_size]` and 'next_state' is a tuple
(next_hidden, next_cell) or (next_hidden, next_cell, time_step + 1),
where next_hidden and next_cell have size `[batch_size, hidden_size]`.
Raises:
ValueError: If connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations. | Below is the instruction that describes the task:
### Input:
Connects the LSTM module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as inputs and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
inputs: Tensor of size `[batch_size, input_size]`.
prev_state: Tuple (prev_hidden, prev_cell), or if batch norm is enabled
and `max_unique_stats > 1`, then (prev_hidden, prev_cell, time_step).
Here, prev_hidden and prev_cell are tensors of size
`[batch_size, hidden_size]`, and time_step is used to indicate the
current RNN step.
is_training: Boolean indicating whether we are in training mode (as
opposed to testing mode), passed to the batch norm
modules. Note to use this you must wrap the cell via the
`with_batch_norm_control` function.
test_local_stats: Boolean indicating whether to use local batch statistics
in test mode. See the `BatchNorm` documentation for more on this.
Returns:
A tuple (output, next_state) where 'output' is a Tensor of size
`[batch_size, hidden_size]` and 'next_state' is a tuple
(next_hidden, next_cell) or (next_hidden, next_cell, time_step + 1),
where next_hidden and next_cell have size `[batch_size, hidden_size]`.
Raises:
ValueError: If connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations.
### Response:
def _build(self, inputs, prev_state, is_training=None, test_local_stats=True):
"""Connects the LSTM module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as inputs and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
inputs: Tensor of size `[batch_size, input_size]`.
prev_state: Tuple (prev_hidden, prev_cell), or if batch norm is enabled
and `max_unique_stats > 1`, then (prev_hidden, prev_cell, time_step).
Here, prev_hidden and prev_cell are tensors of size
`[batch_size, hidden_size]`, and time_step is used to indicate the
current RNN step.
is_training: Boolean indicating whether we are in training mode (as
opposed to testing mode), passed to the batch norm
modules. Note to use this you must wrap the cell via the
`with_batch_norm_control` function.
test_local_stats: Boolean indicating whether to use local batch statistics
in test mode. See the `BatchNorm` documentation for more on this.
Returns:
A tuple (output, next_state) where 'output' is a Tensor of size
`[batch_size, hidden_size]` and 'next_state' is a tuple
(next_hidden, next_cell) or (next_hidden, next_cell, time_step + 1),
where next_hidden and next_cell have size `[batch_size, hidden_size]`.
Raises:
ValueError: If connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations.
"""
if is_training is None:
raise ValueError("Boolean is_training flag must be explicitly specified "
"when using batch normalization.")
if self._max_unique_stats == 1:
prev_hidden, prev_cell = prev_state
time_step = None
else:
prev_hidden, prev_cell, time_step = prev_state
# pylint: disable=invalid-unary-operand-type
if self._hidden_clip_value is not None:
prev_hidden = tf.clip_by_value(
prev_hidden, -self._hidden_clip_value, self._hidden_clip_value)
if self._cell_clip_value is not None:
prev_cell = tf.clip_by_value(
prev_cell, -self._cell_clip_value, self._cell_clip_value)
# pylint: enable=invalid-unary-operand-type
self._create_gate_variables(inputs.get_shape(), inputs.dtype)
self._create_batch_norm_variables(inputs.dtype)
# pylint false positive: calling module of same file;
# pylint: disable=not-callable
if self._use_batch_norm_h or self._use_batch_norm_x:
gates_h = tf.matmul(prev_hidden, self._w_h)
gates_x = tf.matmul(inputs, self._w_x)
if self._use_batch_norm_h:
gates_h = self._gamma_h * self._batch_norm_h(gates_h,
time_step,
is_training,
test_local_stats)
if self._use_batch_norm_x:
gates_x = self._gamma_x * self._batch_norm_x(gates_x,
time_step,
is_training,
test_local_stats)
gates = gates_h + gates_x
else:
# Parameters of gates are concatenated into one multiply for efficiency.
inputs_and_hidden = tf.concat([inputs, prev_hidden], 1)
gates = tf.matmul(inputs_and_hidden, self._w_xh)
gates += self._b
# i = input_gate, j = next_input, f = forget_gate, o = output_gate
i, j, f, o = tf.split(value=gates, num_or_size_splits=4, axis=1)
if self._use_peepholes: # diagonal connections
self._create_peephole_variables(inputs.dtype)
f += self._w_f_diag * prev_cell
i += self._w_i_diag * prev_cell
forget_mask = tf.sigmoid(f + self._forget_bias)
next_cell = forget_mask * prev_cell + tf.sigmoid(i) * tf.tanh(j)
cell_output = next_cell
if self._use_batch_norm_c:
cell_output = (self._beta_c
+ self._gamma_c * self._batch_norm_c(cell_output,
time_step,
is_training,
test_local_stats))
if self._use_peepholes:
cell_output += self._w_o_diag * cell_output
next_hidden = tf.tanh(cell_output) * tf.sigmoid(o)
if self._max_unique_stats == 1:
return next_hidden, (next_hidden, next_cell)
else:
return next_hidden, (next_hidden, next_cell, time_step + 1) |
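A NumPy sketch of the fused-gate arithmetic from the non-batch-norm branch, with made-up sizes and forget_bias = 1.0; it mirrors the concat/matmul/split and the cell update above.

import numpy as np

batch, input_size, hidden = 2, 3, 4
rng = np.random.default_rng(0)
x = rng.standard_normal((batch, input_size))
prev_h = rng.standard_normal((batch, hidden))
prev_c = rng.standard_normal((batch, hidden))
w_xh = rng.standard_normal((input_size + hidden, 4 * hidden))
b = np.zeros(4 * hidden)

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

gates = np.concatenate([x, prev_h], axis=1) @ w_xh + b
i, j, f, o = np.split(gates, 4, axis=1)   # input, candidate, forget, output
next_c = sigmoid(f + 1.0) * prev_c + sigmoid(i) * np.tanh(j)
next_h = np.tanh(next_c) * sigmoid(o)
print(next_h.shape)   # (2, 4)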
def add_action(self, dash, dashdash, action_code):
"""Add a specialized option that is the action to execute."""
option = self.add_option(dash, dashdash, action='callback',
callback=self._append_action
)
option.action_code = action_code | Add a specialized option that is the action to execute. | Below is the instruction that describes the task:
### Input:
Add a specialized option that is the action to execute.
### Response:
def add_action(self, dash, dashdash, action_code):
"""Add a specialized option that is the action to execute."""
option = self.add_option(dash, dashdash, action='callback',
callback=self._append_action
)
option.action_code = action_code |
async def get_search_page(self, term: str):
"""Get search page.
This function will get the first link from the search term we do on term and then
it will return the link we want to parse from.
:param term: Light Novel to Search For
"""
# Uses the BASEURL and also builds link for the page we want using the term given
params = {'s': term, 'post_type': 'seriesplan'}
async with self.session.get(self.BASEURL, params=params) as response:
# If the response is 200 OK
if response.status == 200:
search = BeautifulSoup(await response.text(), 'lxml')
# Return the link that we need
return search.find('a', class_='w-blog-entry-link').get('href')
else:
# Raise an error with the response status
raise aiohttp.ClientResponseError(response.status) | Get search page.
This function will get the first link from the search term we do on term and then
it will return the link we want to parse from.
:param term: Light Novel to Search For | Below is the instruction that describes the task:
### Input:
Get search page.
This function will get the first link from the search term we do on term and then
it will return the link we want to parse from.
:param term: Light Novel to Search For
### Response:
async def get_search_page(self, term: str):
"""Get search page.
This function will get the first link from the search term we do on term and then
it will return the link we want to parse from.
:param term: Light Novel to Search For
"""
# Uses the BASEURL and also builds link for the page we want using the term given
params = {'s': term, 'post_type': 'seriesplan'}
async with self.session.get(self.BASEURL, params=params) as response:
# If the response is 200 OK
if response.status == 200:
search = BeautifulSoup(await response.text(), 'lxml')
# Return the link that we need
return search.find('a', class_='w-blog-entry-link').get('href')
else:
# Raise an error with the response status
raise aiohttp.ClientResponseError(response.status) |
def get_detail_intro(self, content_id):
"""
Inquire detail introduction
:param content_id: Content ID to inquire
:type content_id: str
:rtype: dict
"""
content_type_id = self.get_detail_common(content_id)['content_type_id']
# Get content type id
resp = json.loads(urlopen(self.detail_intro_url.format(content_id, content_type_id)).read().decode('utf-8'))
data = resp['response']['body']['items']['item']
# Extract data
del data['contentid']
del data['contenttypeid']
if content_type_id == 12:
# Tourist attractions
keychain = {
'accomcount': ('capacity', None),
'chkbabycarriage': ('baby_carriage', None),
'chkcreditcard': ('credit_card', None),
'chkpet': ('pet', None),
'expagerange': ('age_range', None),
'expguide': ('guide', None),
'infocenter': ('info_center', None),
'opendate': ('open_date', None),
'parking': ('parking', None),
'restdate': ('rest_date', None),
'useseason': ('season', None),
'usetime': ('use_time', None)
}
_dict_key_changer(data, keychain)
data['cultural_heritage'] = data.pop('heritage1', None) == 1
data['natural_heritage'] = data.pop('heritage2', None) == 1
data['archival_heritage'] = data.pop('heritage3', None) == 1
elif content_type_id == 14:
# Cultural facilities
keychain = {
'accomcountculture': ('capacity', None),
'chkbabycarriageculture': ('baby_carriage', None),
'chkcreditcardculture': ('credit_card', None),
'chkpetculture': ('pet', None),
'discountinfo': ('discount_info', None),
'infocenterculture': ('info_center', None),
'parkingculture': ('parking', None),
'parkingfee': ('parking_fee', None),
'restdateculture': ('rest_date', None),
'usefee': ('use_fee', None),
'usetimeculture': ('use_time', None),
# Hours of use
'scale': ('scale', None),
'spendtime': ('spend_time', None)
# Time required for a visit
}
_dict_key_changer(data, keychain)
elif content_type_id == 15:
# Festivals/performances/events
keychain = {
'agelimit': ('age_limit', None),
'bookingplace': ('reservation_place', None),
'eventstartdate': ('start_date', None),
'eventenddate': ('end_date', None),
'eventplace': ('place', None),
'festivalgrade': ('festival_grade', None),
'placeinfo': ('place_guide', None),
'spendtimefestival': ('spend_time', None),
'sponsor1': ('organizer', None),
'sponsor2': ('host', None),
'subevent': ('sub_event', None),
'usetimefestival': ('use_fee', None)
}
_dict_key_changer(data, keychain)
data.pop('eventhomepage', None)
elif content_type_id == 25:
# Travel courses
keychain = {
'distance': ('distance', None),
'infocentertourcourse': ('info_center', None),
'schedule': ('schedule', None),
'taketime': ('spend_time', None),
'theme': ('theme', None)
}
_dict_key_changer(data, keychain)
elif content_type_id == 28:
# Leisure sports
keychain = {
'accomcountleports': ('capacity', None),
'chkbabycarriageleports': ('baby_carriage', None),
'chkcreditcardleports': ('credit_card', None),
'chkpetleports': ('pet', None),
'expagerangeleports': ('age_range', None),
'infocenterleports': ('info_center', None),
'openperiod': ('open_period', None),
'parkingleports': ('parking', None),
'parkingfeeleports': ('parking_fee', None),
'reservation': ('reservation_info', None),
'restdateleports': ('rest_date', None),
'scaleleports': ('scale', None),
'usetimeleports': ('use_time', None),
'usefeeleports': ('use_fee', None),
}
_dict_key_changer(data, keychain)
elif content_type_id == 32:
# Lodging
keychain = {
'accomcountlodging': ('capacity', None),
'checkintime': ('checkin_time', None),
'checkouttime': ('checkout_time', None),
'foodplace': ('food_field', None),
'infocenterlodging': ('info_center', None),
'parkinglodging': ('parking', None),
'pickup': ('pickup_service', None),
'reservationlodging': ('reservation_info', None),
'roomtype': ('room_type', None),
'scalelodging': ('scale', None),
'subfacility': ('sub_facility', None)
}
_dict_key_changer(data, keychain)
data['benikia'] = data.pop('benikia', False) == 1
data['cooking'] = data.pop('chkcooking', False) == 1
data['goodstay'] = data.pop('goodstay', False) == 1
data['korean_house'] = data.pop('hanok', False) == 1
data['barbecue'] = data.pop('barbecue', False) == 1
data['beauty'] = data.pop('beauty', False) == 1
data['beverage'] = data.pop('beverage', False) == 1
data['bicycle'] = data.pop('bicycle', False) == 1
data['campfire'] = data.pop('campfire', False) == 1
data['fitness'] = data.pop('fitness', False) == 1
data['karaoke'] = data.pop('karaoke', False) == 1
data['public_bath'] = data.pop('publicbath', False) == 1
data['public_pc'] = data.pop('publicpc', False) == 1
data['sauna'] = data.pop('sauna', False) == 1
data['seminar'] = data.pop('seminar', False) == 1
data['sports'] = data.pop('sports', False) == 1
elif content_type_id == 38:
# Shopping
keychain = {
'chkbabycarriageshopping': ('baby_carriage', None),
'chkcreditcardshopping': ('credit_card', None),
'chkpetshopping': ('pet', None),
'fairday': ('fair_day', None),
'infocentershopping': ('info_center', None),
'opendateshopping': ('open_date', None),
'opentime': ('use_time', None),
'parkingshopping': ('parking', None),
'restdateshopping': ('rest_date', None),
'restroom': ('restroom_info', None),
'saleitem': ('sale_item', None),
'saleitemcost': ('sale_item_cost', None),
'scaleshopping': ('scale', None),
'shopguide': ('guide', None)
}
_dict_key_changer(data, keychain)
elif content_type_id == 39:
# Food
keychain = {
'chkcreditcardfood': ('credit_card', None),
'discountinfofodd': ('discount_info', None),
'firstmenu': ('rep_menu', None),
'infocenterfood': ('info_center', None),
'kidsfacility': ('kids_facility', None),
'opendatefood': ('open_date', None),
'opentimefood': ('open_time', None),
'packing': ('packing', None),
'parkingfood': ('parking', None),
'reservationfood': ('reservation_info', None),
'restdatefood': ('rest_date', None),
'scalefood': ('scale', None),
'seat': ('seat', None),
'smoking': ('smoking', None),
'treatmenu': ('treat_menus', None)
}
_dict_key_changer(data, keychain)
data['kids_facility'] = data.pop('kidsfacility') == 1 if 'kidsfacility' in data else False
return data | Inquire detail introduction
:param content_id: Content ID to inquire
:type content_id: str
:rtype: dict | Below is the instruction that describes the task:
### Input:
Inquire detail introduction
:param content_id: Content ID to inquire
:type content_id: str
:rtype: dict
### Response:
def get_detail_intro(self, content_id):
"""
Inquire detail introduction
:param content_id: Content ID to inquire
:type content_id: str
:rtype: dict
"""
content_type_id = self.get_detail_common(content_id)['content_type_id']
# Get content type id
resp = json.loads(urlopen(self.detail_intro_url.format(content_id, content_type_id)).read().decode('utf-8'))
data = resp['response']['body']['items']['item']
# Extract data
del data['contentid']
del data['contenttypeid']
if content_type_id == 12:
# Tourist attractions
keychain = {
'accomcount': ('capacity', None),
'chkbabycarriage': ('baby_carriage', None),
'chkcreditcard': ('credit_card', None),
'chkpet': ('pet', None),
'expagerange': ('age_range', None),
'expguide': ('guide', None),
'infocenter': ('info_center', None),
'opendate': ('open_date', None),
'parking': ('parking', None),
'restdate': ('rest_date', None),
'useseason': ('season', None),
'usetime': ('use_time', None)
}
_dict_key_changer(data, keychain)
data['cultural_heritage'] = data.pop('heritage1', None) == 1
data['natural_heritage'] = data.pop('heritage2', None) == 1
data['archival_heritage'] = data.pop('heritage3', None) == 1
elif content_type_id == 14:
# Cultural facilities
keychain = {
'accomcountculture': ('capacity', None),
'chkbabycarriageculture': ('baby_carriage', None),
'chkcreditcardculture': ('credit_card', None),
'chkpetculture': ('pet', None),
'discountinfo': ('discount_info', None),
'infocenterculture': ('info_center', None),
'parkingculture': ('parking', None),
'parkingfee': ('parking_fee', None),
'restdateculture': ('rest_date', None),
'usefee': ('use_fee', None),
'usetimeculture': ('use_time', None),
# Hours of use
'scale': ('scale', None),
'spendtime': ('spend_time', None)
# Time required for a visit
}
_dict_key_changer(data, keychain)
elif content_type_id == 15:
# Festivals/performances/events
keychain = {
'agelimit': ('age_limit', None),
'bookingplace': ('reservation_place', None),
'eventstartdate': ('start_date', None),
'eventenddate': ('end_date', None),
'eventplace': ('place', None),
'festivalgrade': ('festival_grade', None),
'placeinfo': ('place_guide', None),
'spendtimefestival': ('spend_time', None),
'sponsor1': ('organizer', None),
'sponsor2': ('host', None),
'subevent': ('sub_event', None),
'usetimefestival': ('use_fee', None)
}
_dict_key_changer(data, keychain)
data.pop('eventhomepage', None)
elif content_type_id == 25:
# Travel courses
keychain = {
'distance': ('distance', None),
'infocentertourcourse': ('info_center', None),
'schedule': ('schedule', None),
'taketime': ('spend_time', None),
'theme': ('theme', None)
}
_dict_key_changer(data, keychain)
elif content_type_id == 28:
# Leisure sports
keychain = {
'accomcountleports': ('capacity', None),
'chkbabycarriageleports': ('baby_carriage', None),
'chkcreditcardleports': ('credit_card', None),
'chkpetleports': ('pet', None),
'expagerangeleports': ('age_range', None),
'infocenterleports': ('info_center', None),
'openperiod': ('open_period', None),
'parkingleports': ('parking', None),
'parkingfeeleports': ('parking_fee', None),
'reservation': ('reservation_info', None),
'restdateleports': ('rest_date', None),
'scaleleports': ('scale', None),
'usetimeleports': ('use_time', None),
'usefeeleports': ('use_fee', None),
}
_dict_key_changer(data, keychain)
elif content_type_id == 32:
# Lodging
keychain = {
'accomcountlodging': ('capacity', None),
'checkintime': ('checkin_time', None),
'checkouttime': ('checkout_time', None),
'foodplace': ('food_field', None),
'infocenterlodging': ('info_center', None),
'parkinglodging': ('parking', None),
'pickup': ('pickup_service', None),
'reservationlodging': ('reservation_info', None),
'roomtype': ('room_type', None),
'scalelodging': ('scale', None),
'subfacility': ('sub_facility', None)
}
_dict_key_changer(data, keychain)
data['benikia'] = data.pop('benikia', False) == 1
data['cooking'] = data.pop('chkcooking', False) == 1
data['goodstay'] = data.pop('goodstay', False) == 1
data['korean_house'] = data.pop('hanok', False) == 1
data['barbecue'] = data.pop('barbecue', False) == 1
data['beauty'] = data.pop('beauty', False) == 1
data['beverage'] = data.pop('beverage', False) == 1
data['bicycle'] = data.pop('bicycle', False) == 1
data['campfire'] = data.pop('campfire', False) == 1
data['fitness'] = data.pop('fitness', False) == 1
data['karaoke'] = data.pop('karaoke', False) == 1
data['public_bath'] = data.pop('publicbath', False) == 1
data['public_pc'] = data.pop('publicpc', False) == 1
data['sauna'] = data.pop('sauna', False) == 1
data['seminar'] = data.pop('seminar', False) == 1
data['sports'] = data.pop('sports', False) == 1
elif content_type_id == 38:
# Shopping
keychain = {
'chkbabycarriageshopping': ('baby_carriage', None),
'chkcreditcardshopping': ('credit_card', None),
'chkpetshopping': ('pet', None),
'fairday': ('fair_day', None),
'infocentershopping': ('info_center', None),
'opendateshopping': ('open_date', None),
'opentime': ('use_time', None),
'parkingshopping': ('parking', None),
'restdateshopping': ('rest_date', None),
'restroom': ('restroom_info', None),
'saleitem': ('sale_item', None),
'saleitemcost': ('sale_item_cost', None),
'scaleshopping': ('scale', None),
'shopguide': ('guide', None)
}
_dict_key_changer(data, keychain)
elif content_type_id == 39:
# Food
keychain = {
'chkcreditcardfood': ('credit_card', None),
'discountinfofodd': ('discount_info', None),
'firstmenu': ('rep_menu', None),
'infocenterfood': ('info_center', None),
'kidsfacility': ('kids_facility', None),
'opendatefood': ('open_date', None),
'opentimefood': ('open_time', None),
'packing': ('packing', None),
'parkingfood': ('parking', None),
'reservationfood': ('reservation_info', None),
'restdatefood': ('rest_date', None),
'scalefood': ('scale', None),
'seat': ('seat', None),
'smoking': ('smoking', None),
'treatmenu': ('treat_menus', None)
}
_dict_key_changer(data, keychain)
data['kids_facility'] = data.pop('kidsfacility') == 1 if 'kidsfacility' in data else False
return data |
def copy_on_s3(self, src_file_name, dst_file_name, bucket_name):
"""
Copies src file to destination within a bucket.
"""
try:
self.s3_client.head_bucket(Bucket=bucket_name)
except botocore.exceptions.ClientError as e: # pragma: no cover
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response['Error']['Code'])
if error_code == 404:
return False
copy_src = {
"Bucket": bucket_name,
"Key": src_file_name
}
try:
self.s3_client.copy(
CopySource=copy_src,
Bucket=bucket_name,
Key=dst_file_name
)
return True
except botocore.exceptions.ClientError: # pragma: no cover
return False | Copies src file to destination within a bucket. | Below is the instruction that describes the task:
### Input:
Copies src file to destination within a bucket.
### Response:
def copy_on_s3(self, src_file_name, dst_file_name, bucket_name):
"""
Copies src file to destination within a bucket.
"""
try:
self.s3_client.head_bucket(Bucket=bucket_name)
except botocore.exceptions.ClientError as e: # pragma: no cover
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response['Error']['Code'])
if error_code == 404:
return False
copy_src = {
"Bucket": bucket_name,
"Key": src_file_name
}
try:
self.s3_client.copy(
CopySource=copy_src,
Bucket=bucket_name,
Key=dst_file_name
)
return True
except botocore.exceptions.ClientError: # pragma: no cover
return False |
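A usage sketch for copy_on_s3 may help; the wrapper class and names below are illustrative assumptions, and only boto3.client("s3") is a real API the method relies on.
# Sketch only: attach the function above to a minimal wrapper that owns
# the boto3 S3 client it expects as `self.s3_client`.
import boto3

class S3Wrapper(object):
    copy_on_s3 = copy_on_s3          # the function defined above

    def __init__(self):
        self.s3_client = boto3.client("s3")

# S3Wrapper().copy_on_s3("a.txt", "backup/a.txt", "my-bucket")  # True on success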
def get_share_uk_and_shareid(cookie, url):
'''Extract uk and shareid from a share link.
If the shared file requires a password, need_pwd will be set to True.
On success, returns (need_pwd, uk, shareid).
On failure, returns None.
Currently supported link formats:
* http://pan.baidu.com/wap/link?uk=202032639&shareid=420754&third=0
* http://pan.baidu.com/share/link?uk=202032639&shareid=420754
* http://pan.baidu.com/s/1i3iQY48
'''
def parse_share_uk(content):
'''The code snippet looks like:
yunData.SHARE_ID = "677200861";
yunData.SHARE_UK = "1295729848";
'''
uk_reg = re.compile('yunData.SHARE_UK\s*=\s*"(\d+)"')
shareid_reg = re.compile('yunData.SHARE_ID\s*=\s*"(\d+)"')
uk_match = uk_reg.search(content)
shareid_match = shareid_reg.search(content)
if uk_match and shareid_match:
return False, uk_match.group(1), shareid_match.group(1)
else:
return None
def parse_uk_from_url(url):
uk_reg = re.compile('uk=(\d+)')
uk_match = uk_reg.search(url)
shareid_reg = re.compile('shareid=(\d+)')
shareid_match = shareid_reg.search(url)
if not uk_match or not shareid_match:
return '', ''
uk = uk_match.group(1)
shareid = shareid_match.group(1)
return uk, shareid
# Detect password-protected links
req = net.urlopen_without_redirect(url, headers={
'Cookie': cookie.header_output(),
})
if req and req.headers.get('Location'):
init_url = req.headers.get('Location')
if init_url.find('share/init') > -1:
uk, shareid = parse_uk_from_url(init_url)
return True, uk, shareid
# Handle short links
if url.startswith('http://pan.baidu.com/s/'):
req = net.urlopen(url, headers={
'Cookie': cookie.header_output(),
})
if req:
return parse_share_uk(req.data.decode())
# Handle normal links
uk, shareid = parse_uk_from_url(url)
return False, uk, shareid | Extract uk and shareid from a share link.
If the shared file requires a password, need_pwd will be set to True.
On success, returns (need_pwd, uk, shareid).
On failure, returns None.
Currently supported link formats:
* http://pan.baidu.com/wap/link?uk=202032639&shareid=420754&third=0
* http://pan.baidu.com/share/link?uk=202032639&shareid=420754
* http://pan.baidu.com/s/1i3iQY48 | Below is the instruction that describes the task:
### Input:
Extract uk and shareid from a share link.
If the shared file requires a password, need_pwd will be set to True.
On success, returns (need_pwd, uk, shareid).
On failure, returns None.
Currently supported link formats:
* http://pan.baidu.com/wap/link?uk=202032639&shareid=420754&third=0
* http://pan.baidu.com/share/link?uk=202032639&shareid=420754
* http://pan.baidu.com/s/1i3iQY48
### Response:
def get_share_uk_and_shareid(cookie, url):
'''Extract uk and shareid from a share link.
If the shared file requires a password, need_pwd will be set to True.
On success, returns (need_pwd, uk, shareid).
On failure, returns None.
Currently supported link formats:
* http://pan.baidu.com/wap/link?uk=202032639&shareid=420754&third=0
* http://pan.baidu.com/share/link?uk=202032639&shareid=420754
* http://pan.baidu.com/s/1i3iQY48
'''
def parse_share_uk(content):
'''The code snippet looks like:
yunData.SHARE_ID = "677200861";
yunData.SHARE_UK = "1295729848";
'''
uk_reg = re.compile('yunData.SHARE_UK\s*=\s*"(\d+)"')
shareid_reg = re.compile('yunData.SHARE_ID\s*=\s*"(\d+)"')
uk_match = uk_reg.search(content)
shareid_match = shareid_reg.search(content)
if uk_match and shareid_match:
return False, uk_match.group(1), shareid_match.group(1)
else:
return None
def parse_uk_from_url(url):
uk_reg = re.compile('uk=(\d+)')
uk_match = uk_reg.search(url)
shareid_reg = re.compile('shareid=(\d+)')
shareid_match = shareid_reg.search(url)
if not uk_match or not shareid_match:
return '', ''
uk = uk_match.group(1)
shareid = shareid_match.group(1)
return uk, shareid
# Detect password-protected links
req = net.urlopen_without_redirect(url, headers={
'Cookie': cookie.header_output(),
})
if req and req.headers.get('Location'):
init_url = req.headers.get('Location')
if init_url.find('share/init') > -1:
uk, shareid = parse_uk_from_url(init_url)
return True, uk, shareid
# Handle short links
if url.startswith('http://pan.baidu.com/s/'):
req = net.urlopen(url, headers={
'Cookie': cookie.header_output(),
})
if req:
return parse_share_uk(req.data.decode())
# 处理正常链接
uk, shareid = parse_uk_from_url(url)
return False, uk, shareid |
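The uk/shareid regexes above can be exercised on their own; a minimal self-contained version of the same parsing, with the output it yields for one of the documented example links:
import re

def parse_uk_from_url(url):
    uk = re.search(r'uk=(\d+)', url)
    shareid = re.search(r'shareid=(\d+)', url)
    if not uk or not shareid:
        return '', ''
    return uk.group(1), shareid.group(1)

print(parse_uk_from_url('http://pan.baidu.com/share/link?uk=202032639&shareid=420754'))
# ('202032639', '420754')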
def send_theme_file(self, filename):
"""
Function used to send static theme files from the theme folder to the browser.
"""
cache_timeout = self.get_send_file_max_age(filename)
return send_from_directory(self.config['THEME_STATIC_FOLDER'], filename,
cache_timeout=cache_timeout) | Function used to send static theme files from the theme folder to the browser. | Below is the instruction that describes the task:
### Input:
Function used to send static theme files from the theme folder to the browser.
### Response:
def send_theme_file(self, filename):
"""
Function used to send static theme files from the theme folder to the browser.
"""
cache_timeout = self.get_send_file_max_age(filename)
return send_from_directory(self.config['THEME_STATIC_FOLDER'], filename,
cache_timeout=cache_timeout) |
def load_fasta_file_as_dict_of_seqs(filename):
"""Load a FASTA file and return the sequences as a dict of {ID: sequence string}
Args:
filename (str): Path to the FASTA file to load
Returns:
dict: Dictionary of IDs to their sequence strings
"""
results = {}
records = load_fasta_file(filename)
for r in records:
results[r.id] = str(r.seq)
return results | Load a FASTA file and return the sequences as a dict of {ID: sequence string}
Args:
filename (str): Path to the FASTA file to load
Returns:
dict: Dictionary of IDs to their sequence strings | Below is the instruction that describes the task:
### Input:
Load a FASTA file and return the sequences as a dict of {ID: sequence string}
Args:
filename (str): Path to the FASTA file to load
Returns:
dict: Dictionary of IDs to their sequence strings
### Response:
def load_fasta_file_as_dict_of_seqs(filename):
"""Load a FASTA file and return the sequences as a dict of {ID: sequence string}
Args:
filename (str): Path to the FASTA file to load
Returns:
dict: Dictionary of IDs to their sequence strings
"""
results = {}
records = load_fasta_file(filename)
for r in records:
results[r.id] = str(r.seq)
return results |
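load_fasta_file itself is not shown here; assuming it wraps Biopython's SeqIO.parse with the "fasta" format, the same dict can be built directly (a sketch under that assumption, not the module's actual code):
from Bio import SeqIO

def fasta_to_dict(filename):
    # Assumes Biopython is installed; equivalent to the loop above.
    return {rec.id: str(rec.seq) for rec in SeqIO.parse(filename, "fasta")}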
def pelican_init(pelicanobj):
""" Prepare configurations for the MD plugin """
global global_siteurl
global_siteurl = pelicanobj.settings['SITEURL']
try:
import markdown
from .plantuml_md import PlantUMLMarkdownExtension
except:
# Markdown not available
logger.debug("[plantuml] Markdown support not available")
return
# Register the Markdown plugin
config = { 'siteurl': pelicanobj.settings['SITEURL'] }
try:
if 'MD_EXTENSIONS' in pelicanobj.settings.keys(): # pre pelican 3.7.0
pelicanobj.settings['MD_EXTENSIONS'].append(PlantUMLMarkdownExtension(config))
elif 'MARKDOWN' in pelicanobj.settings.keys() and \
not ('extension_configs' in pelicanobj.settings['MARKDOWN']['extension_configs']): # from pelican 3.7.0
pelicanobj.settings['MARKDOWN']['extension_configs']['plantuml.plantuml_md'] = {}
except:
logger.error("[plantuml] Unable to configure plantuml markdown extension") | Prepare configurations for the MD plugin | Below is the instruction that describes the task:
### Input:
Prepare configurations for the MD plugin
### Response:
def pelican_init(pelicanobj):
""" Prepare configurations for the MD plugin """
global global_siteurl
global_siteurl = pelicanobj.settings['SITEURL']
try:
import markdown
from .plantuml_md import PlantUMLMarkdownExtension
except:
# Markdown not available
logger.debug("[plantuml] Markdown support not available")
return
# Register the Markdown plugin
config = { 'siteurl': pelicanobj.settings['SITEURL'] }
try:
if 'MD_EXTENSIONS' in pelicanobj.settings.keys(): # pre pelican 3.7.0
pelicanobj.settings['MD_EXTENSIONS'].append(PlantUMLMarkdownExtension(config))
elif 'MARKDOWN' in pelicanobj.settings.keys() and \
not ('extension_configs' in pelicanobj.settings['MARKDOWN']['extension_configs']): # from pelican 3.7.0
pelicanobj.settings['MARKDOWN']['extension_configs']['plantuml.plantuml_md'] = {}
except:
logger.error("[plantuml] Unable to configure plantuml markdown extension") |
def create_for_data(cls, data, format, width, height, stride=None):
"""Same as ``ImageSurface(format, width, height, data, stride)``.
Exists for compatibility with pycairo.
"""
return cls(format, width, height, data, stride) | Same as ``ImageSurface(format, width, height, data, stride)``.
Exists for compatibility with pycairo. | Below is the the instruction that describes the task:
### Input:
Same as ``ImageSurface(format, width, height, data, stride)``.
Exists for compatibility with pycairo.
### Response:
def create_for_data(cls, data, format, width, height, stride=None):
"""Same as ``ImageSurface(format, width, height, data, stride)``.
Exists for compatibility with pycairo.
"""
return cls(format, width, height, data, stride) |
def usage(self, auth, resource, metric, starttime, endtime, defer=False):
""" Returns metric usage for client and its subhierarchy.
Args:
auth: <cik> for authentication
resource: ResourceID
metric: Metric to measure (as a string); it may be an entity or consumable.
starttime: Start time of the window to measure usage (format is ___).
endtime: End time of the window to measure usage (format is ___).
"""
return self._call('usage', auth,
[resource, metric, starttime, endtime], defer) | Returns metric usage for client and its subhierarchy.
Args:
auth: <cik> for authentication
resource: ResourceID
metric: Metric to measure (as a string); it may be an entity or consumable.
starttime: Start time of the window to measure usage (format is ___).
endtime: End time of the window to measure usage (format is ___). | Below is the instruction that describes the task:
### Input:
Returns metric usage for client and its subhierarchy.
Args:
auth: <cik> for authentication
resource: ResourceID
metric: Metric to measure (as a string); it may be an entity or consumable.
starttime: Start time of the window to measure usage (format is ___).
endtime: End time of the window to measure usage (format is ___).
### Response:
def usage(self, auth, resource, metric, starttime, endtime, defer=False):
""" Returns metric usage for client and its subhierarchy.
Args:
auth: <cik> for authentication
resource: ResourceID
metric: Metric to measure (as a string); it may be an entity or consumable.
starttime: Start time of the window to measure usage (format is ___).
endtime: End time of the window to measure usage (format is ___).
"""
return self._call('usage', auth,
[resource, metric, starttime, endtime], defer) |
def to_dict(ramons, flatten=False):
"""
Converts a RAMON object list to a JSON-style dictionary. Useful for going
from an array of RAMONs to a dictionary, indexed by ID.
Arguments:
ramons (RAMON[]): A list of RAMON objects
flatten (boolean: False): Not implemented
Returns:
dict: A python dictionary of RAMON objects.
"""
if type(ramons) is not list:
ramons = [ramons]
out_ramons = {}
for r in ramons:
out_ramons[r.id] = {
"id": r.id,
"type": _reverse_ramon_types[type(r)],
"metadata": vars(r)
}
return out_ramons | Converts a RAMON object list to a JSON-style dictionary. Useful for going
from an array of RAMONs to a dictionary, indexed by ID.
Arguments:
ramons (RAMON[]): A list of RAMON objects
flatten (boolean: False): Not implemented
Returns:
dict: A python dictionary of RAMON objects. | Below is the instruction that describes the task:
### Input:
Converts a RAMON object list to a JSON-style dictionary. Useful for going
from an array of RAMONs to a dictionary, indexed by ID.
Arguments:
ramons (RAMON[]): A list of RAMON objects
flatten (boolean: False): Not implemented
Returns:
dict: A python dictionary of RAMON objects.
### Response:
def to_dict(ramons, flatten=False):
"""
Converts a RAMON object list to a JSON-style dictionary. Useful for going
from an array of RAMONs to a dictionary, indexed by ID.
Arguments:
ramons (RAMON[]): A list of RAMON objects
flatten (boolean: False): Not implemented
Returns:
dict: A python dictionary of RAMON objects.
"""
if type(ramons) is not list:
ramons = [ramons]
out_ramons = {}
for r in ramons:
out_ramons[r.id] = {
"id": r.id,
"type": _reverse_ramon_types[type(r)],
"metadata": vars(r)
}
return out_ramons |
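The by-ID indexing pattern in isolation; FakeRamon and _fake_types below are stand-ins (the real module supplies the RAMON classes and _reverse_ramon_types):
class FakeRamon(object):                      # stand-in class, illustrative only
    def __init__(self, id):
        self.id = id

_fake_types = {FakeRamon: "generic"}          # stand-in for _reverse_ramon_types
ramons = [FakeRamon(1), FakeRamon(2)]
out = {r.id: {"id": r.id, "type": _fake_types[type(r)], "metadata": vars(r)}
       for r in ramons}
print(out[1])   # {'id': 1, 'type': 'generic', 'metadata': {'id': 1}}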
def alter(self, id_option_vip, tipo_opcao, nome_opcao_txt):
"""Change Option VIP from by the identifier.
:param id_option_vip: Identifier of the Option VIP. Integer value and greater than zero.
:param tipo_opcao: Type. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:param nome_opcao_txt: Name Option. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:return: None
:raise InvalidParameterError: Option VIP identifier is null and invalid.
:raise InvalidParameterError: The value of tipo_opcao or nome_opcao_txt is invalid.
:raise OptionVipNotFoundError: Option VIP not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_option_vip):
raise InvalidParameterError(
u'The identifier of Option VIP is invalid or was not informed.')
optionvip_map = dict()
optionvip_map['tipo_opcao'] = tipo_opcao
optionvip_map['nome_opcao_txt'] = nome_opcao_txt
url = 'optionvip/' + str(id_option_vip) + '/'
code, xml = self.submit({'option_vip': optionvip_map}, 'PUT', url)
return self.response(code, xml) | Change Option VIP by the identifier.
:param id_option_vip: Identifier of the Option VIP. Integer value and greater than zero.
:param tipo_opcao: Type. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:param nome_opcao_txt: Name Option. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:return: None
:raise InvalidParameterError: Option VIP identifier is null or invalid.
:raise InvalidParameterError: The value of tipo_opcao or nome_opcao_txt is invalid.
:raise OptionVipNotFoundError: Option VIP not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response. | Below is the instruction that describes the task:
### Input:
Change Option VIP by the identifier.
:param id_option_vip: Identifier of the Option VIP. Integer value and greater than zero.
:param tipo_opcao: Type. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:param nome_opcao_txt: Name Option. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:return: None
:raise InvalidParameterError: Option VIP identifier is null or invalid.
:raise InvalidParameterError: The value of tipo_opcao or nome_opcao_txt is invalid.
:raise OptionVipNotFoundError: Option VIP not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
### Response:
def alter(self, id_option_vip, tipo_opcao, nome_opcao_txt):
"""Change Option VIP from by the identifier.
:param id_option_vip: Identifier of the Option VIP. Integer value and greater than zero.
:param tipo_opcao: Type. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:param nome_opcao_txt: Name Option. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:return: None
:raise InvalidParameterError: Option VIP identifier is null and invalid.
:raise InvalidParameterError: The value of tipo_opcao or nome_opcao_txt is invalid.
:raise OptionVipNotFoundError: Option VIP not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_option_vip):
raise InvalidParameterError(
u'The identifier of Option VIP is invalid or was not informed.')
optionvip_map = dict()
optionvip_map['tipo_opcao'] = tipo_opcao
optionvip_map['nome_opcao_txt'] = nome_opcao_txt
url = 'optionvip/' + str(id_option_vip) + '/'
code, xml = self.submit({'option_vip': optionvip_map}, 'PUT', url)
return self.response(code, xml) |
def add(self, name, description):
"""Inserts a new Filter and returns its identifier.
:param name: Name. String with a maximum of 100 characters and respect [a-zA-Z\_-]
:param description: Description. String with a maximum of 200 characters and respect [a-zA-Z\_-]
:return: Following dictionary:
::
{'filter': {'id': < id >}}
:raise InvalidParameterError: The value of name or description is invalid.
:raise FilterDuplicateError: A filter with the given name already exists.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
filter_map = dict()
filter_map['name'] = name
filter_map['description'] = description
code, xml = self.submit({'filter': filter_map}, 'POST', 'filter/')
return self.response(code, xml) | Inserts a new Filter and returns its identifier.
:param name: Name. String with a maximum of 100 characters and respect [a-zA-Z\_-]
:param description: Description. String with a maximum of 200 characters and respect [a-zA-Z\_-]
:return: Following dictionary:
::
{'filter': {'id': < id >}}
:raise InvalidParameterError: The value of name or description is invalid.
:raise FilterDuplicateError: A filter with the given name already exists.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response. | Below is the instruction that describes the task:
### Input:
Inserts a new Filter and returns its identifier.
:param name: Name. String with a maximum of 100 characters and respect [a-zA-Z\_-]
:param description: Description. String with a maximum of 200 characters and respect [a-zA-Z\_-]
:return: Following dictionary:
::
{'filter': {'id': < id >}}
:raise InvalidParameterError: The value of name or description is invalid.
:raise FilterDuplicateError: A filter with the given name already exists.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
### Response:
def add(self, name, description):
"""Inserts a new Filter and returns its identifier.
:param name: Name. String with a maximum of 100 characters and respect [a-zA-Z\_-]
:param description: Description. String with a maximum of 200 characters and respect [a-zA-Z\_-]
:return: Following dictionary:
::
{'filter': {'id': < id >}}
:raise InvalidParameterError: The value of name or description is invalid.
:raise FilterDuplicateError: A filter with the given name already exists.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
filter_map = dict()
filter_map['name'] = name
filter_map['description'] = description
code, xml = self.submit({'filter': filter_map}, 'POST', 'filter/')
return self.response(code, xml) |
def save(self, annot=None, output_path=None):
"""Saves the collage to disk as an image.
Parameters
-----------
annot : str
text to annotate the figure with a super title
output_path : str
path to save the figure to.
Note: any spaces in the filename will be replaced with ``_``
"""
if annot is not None:
self.fig.suptitle(annot, backgroundcolor='black', color='g')
if output_path is not None:
output_path = output_path.replace(' ', '_')
# TODO improve bbox calculations to include ONLY the axes from collage
# and nothing else
self.fig.savefig(output_path + '.png', bbox_inches='tight', dpi=200,
bbox_extra_artists=self.flat_grid) | Saves the collage to disk as an image.
Parameters
-----------
annot : str
text to annotate the figure with a super title
output_path : str
path to save the figure to.
Note: any spaces in the filename will be replaced with ``_`` | Below is the instruction that describes the task:
### Input:
Saves the collage to disk as an image.
Parameters
-----------
annot : str
text to annotate the figure with a super title
output_path : str
path to save the figure to.
Note: any spaces in the filename will be replaced with ``_``
### Response:
def save(self, annot=None, output_path=None):
"""Saves the collage to disk as an image.
Parameters
-----------
annot : str
text to annotate the figure with a super title
output_path : str
path to save the figure to.
Note: any spaces in the filename will be replaced with ``_``
"""
if annot is not None:
self.fig.suptitle(annot, backgroundcolor='black', color='g')
if output_path is not None:
output_path = output_path.replace(' ', '_')
# TODO improve bbox calculations to include ONLY the axes from collage
# and nothing else
self.fig.savefig(output_path + '.png', bbox_inches='tight', dpi=200,
bbox_extra_artists=self.flat_grid) |
def _term(self, term):
"""Add a term to the query.
Arguments:
term (str): The term to add.
Returns:
SearchHelper: Self
"""
# All terms must be strings for Elasticsearch
term = str(term)
if term:
self.__query["q"] += term
return self | Add a term to the query.
Arguments:
term (str): The term to add.
Returns:
SearchHelper: Self | Below is the instruction that describes the task:
### Input:
Add a term to the query.
Arguments:
term (str): The term to add.
Returns:
SearchHelper: Self
### Response:
def _term(self, term):
"""Add a term to the query.
Arguments:
term (str): The term to add.
Returns:
SearchHelper: Self
"""
# All terms must be strings for Elasticsearch
term = str(term)
if term:
self.__query["q"] += term
return self |
def search_simple_form(context, **kwargs):
"""
Search form
Usage example::
{% search_simple_form %}
:param context: the context
:param kwargs: HTML attributes for the form
:return:
"""
data = kwargs
if 'action' in data:
del data['action']
if 'method' in data:
del data['method']
return {'query': context['request'].GET.get('q', ''), 'data': data} | Search form
Usage example::
{% search_simple_form %}
:param context: the context
:param kwargs: HTML attributes for the form
:return: | Below is the instruction that describes the task:
### Input:
Search form
Usage example::
{% search_simple_form %}
:param context: the context
:param kwargs: HTML attributes for the form
:return:
### Response:
def search_simple_form(context, **kwargs):
"""
Search form
Usage example::
{% search_simple_form %}
:param context: the context
:param kwargs: HTML attributes for the form
:return:
"""
data = kwargs
if 'action' in data:
del data['action']
if 'method' in data:
del data['method']
return {'query': context['request'].GET.get('q', ''), 'data': data} |
def check_can_migrate(cls, resource):
"""Check if virtual machine can be migrated to another datacenter."""
vm_id = cls.usable_id(resource)
result = cls.call('hosting.vm.can_migrate', vm_id)
if not result['can_migrate']:
if result['matched']:
matched = result['matched'][0]
cls.echo('Your VM %s cannot be migrated yet. Migration will '
'be available when datacenter %s is opened.'
% (resource, matched))
else:
cls.echo('Your VM %s cannot be migrated.' % resource)
return False
return True | Check if virtual machine can be migrated to another datacenter. | Below is the instruction that describes the task:
### Input:
Check if virtual machine can be migrated to another datacenter.
### Response:
def check_can_migrate(cls, resource):
"""Check if virtual machine can be migrated to another datacenter."""
vm_id = cls.usable_id(resource)
result = cls.call('hosting.vm.can_migrate', vm_id)
if not result['can_migrate']:
if result['matched']:
matched = result['matched'][0]
cls.echo('Your VM %s cannot be migrated yet. Migration will '
'be available when datacenter %s is opened.'
% (resource, matched))
else:
cls.echo('Your VM %s cannot be migrated.' % resource)
return False
return True |
def create(self, path, value=b"", acl=None, ephemeral=False,
sequence=False):
""" wrapper that handles encoding (yay Py3k) """
super(XTransactionRequest, self).create(path, to_bytes(value), acl, ephemeral, sequence) | wrapper that handles encoding (yay Py3k) | Below is the instruction that describes the task:
### Input:
wrapper that handles encoding (yay Py3k)
### Response:
def create(self, path, value=b"", acl=None, ephemeral=False,
sequence=False):
""" wrapper that handles encoding (yay Py3k) """
super(XTransactionRequest, self).create(path, to_bytes(value), acl, ephemeral, sequence) |
def _update_internal_column_state(self, column_names):
""" Update the internal state with some (possibly) new columns
:param column_names: an iterable which contains new column names
"""
for k in column_names:
if k not in self._column_name_idx:
self._column_name_idx[k] = len(self._column_name_list)
self._column_name_list.append(k) | Update the internal state with some (possibly) new columns
:param column_names: an iterable which contains new column names | Below is the instruction that describes the task:
### Input:
Update the internal state with some (possibly) new columns
:param column_names: an iterable which contains new column names
### Response:
def _update_internal_column_state(self, column_names):
""" Update the internal state with some (possibly) new columns
:param column_names: an iterable which contains new column names
"""
for k in column_names:
if k not in self._column_name_idx:
self._column_name_idx[k] = len(self._column_name_list)
self._column_name_list.append(k) |
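The dict-plus-list bookkeeping above, shown standalone:
idx, names = {}, []                 # name -> position, and ordered names
for k in ['a', 'b', 'a', 'c']:
    if k not in idx:
        idx[k] = len(names)
        names.append(k)
assert idx == {'a': 0, 'b': 1, 'c': 2} and names == ['a', 'b', 'c']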
def request(cls, name, *args, **kwargs):
"""Helper method for creating request messages.
Parameters
----------
name : str
The name of the message.
args : list of strings
The message arguments.
Keyword arguments
-----------------
mid : str or None
Message ID to use or None (default) for no Message ID
"""
mid = kwargs.pop('mid', None)
if len(kwargs) > 0:
raise TypeError('Invalid keyword argument(s): %r' % kwargs)
return cls(cls.REQUEST, name, args, mid) | Helper method for creating request messages.
Parameters
----------
name : str
The name of the message.
args : list of strings
The message arguments.
Keyword arguments
-----------------
mid : str or None
Message ID to use or None (default) for no Message ID | Below is the instruction that describes the task:
### Input:
Helper method for creating request messages.
Parameters
----------
name : str
The name of the message.
args : list of strings
The message arguments.
Keyword arguments
-----------------
mid : str or None
Message ID to use or None (default) for no Message ID
### Response:
def request(cls, name, *args, **kwargs):
"""Helper method for creating request messages.
Parameters
----------
name : str
The name of the message.
args : list of strings
The message arguments.
Keyword arguments
-----------------
mid : str or None
Message ID to use or None (default) for no Message ID
"""
mid = kwargs.pop('mid', None)
if len(kwargs) > 0:
raise TypeError('Invalid keyword argument(s): %r' % kwargs)
return cls(cls.REQUEST, name, args, mid) |
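The pop-then-reject idiom for keyword-only arguments, standalone (the function name is illustrative):
def request_like(name, *args, **kwargs):
    mid = kwargs.pop('mid', None)             # accept only the 'mid' keyword
    if kwargs:                                # anything left over is an error
        raise TypeError('Invalid keyword argument(s): %r' % kwargs)
    return name, args, mid

assert request_like('help', 'arg1', mid='7') == ('help', ('arg1',), '7')
# request_like('help', bogus=1) raises TypeError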
def samples(self, nsamples, rstate=None):
"""
Draw `nsamples` samples randomly distributed within the unit cube.
Returns
-------
x : `~numpy.ndarray` with shape (nsamples, ndim)
A collection of coordinates within the unit cube.
"""
if rstate is None:
rstate = np.random
xs = np.array([self.sample(rstate=rstate) for i in range(nsamples)])
return xs | Draw `nsamples` samples randomly distributed within the unit cube.
Returns
-------
x : `~numpy.ndarray` with shape (nsamples, ndim)
A collection of coordinates within the unit cube. | Below is the instruction that describes the task:
### Input:
Draw `nsamples` samples randomly distributed within the unit cube.
Returns
-------
x : `~numpy.ndarray` with shape (nsamples, ndim)
A collection of coordinates within the unit cube.
### Response:
def samples(self, nsamples, rstate=None):
"""
Draw `nsamples` samples randomly distributed within the unit cube.
Returns
-------
x : `~numpy.ndarray` with shape (nsamples, ndim)
A collection of coordinates within the unit cube.
"""
if rstate is None:
rstate = np.random
xs = np.array([self.sample(rstate=rstate) for i in range(nsamples)])
return xs |
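A sketch of the same batching with a stub sample(); the uniform draw below stands in for whatever the subclass actually implements:
import numpy as np

rstate = np.random.RandomState(0)
sample = lambda rstate: rstate.uniform(size=3)    # stand-in for self.sample
xs = np.array([sample(rstate) for _ in range(5)])
print(xs.shape)                                   # (5, 3)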
def psetex(self, key, milliseconds, value):
"""Set the value and expiration in milliseconds of a key.
:raises TypeError: if milliseconds is not int
"""
if not isinstance(milliseconds, int):
raise TypeError("milliseconds argument must be int")
fut = self.execute(b'PSETEX', key, milliseconds, value)
return wait_ok(fut) | Set the value and expiration in milliseconds of a key.
:raises TypeError: if milliseconds is not int | Below is the instruction that describes the task:
### Input:
Set the value and expiration in milliseconds of a key.
:raises TypeError: if milliseconds is not int
### Response:
def psetex(self, key, milliseconds, value):
"""Set the value and expiration in milliseconds of a key.
:raises TypeError: if milliseconds is not int
"""
if not isinstance(milliseconds, int):
raise TypeError("milliseconds argument must be int")
fut = self.execute(b'PSETEX', key, milliseconds, value)
return wait_ok(fut) |
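A call-pattern sketch (assumption: `redis` is a connected asyncio client exposing this mixin; the key and value are illustrative):
async def cache_session(redis):
    await redis.psetex('session:42', 1500, 'abc')   # key expires after 1500 ms
    # redis.psetex('session:42', 1.5, 'abc') would raise TypeError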
def position_and_velocity(self, name, tdb, tdb2=0.0):
"""[DEPRECATED] Compute the position and velocity of `name` at ``tdb [+ tdb2]``.
The position and velocity are returned in a 2-tuple::
([x y z], [xdot ydot zdot])
The barycentric dynamical time `tdb` argument should be a float.
If there are many dates you want computed, then make `tdb` an
array, which is more efficient than calling this method multiple
times; the return values will be two-dimensional arrays giving a
row of values for each coordinate.
For extra precision, the time can be split into two floats; a
popular choice is to use `tdb` for the integer or half-integer
date, and `tdb2` to hold the remaining fraction.
Consult the `names` attribute of this ephemeris for the values
of `name` it supports, such as ``'mars'`` or ``'earthmoon'``.
"""
bundle = self.compute_bundle(name, tdb, tdb2)
position = self.position_from_bundle(bundle)
velocity = self.velocity_from_bundle(bundle)
return position, velocity | [DEPRECATED] Compute the position and velocity of `name` at ``tdb [+ tdb2]``.
The position and velocity are returned in a 2-tuple::
([x y z], [xdot ydot zdot])
The barycentric dynamical time `tdb` argument should be a float.
If there are many dates you want computed, then make `tdb` an
array, which is more efficient than calling this method multiple
times; the return values will be two-dimensional arrays giving a
row of values for each coordinate.
For extra precision, the time can be split into two floats; a
popular choice is to use `tdb` for the integer or half-integer
date, and `tdb2` to hold the remaining fraction.
Consult the `names` attribute of this ephemeris for the values
of `name` it supports, such as ``'mars'`` or ``'earthmoon'``. | Below is the instruction that describes the task:
### Input:
[DEPRECATED] Compute the position and velocity of `name` at ``tdb [+ tdb2]``.
The position and velocity are returned in a 2-tuple::
([x y z], [xdot ydot zdot])
The barycentric dynamical time `tdb` argument should be a float.
If there are many dates you want computed, then make `tdb` an
array, which is more efficient than calling this method multiple
times; the return values will be two-dimensional arrays giving a
row of values for each coordinate.
For extra precision, the time can be split into two floats; a
popular choice is to use `tdb` for the integer or half-integer
date, and `tdb2` to hold the remaining fraction.
Consult the `names` attribute of this ephemeris for the values
of `name` it supports, such as ``'mars'`` or ``'earthmoon'``.
### Response:
def position_and_velocity(self, name, tdb, tdb2=0.0):
"""[DEPRECATED] Compute the position and velocity of `name` at ``tdb [+ tdb2]``.
The position and velocity are returned in a 2-tuple::
([x y z], [xdot ydot zdot])
The barycentric dynamical time `tdb` argument should be a float.
If there are many dates you want computed, then make `tdb` an
array, which is more efficient than calling this method multiple
times; the return values will be two-dimensional arrays giving a
row of values for each coordinate.
For extra precision, the time can be split into two floats; a
popular choice is to use `tdb` for the integer or half-integer
date, and `tdb2` to hold the remaining fraction.
Consult the `names` attribute of this ephemeris for the values
of `name` it supports, such as ``'mars'`` or ``'earthmoon'``.
"""
bundle = self.compute_bundle(name, tdb, tdb2)
position = self.position_from_bundle(bundle)
velocity = self.velocity_from_bundle(bundle)
return position, velocity |
def _longest_contig(self, contig_set, contig_lengths):
'''Returns the name of the longest contig, from the set of names contig_set. contig_lengths
is expected to be a dictionary of contig name => length.'''
longest_name = None
max_length = -1
for name in contig_set:
if contig_lengths[name] > max_length:
longest_name = name
max_length = contig_lengths[name]
assert max_length != -1
assert longest_name is not None
return longest_name | Returns the name of the longest contig, from the set of names contig_set. contig_lengths
is expected to be a dictionary of contig name => length. | Below is the the instruction that describes the task:
### Input:
Returns the name of the longest contig, from the set of names contig_set. contig_lengths
is expected to be a dictionary of contig name => length. | Below is the instruction that describes the task:
### Response:
def _longest_contig(self, contig_set, contig_lengths):
'''Returns the name of the longest contig, from the set of names contig_set. contig_lengths
is expected to be a dictionary of contig name => length.'''
longest_name = None
max_length = -1
for name in contig_set:
if contig_lengths[name] > max_length:
longest_name = name
max_length = contig_lengths[name]
assert max_length != -1
assert longest_name is not None
return longest_name |
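The argmax loop above is equivalent to max() with a key function:
contig_lengths = {'c1': 120, 'c2': 450, 'c3': 300}
longest = max({'c1', 'c2', 'c3'}, key=contig_lengths.get)
assert longest == 'c2'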
def split_predicate(ex: Extraction) -> Extraction:
"""
Ensure single word predicate
by adding "before-predicate" and "after-predicate"
arguments.
"""
rel_toks = ex.toks[char_to_word_index(ex.rel.span[0], ex.sent) \
: char_to_word_index(ex.rel.span[1], ex.sent) + 1]
if not rel_toks:
return ex
verb_inds = [tok_ind for (tok_ind, tok)
in enumerate(rel_toks)
if tok.tag_.startswith('VB')]
last_verb_ind = verb_inds[-1] if verb_inds \
else (len(rel_toks) - 1)
rel_parts = [element_from_span([rel_toks[last_verb_ind]],
'V')]
before_verb = rel_toks[ : last_verb_ind]
after_verb = rel_toks[last_verb_ind + 1 : ]
if before_verb:
rel_parts.append(element_from_span(before_verb, "BV"))
if after_verb:
rel_parts.append(element_from_span(after_verb, "AV"))
return Extraction(ex.sent, ex.toks, ex.arg1, rel_parts, ex.args2, ex.confidence) | Ensure single word predicate
by adding "before-predicate" and "after-predicate"
arguments. | Below is the instruction that describes the task:
### Input:
Ensure single word predicate
by adding "before-predicate" and "after-predicate"
arguments.
### Response:
def split_predicate(ex: Extraction) -> Extraction:
"""
Ensure single word predicate
by adding "before-predicate" and "after-predicate"
arguments.
"""
rel_toks = ex.toks[char_to_word_index(ex.rel.span[0], ex.sent) \
: char_to_word_index(ex.rel.span[1], ex.sent) + 1]
if not rel_toks:
return ex
verb_inds = [tok_ind for (tok_ind, tok)
in enumerate(rel_toks)
if tok.tag_.startswith('VB')]
last_verb_ind = verb_inds[-1] if verb_inds \
else (len(rel_toks) - 1)
rel_parts = [element_from_span([rel_toks[last_verb_ind]],
'V')]
before_verb = rel_toks[ : last_verb_ind]
after_verb = rel_toks[last_verb_ind + 1 : ]
if before_verb:
rel_parts.append(element_from_span(before_verb, "BV"))
if after_verb:
rel_parts.append(element_from_span(after_verb, "AV"))
return Extraction(ex.sent, ex.toks, ex.arg1, rel_parts, ex.args2, ex.confidence) |
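The last-verb split on plain POS tags, standalone (the tag sequence is illustrative):
rel_tags = ['MD', 'RB', 'VB', 'RP']           # e.g. "could not give up"
verb_inds = [i for i, t in enumerate(rel_tags) if t.startswith('VB')]
last = verb_inds[-1] if verb_inds else len(rel_tags) - 1
before, verb, after = rel_tags[:last], rel_tags[last], rel_tags[last + 1:]
assert (before, verb, after) == (['MD', 'RB'], 'VB', ['RP'])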
def get_invocation_command_nodefault(
toolset, tool, user_provided_command=[], additional_paths=[], path_last=False):
"""
A helper rule to get the command to invoke some tool. If
'user-provided-command' is not given, tries to find binary named 'tool' in
PATH and in the passed 'additional-path'. Otherwise, verifies that the first
element of 'user-provided-command' is an existing program.
This rule returns the command to be used when invoking the tool. If we can't
find the tool, a warning is issued. If 'path-last' is specified, PATH is
checked after 'additional-paths' when searching for 'tool'.
"""
assert isinstance(toolset, basestring)
assert isinstance(tool, basestring)
assert is_iterable_typed(user_provided_command, basestring)
assert is_iterable_typed(additional_paths, basestring) or additional_paths is None
assert isinstance(path_last, (int, bool))
if not user_provided_command:
command = find_tool(tool, additional_paths, path_last)
if not command and __debug_configuration:
print "warning: toolset", toolset, "initialization: can't find tool, tool"
#FIXME
#print "warning: initialized from" [ errors.nearest-user-location ] ;
else:
command = check_tool(user_provided_command)
if not command and __debug_configuration:
print "warning: toolset", toolset, "initialization:"
print "warning: can't find user-provided command", user_provided_command
#FIXME
#ECHO "warning: initialized from" [ errors.nearest-user-location ]
command = []
if command:
command = ' '.join(command)
return command | A helper rule to get the command to invoke some tool. If
'user-provided-command' is not given, tries to find binary named 'tool' in
PATH and in the passed 'additional-path'. Otherwise, verifies that the first
element of 'user-provided-command' is an existing program.
This rule returns the command to be used when invoking the tool. If we can't
find the tool, a warning is issued. If 'path-last' is specified, PATH is
checked after 'additional-paths' when searching for 'tool'. | Below is the instruction that describes the task:
### Input:
A helper rule to get the command to invoke some tool. If
'user-provided-command' is not given, tries to find binary named 'tool' in
PATH and in the passed 'additional-path'. Otherwise, verifies that the first
element of 'user-provided-command' is an existing program.
This rule returns the command to be used when invoking the tool. If we can't
find the tool, a warning is issued. If 'path-last' is specified, PATH is
checked after 'additional-paths' when searching for 'tool'.
### Response:
def get_invocation_command_nodefault(
toolset, tool, user_provided_command=[], additional_paths=[], path_last=False):
"""
A helper rule to get the command to invoke some tool. If
'user-provided-command' is not given, tries to find binary named 'tool' in
PATH and in the passed 'additional-path'. Otherwise, verifies that the first
element of 'user-provided-command' is an existing program.
This rule returns the command to be used when invoking the tool. If we can't
find the tool, a warning is issued. If 'path-last' is specified, PATH is
checked after 'additional-paths' when searching for 'tool'.
"""
assert isinstance(toolset, basestring)
assert isinstance(tool, basestring)
assert is_iterable_typed(user_provided_command, basestring)
assert is_iterable_typed(additional_paths, basestring) or additional_paths is None
assert isinstance(path_last, (int, bool))
if not user_provided_command:
command = find_tool(tool, additional_paths, path_last)
if not command and __debug_configuration:
print "warning: toolset", toolset, "initialization: can't find tool, tool"
#FIXME
#print "warning: initialized from" [ errors.nearest-user-location ] ;
else:
command = check_tool(user_provided_command)
if not command and __debug_configuration:
print "warning: toolset", toolset, "initialization:"
print "warning: can't find user-provided command", user_provided_command
#FIXME
#ECHO "warning: initialized from" [ errors.nearest-user-location ]
command = []
if command:
command = ' '.join(command)
return command |
def _write(self, data):
"""
Note: print()-statements cause multiple write calls.
(write('line') and write('\n')). Of course we don't want to call
`run_in_terminal` for every individual call, because that's too
expensive, and as long as the newline hasn't been written, the
text itself is again overwritten by the rendering of the input
command line. Therefore, we have a little buffer which holds the
text until a newline is written to stdout.
"""
if '\n' in data:
# When there is a newline in the data, write everything before the
# newline, including the newline itself.
before, after = data.rsplit('\n', 1)
to_write = self._buffer + [before, '\n']
self._buffer = [after]
def run():
for s in to_write:
if self._raw:
self._cli.output.write_raw(s)
else:
self._cli.output.write(s)
self._do(run)
else:
# Otherwise, cache in buffer.
self._buffer.append(data) | Note: print()-statements cause multiple write calls.
(write('line') and write('\n')). Of course we don't want to call
`run_in_terminal` for every individual call, because that's too
expensive, and as long as the newline hasn't been written, the
text itself is again overwritten by the rendering of the input
command line. Therefore, we have a little buffer which holds the
text until a newline is written to stdout. | Below is the instruction that describes the task:
### Input:
Note: print()-statements cause multiple write calls.
(write('line') and write('\n')). Of course we don't want to call
`run_in_terminal` for every individual call, because that's too
expensive, and as long as the newline hasn't been written, the
text itself is again overwritten by the rendering of the input
command line. Therefore, we have a little buffer which holds the
text until a newline is written to stdout.
### Response:
def _write(self, data):
"""
Note: print()-statements cause multiple write calls.
(write('line') and write('\n')). Of course we don't want to call
`run_in_terminal` for every individual call, because that's too
expensive, and as long as the newline hasn't been written, the
text itself is again overwritten by the rendering of the input
command line. Therefore, we have a little buffer which holds the
text until a newline is written to stdout.
"""
if '\n' in data:
# When there is a newline in the data, write everything before the
# newline, including the newline itself.
before, after = data.rsplit('\n', 1)
to_write = self._buffer + [before, '\n']
self._buffer = [after]
def run():
for s in to_write:
if self._raw:
self._cli.output.write_raw(s)
else:
self._cli.output.write(s)
self._do(run)
else:
# Otherwise, cache in buffer.
self._buffer.append(data) |
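The newline-gated buffering of _write in isolation (plain lists stand in for the terminal output):
buf, out = [], []

def write(data):
    if '\n' in data:
        before, after = data.rsplit('\n', 1)
        out.append(''.join(buf) + before + '\n')   # flush up to the newline
        buf[:] = [after]                           # keep the trailing fragment
    else:
        buf.append(data)                           # no newline yet: just cache

write('line'); write('\n')       # a single print() arrives as two calls
assert out == ['line\n'] and buf == ['']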
def focus_next(self):
"""move focus to next position (DFO)"""
w, focuspos = self.get_focus()
next = self._tree.next_position(focuspos)
if next is not None:
self.set_focus(next) | move focus to next position (DFO) | Below is the instruction that describes the task:
### Input:
move focus to next position (DFO)
### Response:
def focus_next(self):
"""move focus to next position (DFO)"""
w, focuspos = self.get_focus()
next = self._tree.next_position(focuspos)
if next is not None:
self.set_focus(next) |
def _dist_to_trans(self, dist):
"""Convert mouse x, y movement into x, y, z translations"""
rae = np.array([self.roll, self.azimuth, self.elevation]) * np.pi / 180
sro, saz, sel = np.sin(rae)
cro, caz, cel = np.cos(rae)
dx = (+ dist[0] * (cro * caz + sro * sel * saz)
+ dist[1] * (sro * caz - cro * sel * saz))
dy = (+ dist[0] * (cro * saz - sro * sel * caz)
+ dist[1] * (sro * saz + cro * sel * caz))
dz = (- dist[0] * sro * cel + dist[1] * cro * cel)
return dx, dy, dz | Convert mouse x, y movement into x, y, z translations | Below is the instruction that describes the task:
### Input:
Convert mouse x, y movement into x, y, z translations
### Response:
def _dist_to_trans(self, dist):
"""Convert mouse x, y movement into x, y, z translations"""
rae = np.array([self.roll, self.azimuth, self.elevation]) * np.pi / 180
sro, saz, sel = np.sin(rae)
cro, caz, cel = np.cos(rae)
dx = (+ dist[0] * (cro * caz + sro * sel * saz)
+ dist[1] * (sro * caz - cro * sel * saz))
dy = (+ dist[0] * (cro * saz - sro * sel * caz)
+ dist[1] * (sro * saz + cro * sel * caz))
dz = (- dist[0] * sro * cel + dist[1] * cro * cel)
return dx, dy, dz |
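A quick sanity check of the trigonometry: with roll = azimuth = elevation = 0 the rotation is the identity, so mouse (a, b) maps to world (a, 0, b):
import numpy as np

sro, saz, sel = np.sin(np.zeros(3))
cro, caz, cel = np.cos(np.zeros(3))
dist = (2.0, 3.0)
dx = dist[0] * (cro * caz + sro * sel * saz) + dist[1] * (sro * caz - cro * sel * saz)
dy = dist[0] * (cro * saz - sro * sel * caz) + dist[1] * (sro * saz + cro * sel * caz)
dz = -dist[0] * sro * cel + dist[1] * cro * cel
assert (dx, dy, dz) == (2.0, 0.0, 3.0)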
def get_queryset(self):
"""
Return queryset limited to not removed entries.
"""
kwargs = {'model': self.model, 'using': self._db}
if hasattr(self, '_hints'):
kwargs['hints'] = self._hints
return self._queryset_class(**kwargs).filter(is_removed=False) | Return queryset limited to not removed entries. | Below is the instruction that describes the task:
### Input:
Return queryset limited to not removed entries.
### Response:
def get_queryset(self):
"""
Return queryset limited to not removed entries.
"""
kwargs = {'model': self.model, 'using': self._db}
if hasattr(self, '_hints'):
kwargs['hints'] = self._hints
return self._queryset_class(**kwargs).filter(is_removed=False) |
def _diff(state_data, resource_object):
'''helper method to compare salt state info with the PagerDuty API json structure,
and determine if we need to update.
returns the dict to pass to the PD API to perform the update, or empty dict if no update.
'''
objects_differ = None
for k, v in state_data['service'].items():
if k == 'escalation_policy_id':
resource_value = resource_object['escalation_policy']['id']
elif k == 'service_key':
# service_key on create must be 'foo' but the GET will return '[email protected]'
resource_value = resource_object['service_key']
if '@' in resource_value:
resource_value = resource_value[0:resource_value.find('@')]
else:
resource_value = resource_object[k]
if v != resource_value:
objects_differ = '{0} {1} {2}'.format(k, v, resource_value)
break
if objects_differ:
return state_data
else:
return {} | helper method to compare salt state info with the PagerDuty API json structure,
and determine if we need to update.
returns the dict to pass to the PD API to perform the update, or empty dict if no update. | Below is the instruction that describes the task:
### Input:
helper method to compare salt state info with the PagerDuty API json structure,
and determine if we need to update.
returns the dict to pass to the PD API to perform the update, or empty dict if no update.
### Response:
def _diff(state_data, resource_object):
'''helper method to compare salt state info with the PagerDuty API json structure,
and determine if we need to update.
returns the dict to pass to the PD API to perform the update, or empty dict if no update.
'''
objects_differ = None
for k, v in state_data['service'].items():
if k == 'escalation_policy_id':
resource_value = resource_object['escalation_policy']['id']
elif k == 'service_key':
# service_key on create must be 'foo' but the GET will return '[email protected]'
resource_value = resource_object['service_key']
if '@' in resource_value:
resource_value = resource_value[0:resource_value.find('@')]
else:
resource_value = resource_object[k]
if v != resource_value:
objects_differ = '{0} {1} {2}'.format(k, v, resource_value)
break
if objects_differ:
return state_data
else:
return {} |
def load_colormap(self, name=None):
"""
Loads a colormap of the supplied name. None means use the internal
name. (See self.get_name())
"""
if name == None: name = self.get_name()
if name == "" or not type(name)==str: return "Error: Bad name."
# assemble the path to the colormap
path = _os.path.join(_settings.path_home, "colormaps", name+".cmap")
# make sure the file exists
if not _os.path.exists(path):
print("load_colormap(): Colormap '"+name+"' does not exist. Creating.")
self.save_colormap(name)
return
# open the file and get the lines
f = open(path, 'r')
x = f.read()
f.close()
try:
self._colorpoint_list = eval(x)
except:
print("Invalid colormap. Overwriting.")
self.save_colormap()
# update the image
self.update_image()
return self | Loads a colormap of the supplied name. None means used the internal
name. (See self.get_name()) | Below is the the instruction that describes the task:
### Input:
Loads a colormap of the supplied name. None means use the internal
name. (See self.get_name())
### Response:
def load_colormap(self, name=None):
"""
Loads a colormap of the supplied name. None means use the internal
name. (See self.get_name())
"""
if name == None: name = self.get_name()
if name == "" or not type(name)==str: return "Error: Bad name."
# assemble the path to the colormap
path = _os.path.join(_settings.path_home, "colormaps", name+".cmap")
# make sure the file exists
if not _os.path.exists(path):
print("load_colormap(): Colormap '"+name+"' does not exist. Creating.")
self.save_colormap(name)
return
# open the file and get the lines
f = open(path, 'r')
x = f.read()
f.close()
try:
self._colorpoint_list = eval(x)
except:
print("Invalid colormap. Overwriting.")
self.save_colormap()
# update the image
self.update_image()
return self |
def get_id(self):
"""Returns unique id of an alignment. """
return hash(str(self.title) + str(self.best_score()) + str(self.hit_def)) | Returns unique id of an alignment. | Below is the instruction that describes the task:
### Input:
Returns unique id of an alignment.
### Response:
def get_id(self):
"""Returns unique id of an alignment. """
return hash(str(self.title) + str(self.best_score()) + str(self.hit_def)) |
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None):
""" fillna but using the interpolate machinery """
inplace = validate_bool_kwarg(inplace, 'inplace')
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
values = self.values if inplace else self.values.copy()
values, fill_value = self._try_coerce_args(values, fill_value)
values = missing.interpolate_2d(values, method=method, axis=axis,
limit=limit, fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [self.make_block_same_class(values, ndim=self.ndim)]
return self._maybe_downcast(blocks, downcast) | fillna but using the interpolate machinery | Below is the instruction that describes the task:
### Input:
fillna but using the interpolate machinery
### Response:
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None):
""" fillna but using the interpolate machinery """
inplace = validate_bool_kwarg(inplace, 'inplace')
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
values = self.values if inplace else self.values.copy()
values, fill_value = self._try_coerce_args(values, fill_value)
values = missing.interpolate_2d(values, method=method, axis=axis,
limit=limit, fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [self.make_block_same_class(values, ndim=self.ndim)]
return self._maybe_downcast(blocks, downcast) |
def AppMoVCopeland(profile, alpha=0.5):
"""
Returns an integer that is equal to the margin of victory of the election profile, that is,
the smallest number k such that changing k votes can change the winners.
:ivar Profile profile: A Profile object that represents an election profile.
"""
# Currently, we expect the profile to contain complete ordering over candidates.
elecType = profile.getElecType()
if elecType != "soc" and elecType != "toc":
print("ERROR: unsupported profile type")
exit()
# Initialization
n = profile.numVoters
m = profile.numCands
# Compute the original winner d
# Initialize each Copeland score as 0.0.
copelandscores = {}
for cand in profile.candMap.keys():
copelandscores[cand] = 0.0
# For each pair of candidates, calculate the number of votes in which one beat the other.
wmgMap = profile.getWmg()
for cand1, cand2 in itertools.combinations(wmgMap.keys(), 2):
if cand2 in wmgMap[cand1].keys():
if wmgMap[cand1][cand2] > 0:
copelandscores[cand1] += 1.0
elif wmgMap[cand1][cand2] < 0:
copelandscores[cand2] += 1.0
# If a pair of candidates is tied, we add alpha to their score for each vote.
else:
copelandscores[cand1] += alpha
copelandscores[cand2] += alpha
d = max(copelandscores.items(), key=lambda x: x[1])[0]
#Compute c* = argmin_c RM(d,c)
relative_margin = {}
alter_without_d = delete(range(1, m + 1), d - 1)
for c in alter_without_d:
relative_margin[c] = RM(wmgMap, n, m, d, c, alpha)
c_star = min(relative_margin.items(), key=lambda x: x[1])[0]
return relative_margin[c_star]*(math.ceil(log(m)) + 1) | Returns an integer that is equal to the margin of victory of the election profile, that is,
the smallest number k such that changing k votes can change the winners.
:ivar Profile profile: A Profile object that represents an election profile. | Below is the instruction that describes the task:
### Input:
Returns an integer that is equal to the margin of victory of the election profile, that is,
the smallest number k such that changing k votes can change the winners.
:ivar Profile profile: A Profile object that represents an election profile.
### Response:
def AppMoVCopeland(profile, alpha=0.5):
"""
Returns an integer that is equal to the margin of victory of the election profile, that is,
the smallest number k such that changing k votes can change the winners.
:ivar Profile profile: A Profile object that represents an election profile.
"""
# Currently, we expect the profile to contain complete ordering over candidates.
elecType = profile.getElecType()
if elecType != "soc" and elecType != "toc":
print("ERROR: unsupported profile type")
exit()
# Initialization
n = profile.numVoters
m = profile.numCands
# Compute the original winner d
# Initialize each Copeland score as 0.0.
copelandscores = {}
for cand in profile.candMap.keys():
copelandscores[cand] = 0.0
# For each pair of candidates, calculate the number of votes in which one beat the other.
wmgMap = profile.getWmg()
for cand1, cand2 in itertools.combinations(wmgMap.keys(), 2):
if cand2 in wmgMap[cand1].keys():
if wmgMap[cand1][cand2] > 0:
copelandscores[cand1] += 1.0
elif wmgMap[cand1][cand2] < 0:
copelandscores[cand2] += 1.0
# If a pair of candidates is tied, we add alpha to their score for each vote.
else:
copelandscores[cand1] += alpha
copelandscores[cand2] += alpha
d = max(copelandscores.items(), key=lambda x: x[1])[0]
#Compute c* = argmin_c RM(d,c)
relative_margin = {}
alter_without_d = delete(range(1, m + 1), d - 1)
for c in alter_without_d:
relative_margin[c] = RM(wmgMap, n, m, d, c, alpha)
c_star = min(relative_margin.items(), key=lambda x: x[1])[0]
return relative_margin[c_star]*(math.ceil(log(m)) + 1) |
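For intuition, here is the Copeland-scoring loop from the function above run standalone on a hypothetical weighted-majority graph (the wmg values are made up for illustration):
import itertools

wmg = {1: {2: 4, 3: 0}, 2: {1: -4, 3: 2}, 3: {1: 0, 2: -2}}  # toy pairwise margins
alpha = 0.5
scores = {c: 0.0 for c in wmg}
for c1, c2 in itertools.combinations(wmg, 2):
    if wmg[c1][c2] > 0:
        scores[c1] += 1.0      # c1 wins the pairwise contest
    elif wmg[c1][c2] < 0:
        scores[c2] += 1.0      # c2 wins
    else:
        scores[c1] += alpha    # a tie adds alpha to both candidates
        scores[c2] += alpha
print(scores)  # {1: 1.5, 2: 1.0, 3: 0.5}, so candidate 1 is the Copeland winner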
def _init_unique_sets(self):
"""Initialise sets used for uniqueness checking."""
ks = dict()
for t in self._unique_checks:
key = t[0]
ks[key] = set() # empty set
return ks | Initialise sets used for uniqueness checking. | Below is the instruction that describes the task:
### Input:
Initialise sets used for uniqueness checking.
### Response:
def _init_unique_sets(self):
"""Initialise sets used for uniqueness checking."""
ks = dict()
for t in self._unique_checks:
key = t[0]
ks[key] = set() # empty set
return ks |
def match_alphabet(self, pattern):
"""Initialise the alphabet for the Bitap algorithm.
Args:
pattern: The text to encode.
Returns:
Hash of character locations.
"""
s = {}
for char in pattern:
s[char] = 0
for i in xrange(len(pattern)):
s[pattern[i]] |= 1 << (len(pattern) - i - 1)
return s | Initialise the alphabet for the Bitap algorithm.
Args:
pattern: The text to encode.
Returns:
Hash of character locations. | Below is the instruction that describes the task:
### Input:
Initialise the alphabet for the Bitap algorithm.
Args:
pattern: The text to encode.
Returns:
Hash of character locations.
### Response:
def match_alphabet(self, pattern):
"""Initialise the alphabet for the Bitap algorithm.
Args:
pattern: The text to encode.
Returns:
Hash of character locations.
"""
s = {}
for char in pattern:
s[char] = 0
for i in xrange(len(pattern)):
s[pattern[i]] |= 1 << (len(pattern) - i - 1)
return s |
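As a quick sanity check, the same loop run standalone on the pattern "aba" (bit 2 marks the first character, bit 0 the last, and repeated characters OR their position bits together):
pattern = "aba"
s = {char: 0 for char in pattern}
for i in range(len(pattern)):
    s[pattern[i]] |= 1 << (len(pattern) - i - 1)
print(s)  # {'a': 5, 'b': 2}, i.e. a -> 0b101, b -> 0b010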
def is_dec(ip):
"""Return true if the IP address is in decimal notation."""
try:
dec = int(str(ip))
except ValueError:
return False
if dec > 4294967295 or dec < 0:
return False
return True | Return true if the IP address is in decimal notation. | Below is the instruction that describes the task:
### Input:
Return true if the IP address is in decimal notation.
### Response:
def is_dec(ip):
"""Return true if the IP address is in decimal notation."""
try:
dec = int(str(ip))
except ValueError:
return False
if dec > 4294967295 or dec < 0:
return False
return True |
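A few illustrative calls, with hand-picked values:
print(is_dec("3232235777"))  # True, 192.168.1.1 written as a single decimal integer
print(is_dec("4294967296"))  # False, one past the 32-bit maximum of 4294967295
print(is_dec("10.0.0.1"))    # False, int() raises ValueError on dotted quads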
def _generate_queues(queues, exchange, platform_queue):
""" Queues known by this worker """
return set([
Queue('celery', exchange, routing_key='celery'),
Queue(platform_queue, exchange, routing_key='#'),
] + [
Queue(q_name, exchange, routing_key=q_name)
for q_name in queues
]) | Queues known by this worker | Below is the instruction that describes the task:
### Input:
Queues known by this worker
### Response:
def _generate_queues(queues, exchange, platform_queue):
""" Queues known by this worker """
return set([
Queue('celery', exchange, routing_key='celery'),
Queue(platform_queue, exchange, routing_key='#'),
] + [
Queue(q_name, exchange, routing_key=q_name)
for q_name in queues
]) |
def readFILTERLIST(self):
""" Read a length-prefixed list of FILTERs """
number = self.readUI8()
return [self.readFILTER() for _ in range(number)] | Read a length-prefixed list of FILTERs | Below is the instruction that describes the task:
### Input:
Read a length-prefixed list of FILTERs
### Response:
def readFILTERLIST(self):
""" Read a length-prefixed list of FILTERs """
number = self.readUI8()
return [self.readFILTER() for _ in range(number)] |
def get_permission_checks(self, request, view):
"""
Get permission checks that will be executed for current action.
"""
if view.action is None:
return []
# if permissions are defined for view directly - use them.
if hasattr(view, view.action + '_permissions'):
return getattr(view, view.action + '_permissions')
# otherwise return view-level permissions + extra view permissions
extra_permissions = getattr(view, view.action + '_extra_permissions', [])
if request.method in SAFE_METHODS:
return getattr(view, 'safe_methods_permissions', []) + extra_permissions
else:
return getattr(view, 'unsafe_methods_permissions', []) + extra_permissions | Get permission checks that will be executed for current action. | Below is the instruction that describes the task:
### Input:
Get permission checks that will be executed for current action.
### Response:
def get_permission_checks(self, request, view):
"""
Get permission checks that will be executed for current action.
"""
if view.action is None:
return []
# if permissions are defined for view directly - use them.
if hasattr(view, view.action + '_permissions'):
return getattr(view, view.action + '_permissions')
# otherwise return view-level permissions + extra view permissions
extra_permissions = getattr(view, view.action + '_extra_permissions', [])
if request.method in SAFE_METHODS:
return getattr(view, 'safe_methods_permissions', []) + extra_permissions
else:
return getattr(view, 'unsafe_methods_permissions', []) + extra_permissions |
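A minimal sketch of a viewset wired for this lookup order; the permission-check names and their call signature are assumptions for illustration, not part of any documented API:
from rest_framework import viewsets

def is_staff(request, view, obj=None):
    return request.user.is_staff

def is_authenticated(request, view, obj=None):
    return request.user.is_authenticated

class ProjectViewSet(viewsets.ModelViewSet):
    create_permissions = [is_staff]                # used verbatim for the 'create' action
    safe_methods_permissions = [is_authenticated]  # fallback for GET/HEAD/OPTIONS
    unsafe_methods_permissions = [is_staff]        # fallback for POST/PUT/PATCH/DELETE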
def analyze_entities(self, document, encoding_type=None, retry=None, timeout=None, metadata=None):
"""
Finds named entities in the text along with entity types,
salience, mentions for each entity, and other properties.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or class google.cloud.language_v1.types.Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.types.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.language_v1.types.AnalyzeEntitiesResponse
"""
client = self.get_conn()
return client.analyze_entities(
document=document, encoding_type=encoding_type, retry=retry, timeout=timeout, metadata=metadata
) | Finds named entities in the text along with entity types,
salience, mentions for each entity, and other properties.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or class google.cloud.language_v1.types.Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.types.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.language_v1.types.AnalyzeEntitiesResponse | Below is the instruction that describes the task:
### Input:
Finds named entities in the text along with entity types,
salience, mentions for each entity, and other properties.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or class google.cloud.language_v1.types.Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.types.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.language_v1.types.AnalyzeEntitiesResponse
### Response:
def analyze_entities(self, document, encoding_type=None, retry=None, timeout=None, metadata=None):
"""
Finds named entities in the text along with entity types,
salience, mentions for each entity, and other properties.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or class google.cloud.language_v1.types.Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.types.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.language_v1.types.AnalyzeEntitiesResponse
"""
client = self.get_conn()
return client.analyze_entities(
document=document, encoding_type=encoding_type, retry=retry, timeout=timeout, metadata=metadata
) |
def _get_statevector(self):
"""Return the current statevector in JSON Result spec format"""
vec = np.reshape(self._statevector, 2 ** self._number_of_qubits)
# Expand complex numbers
vec = np.stack([vec.real, vec.imag], axis=1)
# Truncate small values
vec[abs(vec) < self._chop_threshold] = 0.0
return vec | Return the current statevector in JSON Result spec format | Below is the instruction that describes the task:
### Input:
Return the current statevector in JSON Result spec format
### Response:
def _get_statevector(self):
"""Return the current statevector in JSON Result spec format"""
vec = np.reshape(self._statevector, 2 ** self._number_of_qubits)
# Expand complex numbers
vec = np.stack([vec.real, vec.imag], axis=1)
# Truncate small values
vec[abs(vec) < self._chop_threshold] = 0.0
return vec |
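To make the real/imaginary expansion concrete, here is the same numpy recipe applied by hand to the one-qubit state (|0> + i|1>)/sqrt(2):
import numpy as np

vec = np.array([1 / np.sqrt(2), 1j / np.sqrt(2)])
out = np.stack([vec.real, vec.imag], axis=1)
out[abs(out) < 1e-15] = 0.0  # mimic the chop threshold
print(out)  # [[0.70710678 0.        ]
            #  [0.         0.70710678]]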
def get_chat_administrators(self, chat_id):
"""
Use this method to get a list of administrators in a chat. On success, returns an Array of ChatMember objects that contains information about all chat administrators except other bots. If the chat is a group or a supergroup and no administrators were appointed, only the creator will be returned.
https://core.telegram.org/bots/api#getchatadministrators
Parameters:
:param chat_id: Unique identifier for the target chat or username of the target supergroup or channel (in the format @channelusername)
:type chat_id: int | str|unicode
Returns:
:return: On success, returns an Array of ChatMember objects that contains information about all chat administrators except other bots
:rtype: list of pytgbot.api_types.receivable.peer.ChatMember
"""
assert_type_or_raise(chat_id, (int, unicode_type), parameter_name="chat_id")
result = self.do("getChatAdministrators", chat_id=chat_id)
if self.return_python_objects:
logger.debug("Trying to parse {data}".format(data=repr(result)))
from pytgbot.api_types.receivable.peer import ChatMember
try:
return ChatMember.from_array_list(result, list_level=1)
except TgApiParseException:
logger.debug("Failed parsing as api_type ChatMember", exc_info=True)
# end try
# no valid parsing so far
raise TgApiParseException("Could not parse result.") # See debug log for details!
# end if return_python_objects
return result | Use this method to get a list of administrators in a chat. On success, returns an Array of ChatMember objects that contains information about all chat administrators except other bots. If the chat is a group or a supergroup and no administrators were appointed, only the creator will be returned.
https://core.telegram.org/bots/api#getchatadministrators
Parameters:
:param chat_id: Unique identifier for the target chat or username of the target supergroup or channel (in the format @channelusername)
:type chat_id: int | str|unicode
Returns:
:return: On success, returns an Array of ChatMember objects that contains information about all chat administrators except other bots
:rtype: list of pytgbot.api_types.receivable.peer.ChatMember | Below is the instruction that describes the task:
### Input:
Use this method to get a list of administrators in a chat. On success, returns an Array of ChatMember objects that contains information about all chat administrators except other bots. If the chat is a group or a supergroup and no administrators were appointed, only the creator will be returned.
https://core.telegram.org/bots/api#getchatadministrators
Parameters:
:param chat_id: Unique identifier for the target chat or username of the target supergroup or channel (in the format @channelusername)
:type chat_id: int | str|unicode
Returns:
:return: On success, returns an Array of ChatMember objects that contains information about all chat administrators except other bots
:rtype: list of pytgbot.api_types.receivable.peer.ChatMember
### Response:
def get_chat_administrators(self, chat_id):
"""
Use this method to get a list of administrators in a chat. On success, returns an Array of ChatMember objects that contains information about all chat administrators except other bots. If the chat is a group or a supergroup and no administrators were appointed, only the creator will be returned.
https://core.telegram.org/bots/api#getchatadministrators
Parameters:
:param chat_id: Unique identifier for the target chat or username of the target supergroup or channel (in the format @channelusername)
:type chat_id: int | str|unicode
Returns:
:return: On success, returns an Array of ChatMember objects that contains information about all chat administrators except other bots
:rtype: list of pytgbot.api_types.receivable.peer.ChatMember
"""
assert_type_or_raise(chat_id, (int, unicode_type), parameter_name="chat_id")
result = self.do("getChatAdministrators", chat_id=chat_id)
if self.return_python_objects:
logger.debug("Trying to parse {data}".format(data=repr(result)))
from pytgbot.api_types.receivable.peer import ChatMember
try:
return ChatMember.from_array_list(result, list_level=1)
except TgApiParseException:
logger.debug("Failed parsing as api_type ChatMember", exc_info=True)
# end try
# no valid parsing so far
raise TgApiParseException("Could not parse result.") # See debug log for details!
# end if return_python_objects
return result |
def delete_image_tar(file_obj, tar):
'''delete image tar will close a file object (if extracted into
memory) or delete from the file system (if saved to disk)'''
try:
file_obj.close()
except:
tar.close()
if os.path.exists(file_obj):
os.remove(file_obj)
deleted = True
bot.debug('Deleted temporary tar.')
return deleted | delete image tar will close a file object (if extracted into
memory) or delete from the file system (if saved to disk) | Below is the instruction that describes the task:
### Input:
delete image tar will close a file object (if extracted into
memory) or delete from the file system (if saved to disk)
### Response:
def delete_image_tar(file_obj, tar):
'''delete image tar will close a file object (if extracted into
memory) or delete from the file system (if saved to disk)'''
try:
file_obj.close()
except:
tar.close()
if os.path.exists(file_obj):
os.remove(file_obj)
deleted = True
bot.debug('Deleted temporary tar.')
return deleted |
def enable_job(name, **kwargs):
'''
Enable a job in the minion's schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.enable_job job1
'''
ret = {'comment': [],
'result': True}
if not name:
ret['comment'] = 'Job name is required.'
ret['result'] = False
if 'test' in __opts__ and __opts__['test']:
ret['comment'] = 'Job: {0} would be enabled in schedule.'.format(name)
else:
persist = True
if 'persist' in kwargs:
persist = kwargs['persist']
if name in list_(show_all=True, where='opts', return_yaml=False):
event_data = {'name': name, 'func': 'enable_job', 'persist': persist}
elif name in list_(show_all=True, where='pillar', return_yaml=False):
event_data = {'name': name, 'where': 'pillar', 'func': 'enable_job', 'persist': False}
else:
ret['comment'] = 'Job {0} does not exist.'.format(name)
ret['result'] = False
return ret
try:
eventer = salt.utils.event.get_event('minion', opts=__opts__)
res = __salt__['event.fire'](event_data, 'manage_schedule')
if res:
event_ret = eventer.get_event(tag='/salt/minion/minion_schedule_enabled_job_complete', wait=30)
if event_ret and event_ret['complete']:
schedule = event_ret['schedule']
# check item exists in schedule and is enabled
if name in schedule and schedule[name]['enabled']:
ret['result'] = True
ret['comment'] = 'Enabled Job {0} in schedule.'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Failed to enable job {0} in schedule.'.format(name)
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Schedule enable job failed.'
return ret | Enable a job in the minion's schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.enable_job job1 | Below is the instruction that describes the task:
### Input:
Enable a job in the minion's schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.enable_job job1
### Response:
def enable_job(name, **kwargs):
'''
Enable a job in the minion's schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.enable_job job1
'''
ret = {'comment': [],
'result': True}
if not name:
ret['comment'] = 'Job name is required.'
ret['result'] = False
if 'test' in __opts__ and __opts__['test']:
ret['comment'] = 'Job: {0} would be enabled in schedule.'.format(name)
else:
persist = True
if 'persist' in kwargs:
persist = kwargs['persist']
if name in list_(show_all=True, where='opts', return_yaml=False):
event_data = {'name': name, 'func': 'enable_job', 'persist': persist}
elif name in list_(show_all=True, where='pillar', return_yaml=False):
event_data = {'name': name, 'where': 'pillar', 'func': 'enable_job', 'persist': False}
else:
ret['comment'] = 'Job {0} does not exist.'.format(name)
ret['result'] = False
return ret
try:
eventer = salt.utils.event.get_event('minion', opts=__opts__)
res = __salt__['event.fire'](event_data, 'manage_schedule')
if res:
event_ret = eventer.get_event(tag='/salt/minion/minion_schedule_enabled_job_complete', wait=30)
if event_ret and event_ret['complete']:
schedule = event_ret['schedule']
# check item exists in schedule and is enabled
if name in schedule and schedule[name]['enabled']:
ret['result'] = True
ret['comment'] = 'Enabled Job {0} in schedule.'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Failed to enable job {0} in schedule.'.format(name)
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Schedule enable job failed.'
return ret |
def get_base_url(html: str) -> str:
"""
Search for login url from VK login page
"""
forms = BeautifulSoup(html, 'html.parser').find_all('form')
if not forms:
raise VVKBaseUrlException('Form for login not found')
elif len(forms) > 1:
raise VVKBaseUrlException('More than one login form found')
login_url = forms[0].get('action')
if not login_url:
raise VVKBaseUrlException('No action tag in form')
return login_url | Search for login url from VK login page | Below is the instruction that describes the task:
### Input:
Search for login url from VK login page
### Response:
def get_base_url(html: str) -> str:
"""
Search for login url from VK login page
"""
forms = BeautifulSoup(html, 'html.parser').find_all('form')
if not forms:
raise VVKBaseUrlException('Form for login not found')
elif len(forms) > 1:
raise VVKBaseUrlException('More than one login form found')
login_url = forms[0].get('action')
if not login_url:
raise VVKBaseUrlException('No action tag in form')
return login_url |
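An illustrative call on made-up login-page markup (the action URL is hypothetical):
html = ('<html><body>'
        '<form method="post" action="https://login.vk.com/?act=login"></form>'
        '</body></html>')
print(get_base_url(html))  # https://login.vk.com/?act=login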
def retain_error(self, error, frame=None):
"""
Adds details of an error to the report.
:param error: The error exception to add to the report.
"""
if frame is None:
stack = traceback.format_exc()
self.labels.add("@iopipe/error")
else:
stack = "\n".join(traceback.format_stack(frame))
self.labels.add("@iopipe/timeout")
details = {
"name": type(error).__name__,
"message": "{}".format(error),
"stack": stack,
}
self.report["errors"] = details | Adds details of an error to the report.
:param error: The error exception to add to the report. | Below is the instruction that describes the task:
### Input:
Adds details of an error to the report.
:param error: The error exception to add to the report.
### Response:
def retain_error(self, error, frame=None):
"""
Adds details of an error to the report.
:param error: The error exception to add to the report.
"""
if frame is None:
stack = traceback.format_exc()
self.labels.add("@iopipe/error")
else:
stack = "\n".join(traceback.format_stack(frame))
self.labels.add("@iopipe/timeout")
details = {
"name": type(error).__name__,
"message": "{}".format(error),
"stack": stack,
}
self.report["errors"] = details |
def find_shows_by_ids(self, show_ids):
"""doc: http://open.youku.com/docs/doc?id=60
"""
url = 'https://openapi.youku.com/v2/shows/show_batch.json'
params = {
'client_id': self.client_id,
'show_ids': show_ids
}
r = requests.get(url, params=params)
check_error(r)
return r.json() | doc: http://open.youku.com/docs/doc?id=60 | Below is the instruction that describes the task:
### Input:
doc: http://open.youku.com/docs/doc?id=60
### Response:
def find_shows_by_ids(self, show_ids):
"""doc: http://open.youku.com/docs/doc?id=60
"""
url = 'https://openapi.youku.com/v2/shows/show_batch.json'
params = {
'client_id': self.client_id,
'show_ids': show_ids
}
r = requests.get(url, params=params)
check_error(r)
return r.json() |
def make_utool_json_encoder(allow_pickle=False):
"""
References:
http://stackoverflow.com/questions/8230315/python-sets-are
http://stackoverflow.com/questions/11561932/why-does-json
https://github.com/jsonpickle/jsonpickle
http://stackoverflow.com/questions/24369666/typeerror-b1
http://stackoverflow.com/questions/30469575/how-to-pickle
"""
import utool as ut
PYOBJECT_TAG = '__PYTHON_OBJECT__'
UUID_TAG = '__UUID__'
SLICE_TAG = '__SLICE__'
def decode_pickle(text):
obj = pickle.loads(codecs.decode(text.encode(), 'base64'))
return obj
def encode_pickle(obj):
try:
# Use protocol 2 to support both python2.7 and python3
COMPATIBLE_PROTOCOL = 2
pickle_bytes = pickle.dumps(obj, protocol=COMPATIBLE_PROTOCOL)
except Exception:
raise
text = codecs.encode(pickle_bytes, 'base64').decode()
return text
type_to_tag = collections.OrderedDict([
(slice, SLICE_TAG),
(uuid.UUID, UUID_TAG),
(object, PYOBJECT_TAG),
])
tag_to_type = {tag: type_ for type_, tag in type_to_tag.items()}
def slice_part(c):
return '' if c is None else str(c)
def encode_slice(s):
parts = [slice_part(s.start), slice_part(s.stop), slice_part(s.step)]
return ':'.join(parts)
def decode_slice(x):
return ut.smart_cast(x, slice)
encoders = {
UUID_TAG: str,
SLICE_TAG: encode_slice,
PYOBJECT_TAG: encode_pickle,
}
decoders = {
UUID_TAG: uuid.UUID,
SLICE_TAG: decode_slice,
PYOBJECT_TAG: decode_pickle,
}
if not allow_pickle:
del encoders[PYOBJECT_TAG]
del decoders[PYOBJECT_TAG]
type_ = tag_to_type[PYOBJECT_TAG]
del tag_to_type[PYOBJECT_TAG]
del type_to_tag[type_]
class UtoolJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, util_type.NUMPY_TYPE_TUPLE):
return obj.tolist()
elif six.PY3 and isinstance(obj, bytes):
return obj.decode('utf-8')
elif isinstance(obj, (set, frozenset)):
return list(obj)
# return json.JSONEncoder.default(self, list(obj))
# return [json.JSONEncoder.default(o) for o in obj]
elif isinstance(obj, util_type.PRIMATIVE_TYPES):
return json.JSONEncoder.default(self, obj)
elif hasattr(obj, '__getstate__'):
return obj.__getstate__()
else:
for type_, tag in type_to_tag.items():
if isinstance(obj, type_):
#print('----')
#print('encoder obj = %r' % (obj,))
#print('encoder type_ = %r' % (type_,))
func = encoders[tag]
text = func(obj)
return {tag: text}
raise TypeError('Invalid serialization type=%r' % (type(obj)))
@classmethod
def _json_object_hook(cls, value, verbose=False, **kwargs):
if len(value) == 1:
tag, text = list(value.items())[0]
if tag in decoders:
#print('----')
#print('decoder tag = %r' % (tag,))
func = decoders[tag]
obj = func(text)
#print('decoder obj = %r' % (obj,))
return obj
else:
return value
return value
return UtoolJSONEncoder | References:
http://stackoverflow.com/questions/8230315/python-sets-are
http://stackoverflow.com/questions/11561932/why-does-json
https://github.com/jsonpickle/jsonpickle
http://stackoverflow.com/questions/24369666/typeerror-b1
http://stackoverflow.com/questions/30469575/how-to-pickle | Below is the instruction that describes the task:
### Input:
References:
http://stackoverflow.com/questions/8230315/python-sets-are
http://stackoverflow.com/questions/11561932/why-does-json
https://github.com/jsonpickle/jsonpickle
http://stackoverflow.com/questions/24369666/typeerror-b1
http://stackoverflow.com/questions/30469575/how-to-pickle
### Response:
def make_utool_json_encoder(allow_pickle=False):
"""
References:
http://stackoverflow.com/questions/8230315/python-sets-are
http://stackoverflow.com/questions/11561932/why-does-json
https://github.com/jsonpickle/jsonpickle
http://stackoverflow.com/questions/24369666/typeerror-b1
http://stackoverflow.com/questions/30469575/how-to-pickle
"""
import utool as ut
PYOBJECT_TAG = '__PYTHON_OBJECT__'
UUID_TAG = '__UUID__'
SLICE_TAG = '__SLICE__'
def decode_pickle(text):
obj = pickle.loads(codecs.decode(text.encode(), 'base64'))
return obj
def encode_pickle(obj):
try:
# Use protocol 2 to support both python2.7 and python3
COMPATIBLE_PROTOCOL = 2
pickle_bytes = pickle.dumps(obj, protocol=COMPATIBLE_PROTOCOL)
except Exception:
raise
text = codecs.encode(pickle_bytes, 'base64').decode()
return text
type_to_tag = collections.OrderedDict([
(slice, SLICE_TAG),
(uuid.UUID, UUID_TAG),
(object, PYOBJECT_TAG),
])
tag_to_type = {tag: type_ for type_, tag in type_to_tag.items()}
def slice_part(c):
return '' if c is None else str(c)
def encode_slice(s):
parts = [slice_part(s.start), slice_part(s.stop), slice_part(s.step)]
return ':'.join(parts)
def decode_slice(x):
return ut.smart_cast(x, slice)
encoders = {
UUID_TAG: str,
SLICE_TAG: encode_slice,
PYOBJECT_TAG: encode_pickle,
}
decoders = {
UUID_TAG: uuid.UUID,
SLICE_TAG: decode_slice,
PYOBJECT_TAG: decode_pickle,
}
if not allow_pickle:
del encoders[PYOBJECT_TAG]
del decoders[PYOBJECT_TAG]
type_ = tag_to_type[PYOBJECT_TAG]
del tag_to_type[PYOBJECT_TAG]
del type_to_tag[type_]
class UtoolJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, util_type.NUMPY_TYPE_TUPLE):
return obj.tolist()
elif six.PY3 and isinstance(obj, bytes):
return obj.decode('utf-8')
elif isinstance(obj, (set, frozenset)):
return list(obj)
# return json.JSONEncoder.default(self, list(obj))
# return [json.JSONEncoder.default(o) for o in obj]
elif isinstance(obj, util_type.PRIMATIVE_TYPES):
return json.JSONEncoder.default(self, obj)
elif hasattr(obj, '__getstate__'):
return obj.__getstate__()
else:
for type_, tag in type_to_tag.items():
if isinstance(obj, type_):
#print('----')
#print('encoder obj = %r' % (obj,))
#print('encoder type_ = %r' % (type_,))
func = encoders[tag]
text = func(obj)
return {tag: text}
raise TypeError('Invalid serialization type=%r' % (type(obj)))
@classmethod
def _json_object_hook(cls, value, verbose=False, **kwargs):
if len(value) == 1:
tag, text = list(value.items())[0]
if tag in decoders:
#print('----')
#print('decoder tag = %r' % (tag,))
func = decoders[tag]
obj = func(text)
#print('decoder obj = %r' % (obj,))
return obj
else:
return value
return value
return UtoolJSONEncoder |
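The nested encode_slice helper flattens a slice into a colon-separated triple, so a slice value serializes as {"__SLICE__": "1:10:2"}; copied out standalone for illustration:
def encode_slice(s):
    part = lambda c: '' if c is None else str(c)
    return ':'.join([part(s.start), part(s.stop), part(s.step)])

print(encode_slice(slice(1, 10, 2)))  # 1:10:2
print(encode_slice(slice(None, 5)))   # :5: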
def delete_dcnm_in_nwk(self, tenant_id, fw_dict, is_fw_virt=False):
"""Delete the DCNM In Network and store the result in DB. """
tenant_name = fw_dict.get('tenant_name')
ret = self._delete_service_nwk(tenant_id, tenant_name, 'in')
if ret:
res = fw_const.DCNM_IN_NETWORK_DEL_SUCCESS
LOG.info("In Service network deleted for tenant %s",
tenant_id)
else:
res = fw_const.DCNM_IN_NETWORK_DEL_FAIL
LOG.info("In Service network deleted failed for tenant %s",
tenant_id)
self.update_fw_db_result(tenant_id, dcnm_status=res)
return ret | Delete the DCNM In Network and store the result in DB. | Below is the instruction that describes the task:
### Input:
Delete the DCNM In Network and store the result in DB.
### Response:
def delete_dcnm_in_nwk(self, tenant_id, fw_dict, is_fw_virt=False):
"""Delete the DCNM In Network and store the result in DB. """
tenant_name = fw_dict.get('tenant_name')
ret = self._delete_service_nwk(tenant_id, tenant_name, 'in')
if ret:
res = fw_const.DCNM_IN_NETWORK_DEL_SUCCESS
LOG.info("In Service network deleted for tenant %s",
tenant_id)
else:
res = fw_const.DCNM_IN_NETWORK_DEL_FAIL
LOG.info("In Service network deleted failed for tenant %s",
tenant_id)
self.update_fw_db_result(tenant_id, dcnm_status=res)
return ret |
def _compute_follow(self):
"""Computes the FOLLOW set for every non-terminal in the grammar.
Tentatively based on _compute_follow in PLY.
"""
self._follow[self.start_symbol].add(END_OF_INPUT)
while True:
changed = False
for nonterminal, productions in self.nonterminals.items():
for production in productions:
for i, symbol in enumerate(production.rhs):
if symbol not in self.nonterminals:
continue
first = self.first(production.rhs[i + 1:])
new_follow = first - set([EPSILON])
if EPSILON in first or i == (len(production.rhs) - 1):
new_follow |= self._follow[nonterminal]
if new_follow - self._follow[symbol]:
self._follow[symbol] |= new_follow
changed = True
if not changed:
break | Computes the FOLLOW set for every non-terminal in the grammar.
Tentatively based on _compute_follow in PLY. | Below is the instruction that describes the task:
### Input:
Computes the FOLLOW set for every non-terminal in the grammar.
Tentatively based on _compute_follow in PLY.
### Response:
def _compute_follow(self):
"""Computes the FOLLOW set for every non-terminal in the grammar.
Tentatively based on _compute_follow in PLY.
"""
self._follow[self.start_symbol].add(END_OF_INPUT)
while True:
changed = False
for nonterminal, productions in self.nonterminals.items():
for production in productions:
for i, symbol in enumerate(production.rhs):
if symbol not in self.nonterminals:
continue
first = self.first(production.rhs[i + 1:])
new_follow = first - set([EPSILON])
if EPSILON in first or i == (len(production.rhs) - 1):
new_follow |= self._follow[nonterminal]
if new_follow - self._follow[symbol]:
self._follow[symbol] |= new_follow
changed = True
if not changed:
break |
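A quick worked example on the toy grammar S -> A b, A -> a: the loop seeds FOLLOW(S) = {$}, and scanning the production S -> A b puts FIRST(b) = {b} into FOLLOW(A); since A never ends a production, nothing further propagates, so the fixed point is FOLLOW(S) = {$} and FOLLOW(A) = {b}.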
def exception_occurred(self, text, is_traceback):
"""
Exception occurred in the internal console.
Show a QDialog or the internal console to warn the user.
"""
# Skip errors without traceback or dismiss
if (not is_traceback and self.error_dlg is None) or self.dismiss_error:
return
if CONF.get('main', 'show_internal_errors'):
if self.error_dlg is None:
self.error_dlg = SpyderErrorDialog(self)
self.error_dlg.close_btn.clicked.connect(self.close_error_dlg)
self.error_dlg.rejected.connect(self.remove_error_dlg)
self.error_dlg.details.go_to_error.connect(self.go_to_error)
self.error_dlg.show()
self.error_dlg.append_traceback(text)
elif DEV or get_debug_level():
self.dockwidget.show()
self.dockwidget.raise_() | Exception occurred in the internal console.
Show a QDialog or the internal console to warn the user. | Below is the instruction that describes the task:
### Input:
Exception occurred in the internal console.
Show a QDialog or the internal console to warn the user.
### Response:
def exception_occurred(self, text, is_traceback):
"""
Exception occurred in the internal console.
Show a QDialog or the internal console to warn the user.
"""
# Skip errors without traceback or dismiss
if (not is_traceback and self.error_dlg is None) or self.dismiss_error:
return
if CONF.get('main', 'show_internal_errors'):
if self.error_dlg is None:
self.error_dlg = SpyderErrorDialog(self)
self.error_dlg.close_btn.clicked.connect(self.close_error_dlg)
self.error_dlg.rejected.connect(self.remove_error_dlg)
self.error_dlg.details.go_to_error.connect(self.go_to_error)
self.error_dlg.show()
self.error_dlg.append_traceback(text)
elif DEV or get_debug_level():
self.dockwidget.show()
self.dockwidget.raise_() |
def _find_usage_network_interfaces(self):
"""find usage of network interfaces"""
enis = paginate_dict(
self.conn.describe_network_interfaces,
alc_marker_path=['NextToken'],
alc_data_path=['NetworkInterfaces'],
alc_marker_param='NextToken'
)
self.limits['Network interfaces per Region']._add_current_usage(
len(enis['NetworkInterfaces']),
aws_type='AWS::EC2::NetworkInterface'
) | find usage of network interfaces | Below is the instruction that describes the task:
### Input:
find usage of network interfaces
### Response:
def _find_usage_network_interfaces(self):
"""find usage of network interfaces"""
enis = paginate_dict(
self.conn.describe_network_interfaces,
alc_marker_path=['NextToken'],
alc_data_path=['NetworkInterfaces'],
alc_marker_param='NextToken'
)
self.limits['Network interfaces per Region']._add_current_usage(
len(enis['NetworkInterfaces']),
aws_type='AWS::EC2::NetworkInterface'
) |
def async_session_handler(self, signal: str) -> None:
"""Signalling from websocket.
data - new data available for processing.
state - network state has changed.
"""
if signal == 'data':
self.async_event_handler(self.websocket.data)
elif signal == 'state':
if self.async_connection_status_callback:
self.async_connection_status_callback(
self.websocket.state == 'running') | Signalling from websocket.
data - new data available for processing.
state - network state has changed. | Below is the instruction that describes the task:
### Input:
Signalling from websocket.
data - new data available for processing.
state - network state has changed.
### Response:
def async_session_handler(self, signal: str) -> None:
"""Signalling from websocket.
data - new data available for processing.
state - network state has changed.
"""
if signal == 'data':
self.async_event_handler(self.websocket.data)
elif signal == 'state':
if self.async_connection_status_callback:
self.async_connection_status_callback(
self.websocket.state == 'running') |
def copy(string):
"""Copy given string into system clipboard.
"""
win32clipboard.OpenClipboard()
win32clipboard.EmptyClipboard()
win32clipboard.SetClipboardText(string)
win32clipboard.CloseClipboard() | Copy given string into system clipboard. | Below is the instruction that describes the task:
### Input:
Copy given string into system clipboard.
### Response:
def copy(string):
"""Copy given string into system clipboard.
"""
win32clipboard.OpenClipboard()
win32clipboard.EmptyClipboard()
win32clipboard.SetClipboardText(string)
win32clipboard.CloseClipboard() |
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._id_ is not None:
return False
if self._created is not None:
return False
if self._updated is not None:
return False
if self._action is not None:
return False
if self._user_id is not None:
return False
if self._monetary_account_id is not None:
return False
if self._object_ is not None:
return False
if self._status is not None:
return False
return True | :rtype: bool | Below is the instruction that describes the task:
### Input:
:rtype: bool
### Response:
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._id_ is not None:
return False
if self._created is not None:
return False
if self._updated is not None:
return False
if self._action is not None:
return False
if self._user_id is not None:
return False
if self._monetary_account_id is not None:
return False
if self._object_ is not None:
return False
if self._status is not None:
return False
return True |
def name_of_the_nearest_place(feature, parent):
"""If the impact layer has a distance field, it will return the name
of the nearest place.
e.g. name_of_the_nearest_place() -> Tokyo
"""
_ = feature, parent # NOQA
layer = exposure_summary_layer()
if not layer:
return None
index = layer.fields().lookupField(
exposure_name_field['field_name'])
if index < 0:
return None
feature = next(layer.getFeatures())
return feature[index] | If the impact layer has a distance field, it will return the name
of the nearest place.
e.g. name_of_the_nearest_place() -> Tokyo | Below is the instruction that describes the task:
### Input:
If the impact layer has a distance field, it will return the name
of the nearest place.
e.g. name_of_the_nearest_place() -> Tokyo
### Response:
def name_of_the_nearest_place(feature, parent):
"""If the impact layer has a distance field, it will return the name
of the nearest place.
e.g. name_of_the_nearest_place() -> Tokyo
"""
_ = feature, parent # NOQA
layer = exposure_summary_layer()
if not layer:
return None
index = layer.fields().lookupField(
exposure_name_field['field_name'])
if index < 0:
return None
feature = next(layer.getFeatures())
return feature[index] |
async def dispatch_request(
self, request_context: Optional[RequestContext]=None,
) -> ResponseReturnValue:
"""Dispatch the request to the view function.
Arguments:
request_context: The request context, optional as Flask
omits this argument.
"""
request_ = (request_context or _request_ctx_stack.top).request
if request_.routing_exception is not None:
raise request_.routing_exception
if request_.method == 'OPTIONS' and request_.url_rule.provide_automatic_options:
return await self.make_default_options_response()
handler = self.view_functions[request_.url_rule.endpoint]
return await handler(**request_.view_args) | Dispatch the request to the view function.
Arguments:
request_context: The request context, optional as Flask
omits this argument. | Below is the instruction that describes the task:
### Input:
Dispatch the request to the view function.
Arguments:
request_context: The request context, optional as Flask
omits this argument.
### Response:
async def dispatch_request(
self, request_context: Optional[RequestContext]=None,
) -> ResponseReturnValue:
"""Dispatch the request to the view function.
Arguments:
request_context: The request context, optional as Flask
omits this argument.
"""
request_ = (request_context or _request_ctx_stack.top).request
if request_.routing_exception is not None:
raise request_.routing_exception
if request_.method == 'OPTIONS' and request_.url_rule.provide_automatic_options:
return await self.make_default_options_response()
handler = self.view_functions[request_.url_rule.endpoint]
return await handler(**request_.view_args) |
def phenSpecificEffects(snps,pheno1,pheno2,K=None,covs=None,test='lrt'):
"""
Univariate fixed effects interaction test for phenotype specific SNP effects
Args:
snps: [N x S] SP.array of S SNPs for N individuals (test SNPs)
pheno1: [N x 1] SP.array of 1 phenotype for N individuals
pheno2: [N x 1] SP.array of 1 phenotype for N individuals
K: [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
If not provided, then linear regression analysis is performed
covs: [N x D] SP.array of D covariates for N individuals
test: 'lrt' for likelihood ratio test (default) or 'f' for F-test
Returns:
limix LMM object
"""
N=snps.shape[0]
if K is None:
K=SP.eye(N)
assert (pheno1.shape[1]==pheno2.shape[1]), "Only consider equal number of phenotype dimensions"
if covs is None:
covs = SP.ones((N,1))
assert (pheno1.shape[1]==1 and pheno2.shape[1]==1 and pheno1.shape[0]==N and pheno2.shape[0]==N and K.shape[0]==N and K.shape[1]==N and covs.shape[0]==N), "shapes mismatch"
Inter = SP.zeros((N*2,1))
Inter[0:N,0]=1
Inter0 = SP.ones((N*2,1))
Yinter=SP.concatenate((pheno1,pheno2),0)
Xinter = SP.tile(snps,(2,1))
Covinter = SP.tile(covs,(2,1))
lm = simple_interaction(snps=Xinter,pheno=Yinter,covs=Covinter,Inter=Inter,Inter0=Inter0,test=test)
return lm | Univariate fixed effects interaction test for phenotype specific SNP effects
Args:
snps: [N x S] SP.array of S SNPs for N individuals (test SNPs)
pheno1: [N x 1] SP.array of 1 phenotype for N individuals
pheno2: [N x 1] SP.array of 1 phenotype for N individuals
K: [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
If not provided, then linear regression analysis is performed
covs: [N x D] SP.array of D covariates for N individuals
test: 'lrt' for likelihood ratio test (default) or 'f' for F-test
Returns:
limix LMM object | Below is the instruction that describes the task:
### Input:
Univariate fixed effects interaction test for phenotype specific SNP effects
Args:
snps: [N x S] SP.array of S SNPs for N individuals (test SNPs)
pheno1: [N x 1] SP.array of 1 phenotype for N individuals
pheno2: [N x 1] SP.array of 1 phenotype for N individuals
K: [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
If not provided, then linear regression analysis is performed
covs: [N x D] SP.array of D covariates for N individuals
test: 'lrt' for likelihood ratio test (default) or 'f' for F-test
Returns:
limix LMM object
### Response:
def phenSpecificEffects(snps,pheno1,pheno2,K=None,covs=None,test='lrt'):
"""
Univariate fixed effects interaction test for phenotype specific SNP effects
Args:
snps: [N x S] SP.array of S SNPs for N individuals (test SNPs)
pheno1: [N x 1] SP.array of 1 phenotype for N individuals
pheno2: [N x 1] SP.array of 1 phenotype for N individuals
K: [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
If not provided, then linear regression analysis is performed
covs: [N x D] SP.array of D covariates for N individuals
test: 'lrt' for likelihood ratio test (default) or 'f' for F-test
Returns:
limix LMM object
"""
N=snps.shape[0]
if K is None:
K=SP.eye(N)
assert (pheno1.shape[1]==pheno2.shape[1]), "Only consider equal number of phenotype dimensions"
if covs is None:
covs = SP.ones((N,1))
assert (pheno1.shape[1]==1 and pheno2.shape[1]==1 and pheno1.shape[0]==N and pheno2.shape[0]==N and K.shape[0]==N and K.shape[1]==N and covs.shape[0]==N), "shapes mismatch"
Inter = SP.zeros((N*2,1))
Inter[0:N,0]=1
Inter0 = SP.ones((N*2,1))
Yinter=SP.concatenate((pheno1,pheno2),0)
Xinter = SP.tile(snps,(2,1))
Covinter = SP.tile(covs,(2,1))
lm = simple_interaction(snps=Xinter,pheno=Yinter,covs=Covinter,Inter=Inter,Inter0=Inter0,test=test)
return lm |
def __find_prime_in_row(self, row):
"""
Find the first prime element in the specified row. Returns
the column index, or -1 if no primed element was found.
"""
col = -1
for j in range(self.n):
if self.marked[row][j] == 2:
col = j
break
return col | Find the first prime element in the specified row. Returns
the column index, or -1 if no primed element was found. | Below is the instruction that describes the task:
### Input:
Find the first prime element in the specified row. Returns
the column index, or -1 if no primed element was found.
### Response:
def __find_prime_in_row(self, row):
"""
Find the first prime element in the specified row. Returns
the column index, or -1 if no primed element was found.
"""
col = -1
for j in range(self.n):
if self.marked[row][j] == 2:
col = j
break
return col |
def isolate_region(sequences, start, end, gap_char='-'):
"""
Replace regions before and after start:end with gap chars
"""
# Check arguments
if end <= start:
raise ValueError("start of slice must precede end ({0} !> {1})".format(
end, start))
for sequence in sequences:
seq = sequence.seq
start_gap = gap_char * start
end_gap = gap_char * (len(seq) - end)
seq = Seq(start_gap + str(seq[start:end]) + end_gap,
alphabet=seq.alphabet)
sequence.seq = seq
yield sequence | Replace regions before and after start:end with gap chars | Below is the instruction that describes the task:
### Input:
Replace regions before and after start:end with gap chars
### Response:
def isolate_region(sequences, start, end, gap_char='-'):
"""
Replace regions before and after start:end with gap chars
"""
# Check arguments
if end <= start:
raise ValueError("start of slice must precede end ({0} !> {1})".format(
end, start))
for sequence in sequences:
seq = sequence.seq
start_gap = gap_char * start
end_gap = gap_char * (len(seq) - end)
seq = Seq(start_gap + str(seq[start:end]) + end_gap,
alphabet=seq.alphabet)
sequence.seq = seq
yield sequence |
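A minimal usage sketch, assuming a Biopython release older than 1.78 where Seq objects still carry the alphabet attribute this function reads:
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import generic_dna

rec = SeqRecord(Seq("ACGTACGT", generic_dna), id="r1")
trimmed = next(isolate_region([rec], 2, 5))
print(str(trimmed.seq))  # --GTA---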
def replace_pod_security_policy(self, name, body, **kwargs): # noqa: E501
"""replace_pod_security_policy # noqa: E501
replace the specified PodSecurityPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_pod_security_policy(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodSecurityPolicy (required)
:param ExtensionsV1beta1PodSecurityPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: ExtensionsV1beta1PodSecurityPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_pod_security_policy_with_http_info(name, body, **kwargs) # noqa: E501
else:
(data) = self.replace_pod_security_policy_with_http_info(name, body, **kwargs) # noqa: E501
return data | replace_pod_security_policy # noqa: E501
replace the specified PodSecurityPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_pod_security_policy(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodSecurityPolicy (required)
:param ExtensionsV1beta1PodSecurityPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: ExtensionsV1beta1PodSecurityPolicy
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
replace_pod_security_policy # noqa: E501
replace the specified PodSecurityPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_pod_security_policy(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodSecurityPolicy (required)
:param ExtensionsV1beta1PodSecurityPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: ExtensionsV1beta1PodSecurityPolicy
If the method is called asynchronously,
returns the request thread.
### Response:
def replace_pod_security_policy(self, name, body, **kwargs): # noqa: E501
"""replace_pod_security_policy # noqa: E501
replace the specified PodSecurityPolicy # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_pod_security_policy(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodSecurityPolicy (required)
:param ExtensionsV1beta1PodSecurityPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: ExtensionsV1beta1PodSecurityPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_pod_security_policy_with_http_info(name, body, **kwargs) # noqa: E501
else:
(data) = self.replace_pod_security_policy_with_http_info(name, body, **kwargs) # noqa: E501
return data |
def get_version(version=None):
"""
Return full version nr, inc. rc, beta etc tags.
For example: `2.0.0a1`
:rtype: str
"""
v = version or __version__
if len(v) == 4:
return '{0}{1}'.format(short_version(v), v[3])
return short_version(v) | Return full version nr, inc. rc, beta etc tags.
For example: `2.0.0a1`
:rtype: str | Below is the instruction that describes the task:
### Input:
Return full version nr, inc. rc, beta etc tags.
For example: `2.0.0a1`
:rtype: str
### Response:
def get_version(version=None):
"""
Return full version nr, inc. rc, beta etc tags.
For example: `2.0.0a1`
:rtype: str
"""
v = version or __version__
if len(v) == 4:
return '{0}{1}'.format(short_version(v), v[3])
return short_version(v) |
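Assuming short_version (defined elsewhere) joins the first three components with dots, the behavior would be:
print(get_version((2, 0, 0, 'a1')))  # 2.0.0a1
print(get_version((2, 0, 0)))        # 2.0.0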
def convert_images(image_list, image_format="png", timeout=20):
"""Convert images from list of images to given format, if needed.
Figure out the types of the images that were extracted from
the tarball and determine how to convert them into PNG.
:param: image_list ([string, string, ...]): the list of image files
extracted from the tarball in step 1
:param: image_format (string): which image format to convert to.
(PNG by default)
:param: timeout (int): the timeout value on shell commands.
:return: image_mapping ({new_image: original_image, ...]): The mapping of
image files when all have been converted to PNG format.
"""
png_output_contains = 'PNG image'
image_mapping = {}
for image_file in image_list:
if os.path.isdir(image_file):
continue
if not os.path.exists(image_file):
continue
cmd_out = check_output(['file', image_file], timeout=timeout)
if cmd_out.find(png_output_contains) > -1:
# Already PNG
image_mapping[image_file] = image_file
else:
# we're just going to assume that ImageMagick can convert all
# the image types that we may be faced with
# for sure it can do EPS->PNG and JPG->PNG and PS->PNG
# and PSTEX->PNG
converted_image_file = get_converted_image_name(image_file)
try:
convert_image(image_file, converted_image_file, image_format)
except (MissingDelegateError, ResourceLimitError):
# Too bad, cannot convert image format.
continue
if os.path.exists(converted_image_file):
image_mapping[converted_image_file] = image_file
return image_mapping | Convert images from list of images to given format, if needed.
Figure out the types of the images that were extracted from
the tarball and determine how to convert them into PNG.
:param: image_list ([string, string, ...]): the list of image files
extracted from the tarball in step 1
:param: image_format (string): which image format to convert to.
(PNG by default)
:param: timeout (int): the timeout value on shell commands.
    :return: image_mapping ({new_image: original_image, ...}): The mapping of
    image files when all have been converted to PNG format. | Below is the instruction that describes the task:
### Input:
Convert images from list of images to given format, if needed.
Figure out the types of the images that were extracted from
the tarball and determine how to convert them into PNG.
:param: image_list ([string, string, ...]): the list of image files
extracted from the tarball in step 1
:param: image_format (string): which image format to convert to.
(PNG by default)
:param: timeout (int): the timeout value on shell commands.
    :return: image_mapping ({new_image: original_image, ...}): The mapping of
image files when all have been converted to PNG format.
### Response:
def convert_images(image_list, image_format="png", timeout=20):
"""Convert images from list of images to given format, if needed.
Figure out the types of the images that were extracted from
the tarball and determine how to convert them into PNG.
:param: image_list ([string, string, ...]): the list of image files
extracted from the tarball in step 1
:param: image_format (string): which image format to convert to.
(PNG by default)
:param: timeout (int): the timeout value on shell commands.
    :return: image_mapping ({new_image: original_image, ...}): The mapping of
image files when all have been converted to PNG format.
"""
png_output_contains = 'PNG image'
image_mapping = {}
for image_file in image_list:
if os.path.isdir(image_file):
continue
if not os.path.exists(image_file):
continue
cmd_out = check_output(['file', image_file], timeout=timeout)
if cmd_out.find(png_output_contains) > -1:
# Already PNG
image_mapping[image_file] = image_file
else:
# we're just going to assume that ImageMagick can convert all
# the image types that we may be faced with
# for sure it can do EPS->PNG and JPG->PNG and PS->PNG
# and PSTEX->PNG
converted_image_file = get_converted_image_name(image_file)
try:
convert_image(image_file, converted_image_file, image_format)
except (MissingDelegateError, ResourceLimitError):
# Too bad, cannot convert image format.
continue
if os.path.exists(converted_image_file):
image_mapping[converted_image_file] = image_file
return image_mapping |
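A hedged call sketch for convert_images; the file paths are invented, and check_output, convert_image and get_converted_image_name are helpers from the surrounding module rather than anything defined here.

image_list = ['figs/diagram.eps', 'figs/photo.jpg', 'figs/logo.png']
mapping = convert_images(image_list, image_format='png', timeout=30)
for converted, original in mapping.items():
    print('%s <- %s' % (converted, original))
# Files that are already PNG map to themselves; formats that cannot be
# converted are skipped silently.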
def collapse_all(self):
"""
Collapse all positions; works only if the underlying tree allows it.
"""
if implementsCollapseAPI(self._tree):
self._tree.collapse_all()
self.set_focus(self._tree.root)
self._walker.clear_cache()
        self.refresh() | Collapse all positions; works only if the underlying tree allows it. | Below is the instruction that describes the task:
### Input:
Collapse all positions; works only if the underlying tree allows it.
### Response:
def collapse_all(self):
"""
Collapse all positions; works only if the underlying tree allows it.
"""
if implementsCollapseAPI(self._tree):
self._tree.collapse_all()
self.set_focus(self._tree.root)
self._walker.clear_cache()
self.refresh() |
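An illustrative call; treebox stands for an instance of the widget class this method belongs to, built over a tree that implements the collapse API.

treebox.collapse_all()  # folds every position, refocuses the root, redraws
# On a tree without the collapse API, implementsCollapseAPI() is false and
# the call is a no-op.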
def content_type(self):
"""Return the value of Content-Type header field.
The value for the Content-Type header field is determined from
the :attr:`media_type` and :attr:`charset` data attributes.
Returns:
str: Value of Content-Type header field
"""
if (self.media_type is not None and
self.media_type.startswith('text/') and
self.charset is not None):
return self.media_type + '; charset=' + self.charset
else:
return self.media_type | Return the value of Content-Type header field.
The value for the Content-Type header field is determined from
the :attr:`media_type` and :attr:`charset` data attributes.
Returns:
    str: Value of Content-Type header field | Below is the instruction that describes the task:
### Input:
Return the value of Content-Type header field.
The value for the Content-Type header field is determined from
the :attr:`media_type` and :attr:`charset` data attributes.
Returns:
str: Value of Content-Type header field
### Response:
def content_type(self):
"""Return the value of Content-Type header field.
The value for the Content-Type header field is determined from
the :attr:`media_type` and :attr:`charset` data attributes.
Returns:
str: Value of Content-Type header field
"""
if (self.media_type is not None and
self.media_type.startswith('text/') and
self.charset is not None):
return self.media_type + '; charset=' + self.charset
else:
return self.media_type |
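A small sketch of the property's behaviour; Response is a stand-in name for whatever class actually defines it in the source.

resp = Response()  # hypothetical host class with media_type/charset attributes
resp.media_type, resp.charset = 'text/html', 'UTF-8'
print(resp.content_type)  # -> 'text/html; charset=UTF-8'

resp.media_type = 'application/json'
print(resp.content_type)  # -> 'application/json' (charset appended only for text/*)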
def new_log_filepath(self):
'''returns a filepath to a log which does not currently exist and is suitable for DF logging'''
lastlog_filename = os.path.join(self.dataflash_dir,'LASTLOG.TXT')
if os.path.exists(lastlog_filename) and os.stat(lastlog_filename).st_size != 0:
fh = open(lastlog_filename,'rb')
log_cnt = int(fh.read()) + 1
fh.close()
else:
log_cnt = 1
self.lastlog_file = open(lastlog_filename,'w+b')
    self.lastlog_file.write(str(log_cnt))
    self.lastlog_file.close()
    return os.path.join(self.dataflash_dir, '%u.BIN' % (log_cnt,)) | returns a filepath to a log which does not currently exist and is suitable for DF logging | Below is the instruction that describes the task:
### Input:
returns a filepath to a log which does not currently exist and is suitable for DF logging
### Response:
def new_log_filepath(self):
'''returns a filepath to a log which does not currently exist and is suitable for DF logging'''
lastlog_filename = os.path.join(self.dataflash_dir,'LASTLOG.TXT')
if os.path.exists(lastlog_filename) and os.stat(lastlog_filename).st_size != 0:
fh = open(lastlog_filename,'rb')
log_cnt = int(fh.read()) + 1
fh.close()
else:
log_cnt = 1
self.lastlog_file = open(lastlog_filename,'w+b')
    self.lastlog_file.write(str(log_cnt))
    self.lastlog_file.close()
    return os.path.join(self.dataflash_dir, '%u.BIN' % (log_cnt,))
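Hypothetical usage; logger stands for the object that carries dataflash_dir, which is configured elsewhere and not shown in this entry.

path = logger.new_log_filepath()
# If dataflash_dir is '/logs' and LASTLOG.TXT previously held '6', this
# returns '/logs/7.BIN' and rewrites LASTLOG.TXT to '7'.
print(path)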
def get_remote_client(self, target_name, user=None, password=None):
"""
Returns a new client for the remote target. This is a lightweight
client that only uses different credentials and shares the transport
with the underlying client
"""
if user:
base = self.get_user_client(user, password, populate=False)
else:
base = weakproxy(self)
return RemoteXCLIClient(base, target_name, populate=True) | Returns a new client for the remote target. This is a lightweight
client that only uses different credentials and shares the transport
with the underlying client | Below is the instruction that describes the task:
### Input:
Returns a new client for the remote target. This is a lightweight
client that only uses different credentials and shares the transport
with the underlying client
### Response:
def get_remote_client(self, target_name, user=None, password=None):
"""
Returns a new client for the remote target. This is a lightweight
client that only uses different credentials and shares the transport
with the underlying client
"""
if user:
base = self.get_user_client(user, password, populate=False)
else:
base = weakproxy(self)
return RemoteXCLIClient(base, target_name, populate=True) |
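A sketch under assumptions: client is an existing XCLI client instance and 'site-b' is an invented remote target name.

remote = client.get_remote_client('site-b', user='admin', password='secret')
# remote shares the underlying transport with client but issues its
# commands against the 'site-b' target under the given credentials.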
def load(hdfs_path, **kwargs):
"""\
Read the content of ``hdfs_path`` and return it.
Keyword arguments are passed to :func:`open`. The `"mode"` kwarg
must be readonly.
"""
m, _ = common.parse_mode(kwargs.get("mode", "r"))
if m != "r":
raise ValueError("opening mode must be readonly")
with open(hdfs_path, **kwargs) as fi:
data = fi.read()
fi.fs.close()
return data | \
Read the content of ``hdfs_path`` and return it.
Keyword arguments are passed to :func:`open`. The `"mode"` kwarg
must be readonly. | Below is the instruction that describes the task:
### Input:
\
Read the content of ``hdfs_path`` and return it.
Keyword arguments are passed to :func:`open`. The `"mode"` kwarg
must be readonly.
### Response:
def load(hdfs_path, **kwargs):
"""\
Read the content of ``hdfs_path`` and return it.
Keyword arguments are passed to :func:`open`. The `"mode"` kwarg
must be readonly.
"""
m, _ = common.parse_mode(kwargs.get("mode", "r"))
if m != "r":
raise ValueError("opening mode must be readonly")
with open(hdfs_path, **kwargs) as fi:
data = fi.read()
fi.fs.close()
return data |
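A hedged example; the HDFS path is invented and the first call assumes a reachable, configured HDFS.

text = load('/user/alice/report.txt')  # default mode 'r'
print('%d characters read' % len(text))

try:
    load('/user/alice/report.txt', mode='w')
except ValueError as exc:
    print(exc)  # 'opening mode must be readonly'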
def _add_data(self, plotter_cls, *args, **kwargs):
"""
Add new plots to the project
Parameters
----------
%(ProjectPlotter._add_data.parameters.no_filename_or_obj)s
Other Parameters
----------------
%(ProjectPlotter._add_data.other_parameters)s
Returns
-------
%(ProjectPlotter._add_data.returns)s
"""
# this method is just a shortcut to the :meth:`Project._add_data`
# method but is reimplemented by subclasses as the
# :class:`DatasetPlotter` or the :class:`DataArrayPlotter`
return super(DatasetPlotter, self)._add_data(plotter_cls, self._ds,
*args, **kwargs) | Add new plots to the project
Parameters
----------
%(ProjectPlotter._add_data.parameters.no_filename_or_obj)s
Other Parameters
----------------
%(ProjectPlotter._add_data.other_parameters)s
Returns
-------
    %(ProjectPlotter._add_data.returns)s | Below is the instruction that describes the task:
### Input:
Add new plots to the project
Parameters
----------
%(ProjectPlotter._add_data.parameters.no_filename_or_obj)s
Other Parameters
----------------
%(ProjectPlotter._add_data.other_parameters)s
Returns
-------
%(ProjectPlotter._add_data.returns)s
### Response:
def _add_data(self, plotter_cls, *args, **kwargs):
"""
Add new plots to the project
Parameters
----------
%(ProjectPlotter._add_data.parameters.no_filename_or_obj)s
Other Parameters
----------------
%(ProjectPlotter._add_data.other_parameters)s
Returns
-------
%(ProjectPlotter._add_data.returns)s
"""
# this method is just a shortcut to the :meth:`Project._add_data`
# method but is reimplemented by subclasses as the
# :class:`DatasetPlotter` or the :class:`DataArrayPlotter`
return super(DatasetPlotter, self)._add_data(plotter_cls, self._ds,
*args, **kwargs) |
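An illustrative internal call; ds_plotter stands for a DatasetPlotter instance and SomePlotter for a plotter class, both invented here, since this helper only forwards the wrapped dataset to the parent class's _add_data.

project = ds_plotter._add_data(SomePlotter, name='t2m')
# Equivalent to calling the parent Project._add_data(SomePlotter,
# ds_plotter._ds, name='t2m').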