sentence1 | sentence2 | label
---|---|---
def reference(self):
"""Return the Reference object for this Key.
This is an entity_pb.Reference instance -- a protocol buffer class
used by the lower-level API to the datastore.
NOTE: The caller should not mutate the return value.
"""
if self.__reference is None:
self.__reference = _ConstructReference(self.__class__,
pairs=self.__pairs,
app=self.__app,
namespace=self.__namespace)
return self.__reference | Return the Reference object for this Key.
This is an entity_pb.Reference instance -- a protocol buffer class
used by the lower-level API to the datastore.
NOTE: The caller should not mutate the return value. | entailment |
def urlsafe(self):
"""Return a url-safe string encoding this Key's Reference.
This string is compatible with other APIs and languages and with
the strings used to represent Keys in GQL and in the App Engine
Admin Console.
"""
# This is 3-4x faster than base64.urlsafe_b64encode()
urlsafe = base64.b64encode(self.reference().Encode())
return urlsafe.rstrip('=').replace('+', '-').replace('/', '_') | Return a url-safe string encoding this Key's Reference.
This string is compatible with other APIs and languages and with
the strings used to represent Keys in GQL and in the App Engine
Admin Console. | entailment |
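A minimal round-trip sketch (decode_urlsafe is an illustrative helper, not part of the API; only the standard base64 module is assumed): since the method above strips '=' padding, decoding a websafe string must restore it first.
import base64
def decode_urlsafe(urlsafe):
    """Invert the encoding above: restore '=' padding, then decode."""
    pad = -len(urlsafe) % 4  # number of '=' characters that were stripped
    return base64.urlsafe_b64decode(urlsafe + '=' * pad)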
def get_async(self, **ctx_options):
"""Return a Future whose result is the entity for this Key.
If no such entity exists, a Future is still returned, and the
Future's eventual result will be None.
"""
from . import model, tasklets
ctx = tasklets.get_context()
cls = model.Model._kind_map.get(self.kind())
if cls:
cls._pre_get_hook(self)
fut = ctx.get(self, **ctx_options)
if cls:
post_hook = cls._post_get_hook
if not cls._is_default_hook(model.Model._default_post_get_hook,
post_hook):
fut.add_immediate_callback(post_hook, self, fut)
return fut | Return a Future whose result is the entity for this Key.
If no such entity exists, a Future is still returned, and the
Future's eventual result will be None. | entailment |
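A hedged usage sketch (SomeModel and the id are illustrative): the call returns immediately, and the entity, or None, comes out of the Future.
key = ndb.Key('SomeModel', 42)  # illustrative kind and id
fut = key.get_async()
# ... other work can proceed while the RPC is in flight ...
entity = fut.get_result()  # None if no such entity exists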
def delete_async(self, **ctx_options):
"""Schedule deletion of the entity for this Key.
This returns a Future, whose result becomes available once the
deletion is complete. If no such entity exists, a Future is still
returned. In all cases the Future's result is None (i.e. there is
no way to tell whether the entity existed or not).
"""
from . import tasklets, model
ctx = tasklets.get_context()
cls = model.Model._kind_map.get(self.kind())
if cls:
cls._pre_delete_hook(self)
fut = ctx.delete(self, **ctx_options)
if cls:
post_hook = cls._post_delete_hook
if not cls._is_default_hook(model.Model._default_post_delete_hook,
post_hook):
fut.add_immediate_callback(post_hook, self, fut)
return fut | Schedule deletion of the entity for this Key.
This returns a Future, whose result becomes available once the
deletion is complete. If no such entity exists, a Future is still
returned. In all cases the Future's result is None (i.e. there is
no way to tell whether the entity existed or not). | entailment |
def add_flow_exception(exc):
"""Add an exception that should not be logged.
The argument must be a subclass of Exception.
"""
global _flow_exceptions
if not isinstance(exc, type) or not issubclass(exc, Exception):
raise TypeError('Expected an Exception subclass, got %r' % (exc,))
as_set = set(_flow_exceptions)
as_set.add(exc)
_flow_exceptions = tuple(as_set) | Add an exception that should not be logged.
The argument must be a subclass of Exception. | entailment |
def _init_flow_exceptions():
"""Internal helper to initialize _flow_exceptions.
This automatically adds webob.exc.HTTPException, if it can be imported.
"""
global _flow_exceptions
_flow_exceptions = ()
add_flow_exception(datastore_errors.Rollback)
try:
from webob import exc
except ImportError:
pass
else:
add_flow_exception(exc.HTTPException) | Internal helper to initialize _flow_exceptions.
This automatically adds webob.exc.HTTPException, if it can be imported. | entailment |
def sleep(dt):
"""Public function to sleep some time.
Example:
yield tasklets.sleep(0.5) # Sleep for half a sec.
"""
fut = Future('sleep(%.3f)' % dt)
eventloop.queue_call(dt, fut.set_result, None)
return fut | Public function to sleep some time.
Example:
yield tasklets.sleep(0.5) # Sleep for half a sec. | entailment |
def _transfer_result(fut1, fut2):
"""Helper to transfer result or errors from one Future to another."""
exc = fut1.get_exception()
if exc is not None:
tb = fut1.get_traceback()
fut2.set_exception(exc, tb)
else:
val = fut1.get_result()
fut2.set_result(val) | Helper to transfer result or errors from one Future to another. | entailment |
def synctasklet(func):
"""Decorator to run a function as a tasklet when called.
Use this to wrap a request handler function that will be called by
some web application framework (e.g. a Django view function or a
webapp.RequestHandler.get method).
"""
taskletfunc = tasklet(func) # wrap at declaration time.
@utils.wrapping(func)
def synctasklet_wrapper(*args, **kwds):
# pylint: disable=invalid-name
__ndb_debug__ = utils.func_info(func)
return taskletfunc(*args, **kwds).get_result()
return synctasklet_wrapper | Decorator to run a function as a tasklet when called.
Use this to wrap a request handler function that will be called by
some web application framework (e.g. a Django view function or a
webapp.RequestHandler.get method). | entailment |
def toplevel(func):
"""A sync tasklet that sets a fresh default Context.
Use this for toplevel view functions such as
webapp.RequestHandler.get() or Django view functions.
"""
synctaskletfunc = synctasklet(func) # wrap at declaration time.
@utils.wrapping(func)
def add_context_wrapper(*args, **kwds):
# pylint: disable=invalid-name
__ndb_debug__ = utils.func_info(func)
_state.clear_all_pending()
# Create and install a new context.
ctx = make_default_context()
try:
set_context(ctx)
return synctaskletfunc(*args, **kwds)
finally:
set_context(None)
ctx.flush().check_success()
eventloop.run() # Ensure writes are flushed, etc.
return add_context_wrapper | A sync tasklet that sets a fresh default Context.
Use this for toplevel view functions such as
webapp.RequestHandler.get() or Django view functions. | entailment |
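A hedged usage sketch (handler and model names are illustrative): because toplevel wraps the function as a sync tasklet, the decorated handler may yield Futures, and all pending work is flushed before the request finishes.
class MyHandler(webapp.RequestHandler):  # illustrative handler
    @toplevel
    def get(self):
        ent = yield MyModel.get_by_id_async(1)  # runs as a tasklet
        self.response.out.write('found' if ent else 'missing')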
def _make_cloud_datastore_context(app_id, external_app_ids=()):
"""Creates a new context to connect to a remote Cloud Datastore instance.
This should only be used outside of Google App Engine.
Args:
app_id: The application id to connect to. This differs from the project
id as it may have an additional prefix, e.g. "s~" or "e~".
external_app_ids: A list of apps that may be referenced by data in your
application. For example, if you are connected to s~my-app and store keys
for s~my-other-app, you should include s~my-other-app in the
external_app_ids list.
Returns:
An ndb.Context that can connect to a Remote Cloud Datastore. You can use
this context by passing it to ndb.set_context.
"""
from . import model # Late import to deal with circular imports.
# Late import since it might not exist.
if not datastore_pbs._CLOUD_DATASTORE_ENABLED:
raise datastore_errors.BadArgumentError(
datastore_pbs.MISSING_CLOUD_DATASTORE_MESSAGE)
import googledatastore
try:
from google.appengine.datastore import cloud_datastore_v1_remote_stub
except ImportError:
from google3.apphosting.datastore import cloud_datastore_v1_remote_stub
current_app_id = os.environ.get('APPLICATION_ID', None)
if current_app_id and current_app_id != app_id:
# TODO(pcostello): We should support this so users can connect to different
# applications.
raise ValueError('Cannot create a Cloud Datastore context that connects '
'to an application (%s) that differs from the application '
'already connected to (%s).' % (app_id, current_app_id))
os.environ['APPLICATION_ID'] = app_id
id_resolver = datastore_pbs.IdResolver((app_id,) + tuple(external_app_ids))
project_id = id_resolver.resolve_project_id(app_id)
endpoint = googledatastore.helper.get_project_endpoint_from_env(project_id)
datastore = googledatastore.Datastore(
project_endpoint=endpoint,
credentials=googledatastore.helper.get_credentials_from_env())
conn = model.make_connection(_api_version=datastore_rpc._CLOUD_DATASTORE_V1,
_id_resolver=id_resolver)
# If necessary, install the stubs
try:
stub = cloud_datastore_v1_remote_stub.CloudDatastoreV1RemoteStub(datastore)
apiproxy_stub_map.apiproxy.RegisterStub(datastore_rpc._CLOUD_DATASTORE_V1,
stub)
except:
pass # The stub is already installed.
# TODO(pcostello): Ensure the current stub is connected to the right project.
# Install a memcache and taskqueue stub which throws on everything.
try:
apiproxy_stub_map.apiproxy.RegisterStub('memcache', _ThrowingStub())
except:
pass # The stub is already installed.
try:
apiproxy_stub_map.apiproxy.RegisterStub('taskqueue', _ThrowingStub())
except:
pass # The stub is already installed.
return make_context(conn=conn) | Creates a new context to connect to a remote Cloud Datastore instance.
This should only be used outside of Google App Engine.
Args:
app_id: The application id to connect to. This differs from the project
id as it may have an additional prefix, e.g. "s~" or "e~".
external_app_ids: A list of apps that may be referenced by data in your
application. For example, if you are connected to s~my-app and store keys
for s~my-other-app, you should include s~my-other-app in the
external_app_ids list.
Returns:
An ndb.Context that can connect to a Remote Cloud Datastore. You can use
this context by passing it to ndb.set_context. | entailment |
def _analyze_indexed_fields(indexed_fields):
"""Internal helper to check a list of indexed fields.
Args:
indexed_fields: A list of names, possibly dotted names.
(A dotted name is a string containing names separated by dots,
e.g. 'foo.bar.baz'. An undotted name is a string containing no
dots, e.g. 'foo'.)
Returns:
A dict whose keys are undotted names. For each undotted name in
the argument, the dict contains that undotted name as a key with
None as a value. For each dotted name in the argument, the dict
contains the first component as a key with a list of remainders as
values.
Example:
If the argument is ['foo.bar.baz', 'bar', 'foo.bletch'], the return
value is {'foo': ['bar.baz', 'bletch'], 'bar': None}.
Raises:
TypeError if an argument is not a string.
ValueError for duplicate arguments and for conflicting arguments
(when an undotted name also appears as the first component of
a dotted name).
"""
result = {}
for field_name in indexed_fields:
if not isinstance(field_name, basestring):
raise TypeError('Field names must be strings; got %r' % (field_name,))
if '.' not in field_name:
if field_name in result:
raise ValueError('Duplicate field name %s' % field_name)
result[field_name] = None
else:
head, tail = field_name.split('.', 1)
if head not in result:
result[head] = [tail]
elif result[head] is None:
raise ValueError('Field name %s conflicts with ancestor %s' %
(field_name, head))
else:
result[head].append(tail)
return result | Internal helper to check a list of indexed fields.
Args:
indexed_fields: A list of names, possibly dotted names.
(A dotted name is a string containing names separated by dots,
e.g. 'foo.bar.baz'. An undotted name is a string containing no
dots, e.g. 'foo'.)
Returns:
A dict whose keys are undotted names. For each undotted name in
the argument, the dict contains that undotted name as a key with
None as a value. For each dotted name in the argument, the dict
contains the first component as a key with a list of remainders as
values.
Example:
If the argument is ['foo.bar.baz', 'bar', 'foo.bletch'], the return
value is {'foo': ['bar.baz', 'bletch'], 'bar': None}.
Raises:
TypeError if an argument is not a string.
ValueError for duplicate arguments and for conflicting arguments
(when an undotted name also appears as the first component of
a dotted name). | entailment |
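The docstring's own example, written out as a check:
assert _analyze_indexed_fields(['foo.bar.baz', 'bar', 'foo.bletch']) == \
    {'foo': ['bar.baz', 'bletch'], 'bar': None}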
def _make_model_class(message_type, indexed_fields, **props):
"""Construct a Model subclass corresponding to a Message subclass.
Args:
message_type: A Message subclass.
indexed_fields: A list of dotted and undotted field names.
**props: Additional properties with which to seed the class.
Returns:
A Model subclass whose properties correspond to those fields of
message_type whose field name is listed in indexed_fields, plus
the properties specified by the **props arguments. For dotted
field names, a StructuredProperty is generated using a Model
subclass created by a recursive call.
Raises:
Whatever _analyze_indexed_fields() raises.
ValueError if a field name conflicts with a name in **props.
ValueError if a field name is not a valid field of message_type.
ValueError if an undotted field name designates a MessageField.
"""
analyzed = _analyze_indexed_fields(indexed_fields)
for field_name, sub_fields in analyzed.iteritems():
if field_name in props:
raise ValueError('field name %s is reserved' % field_name)
try:
field = message_type.field_by_name(field_name)
except KeyError:
raise ValueError('Message type %s has no field named %s' %
(message_type.__name__, field_name))
if isinstance(field, messages.MessageField):
if not sub_fields:
raise ValueError(
'MessageField %s cannot be indexed, only sub-fields' % field_name)
sub_model_class = _make_model_class(field.type, sub_fields)
prop = model.StructuredProperty(sub_model_class, field_name,
repeated=field.repeated)
else:
if sub_fields is not None:
raise ValueError(
'Unstructured field %s cannot have indexed sub-fields' % field_name)
if isinstance(field, messages.EnumField):
prop = EnumProperty(field.type, field_name, repeated=field.repeated)
elif isinstance(field, messages.BytesField):
prop = model.BlobProperty(field_name,
repeated=field.repeated, indexed=True)
else:
# IntegerField, FloatField, BooleanField, StringField.
prop = model.GenericProperty(field_name, repeated=field.repeated)
props[field_name] = prop
return model.MetaModel('_%s__Model' % message_type.__name__,
(model.Model,), props) | Construct a Model subclass corresponding to a Message subclass.
Args:
message_type: A Message subclass.
indexed_fields: A list of dotted and undotted field names.
**props: Additional properties with which to seed the class.
Returns:
A Model subclass whose properties correspond to those fields of
message_type whose field name is listed in indexed_fields, plus
the properties specified by the **props arguments. For dotted
field names, a StructuredProperty is generated using a Model
subclass created by a recursive call.
Raises:
Whatever _analyze_indexed_fields() raises.
ValueError if a field name conflicts with a name in **props.
ValueError if a field name is not a valid field of message_type.
ValueError if an undotted field name designates a MessageField. | entailment |
def _message_to_entity(msg, modelclass):
"""Recursive helper for _to_base_type() to convert a message to an entity.
Args:
msg: A Message instance.
modelclass: A Model subclass.
Returns:
An instance of modelclass.
"""
ent = modelclass()
for prop_name, prop in modelclass._properties.iteritems():
if prop._code_name == 'blob_': # TODO: Devise a cleaner test.
continue # That's taken care of later.
value = getattr(msg, prop_name)
if value is not None and isinstance(prop, model.StructuredProperty):
if prop._repeated:
value = [_message_to_entity(v, prop._modelclass) for v in value]
else:
value = _message_to_entity(value, prop._modelclass)
setattr(ent, prop_name, value)
return ent | Recursive helper for _to_base_type() to convert a message to an entity.
Args:
msg: A Message instance.
modelclass: A Model subclass.
Returns:
An instance of modelclass. | entailment |
def _projected_entity_to_message(ent, message_type):
"""Recursive helper for _from_base_type() to convert an entity to a message.
Args:
ent: A Model instance.
message_type: A Message subclass.
Returns:
An instance of message_type.
"""
msg = message_type()
analyzed = _analyze_indexed_fields(ent._projection)
for name, sublist in analyzed.iteritems():
prop = ent._properties[name]
val = prop._get_value(ent)
assert isinstance(prop, model.StructuredProperty) == bool(sublist)
if sublist:
field = message_type.field_by_name(name)
assert isinstance(field, messages.MessageField)
assert prop._repeated == field.repeated
if prop._repeated:
assert isinstance(val, list)
val = [_projected_entity_to_message(v, field.type) for v in val]
else:
assert isinstance(val, prop._modelclass)
val = _projected_entity_to_message(val, field.type)
setattr(msg, name, val)
return msg | Recursive helper for _from_base_type() to convert an entity to a message.
Args:
ent: A Model instance.
message_type: A Message subclass.
Returns:
An instance of message_type. | entailment |
def _validate(self, value):
"""Validate an Enum value.
Raises:
TypeError if the value is not an instance of self._enum_type.
"""
if not isinstance(value, self._enum_type):
raise TypeError('Expected a %s instance, got %r instead' %
(self._enum_type.__name__, value)) | Validate an Enum value.
Raises:
TypeError if the value is not an instance of self._enum_type. | entailment |
def _validate(self, msg):
"""Validate an Enum value.
Raises:
TypeError if the value is not an instance of self._message_type.
"""
if not isinstance(msg, self._message_type):
raise TypeError('Expected a %s instance for %s property' %
                (self._message_type.__name__,
                 self._code_name or self._name)) | Validate a Message value.
Raises:
TypeError if the value is not an instance of self._message_type. | entailment |
def _to_base_type(self, msg):
"""Convert a Message value to a Model instance (entity)."""
ent = _message_to_entity(msg, self._modelclass)
ent.blob_ = self._protocol_impl.encode_message(msg)
return ent | Convert a Message value to a Model instance (entity). | entailment |
def _from_base_type(self, ent):
"""Convert a Model instance (entity) to a Message value."""
if ent._projection:
# Projection query result. Reconstitute the message from the fields.
return _projected_entity_to_message(ent, self._message_type)
blob = ent.blob_
if blob is not None:
protocol = self._protocol_impl
else:
# Perhaps it was written using a different protocol.
protocol = None
for name in _protocols_registry.names:
key = '__%s__' % name
if key in ent._values:
blob = ent._values[key]
if isinstance(blob, model._BaseValue):
blob = blob.b_val
protocol = _protocols_registry.lookup_by_name(name)
break
if blob is None or protocol is None:
return None # This will reveal the underlying dummy model.
msg = protocol.decode_message(self._message_type, blob)
return msg | Convert a Model instance (entity) to a Message value. | entailment |
def _get_value(self, entity):
"""Compute and store a default value if necessary."""
value = super(_ClassKeyProperty, self)._get_value(entity)
if not value:
value = entity._class_key()
self._store_value(entity, value)
return value | Compute and store a default value if necessary. | entailment |
def _update_kind_map(cls):
"""Override; called by Model._fix_up_properties().
Update the kind map as well as the class map, except for PolyModel
itself (its class key is empty). Note that the kind map will
contain entries for all classes in a PolyModel hierarchy; they all
have the same kind, but different class names. PolyModel class
names, like regular Model class names, must be globally unique.
"""
cls._kind_map[cls._class_name()] = cls
class_key = cls._class_key()
if class_key:
cls._class_map[tuple(class_key)] = cls | Override; called by Model._fix_up_properties().
Update the kind map as well as the class map, except for PolyModel
itself (its class key is empty). Note that the kind map will
contain entries for all classes in a PolyModel hierarchy; they all
have the same kind, but different class names. PolyModel class
names, like regular Model class names, must be globally unique. | entailment |
def _from_pb(cls, pb, set_key=True, ent=None, key=None):
"""Override.
Use the class map to give the entity the correct subclass.
"""
prop_name = cls.class_._name
class_name = []
for plist in [pb.property_list(), pb.raw_property_list()]:
for p in plist:
if p.name() == prop_name:
class_name.append(p.value().stringvalue())
cls = cls._class_map.get(tuple(class_name), cls)
return super(PolyModel, cls)._from_pb(pb, set_key, ent, key) | Override.
Use the class map to give the entity the correct subclass. | entailment |
def _get_kind(cls):
"""Override.
Make sure that the kind returned is the root class of the
polymorphic hierarchy.
"""
bases = cls._get_hierarchy()
if not bases:
# We have to jump through some hoops to call the superclass'
# _get_kind() method. First, this is called by the metaclass
# before the PolyModel name is defined, so it can't use
# super(PolyModel, cls)._get_kind(). Second, we can't just call
# Model._get_kind() because that always returns 'Model'. Hence
# the 'im_func' hack.
return model.Model._get_kind.im_func(cls)
else:
return bases[0]._class_name() | Override.
Make sure that the kind returned is the root class of the
polymorphic hierarchy. | entailment |
def _get_hierarchy(cls):
"""Internal helper to return the list of polymorphic base classes.
This returns a list of class objects, e.g. [Animal, Feline, Cat].
"""
bases = []
for base in cls.mro(): # pragma: no branch
if hasattr(base, '_get_hierarchy'):
bases.append(base)
del bases[-1] # Delete PolyModel itself
bases.reverse()
return bases | Internal helper to return the list of polymorphic base classes.
This returns a list of class objects, e.g. [Animal, Feline, Cat]. | entailment |
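A sketch of the hierarchy the docstring refers to (class names are the docstring's; the behavior follows _get_hierarchy() and _get_kind() above):
class Animal(polymodel.PolyModel):
    pass
class Feline(Animal):
    pass
class Cat(Feline):
    pass
# Cat._get_hierarchy() -> [Animal, Feline, Cat]
# Cat._get_kind()      -> 'Animal'  (the root class names the kind)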
def find_env_paths_in_basedirs(base_dirs):
"""Returns all potential envs in a basedir"""
# get potential env path in the base_dirs
env_path = []
for base_dir in base_dirs:
env_path.extend(glob.glob(os.path.join(
os.path.expanduser(base_dir), '*', '')))
# self.log.info("Found the following kernels from config: %s", ", ".join(venvs))
return env_path | Returns all potential env paths in the given base dirs | entailment |
def convert_to_env_data(mgr, env_paths, validator_func, activate_func,
name_template, display_name_template, name_prefix):
"""Converts a list of paths to environments to env_data.
env_data is a structure {name -> (resourcedir, kernel spec)}
"""
env_data = {}
for venv_dir in env_paths:
venv_name = os.path.split(os.path.abspath(venv_dir))[1]
kernel_name = name_template.format(name_prefix + venv_name)
kernel_name = kernel_name.lower()
if kernel_name in env_data:
mgr.log.debug(
"Found duplicate env kernel: %s, which would again point to %s. Using the first!",
kernel_name, venv_dir)
continue
argv, language, resource_dir = validator_func(venv_dir)
if not argv:
# probably does not contain the kernel type (e.g. not R or python or does not contain
# the kernel code itself)
continue
display_name = display_name_template.format(kernel_name)
kspec_dict = {"argv": argv, "language": language,
"display_name": display_name,
"resource_dir": resource_dir
}
# default argument values bind the current loop variables into the closure
def loader(env_dir=venv_dir, activate_func=activate_func, mgr=mgr):
mgr.log.debug("Loading env data for %s" % env_dir)
res = activate_func(mgr, env_dir)
# mgr.log.info("PATH: %s" % res['PATH'])
return res
kspec = EnvironmentLoadingKernelSpec(loader, **kspec_dict)
env_data.update({kernel_name: (resource_dir, kspec)})
return env_data | Converts a list of paths to environments to env_data.
env_data is a structure {name -> (resourcedir, kernel spec)} | entailment |
def validate_IPykernel(venv_dir):
"""Validates that this env contains an IPython kernel and returns info to start it
Returns: tuple
(ARGV, language, resource_dir)
"""
python_exe_name = find_exe(venv_dir, "python")
if python_exe_name is None:
python_exe_name = find_exe(venv_dir, "python2")
if python_exe_name is None:
python_exe_name = find_exe(venv_dir, "python3")
if python_exe_name is None:
return [], None, None
# Make some checks for ipython first, because calling the import is expensive
if find_exe(venv_dir, "ipython") is None:
if find_exe(venv_dir, "ipython2") is None:
if find_exe(venv_dir, "ipython3") is None:
return [], None, None
# check if this is really an ipython **kernel**
import subprocess
try:
subprocess.check_call([python_exe_name, '-c', 'import ipykernel'])
except:
# not installed? -> not useable in any case...
return [], None, None
argv = [python_exe_name, "-m", "ipykernel", "-f", "{connection_file}"]
resources_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logos", "python")
return argv, "python", resources_dir | Validates that this env contains an IPython kernel and returns info to start it
Returns: tuple
(ARGV, language, resource_dir) | entailment |
def validate_IRkernel(venv_dir):
"""Validates that this env contains an IRkernel kernel and returns info to start it
Returns: tuple
(ARGV, language, resource_dir)
"""
r_exe_name = find_exe(venv_dir, "R")
if r_exe_name is None:
return [], None, None
# check if this is really an IRkernel **kernel**
import subprocess
resources_dir = None
try:
print_resources = 'cat(as.character(system.file("kernelspec", package = "IRkernel")))'
resources_dir_bytes = subprocess.check_output([r_exe_name, '--slave', '-e', print_resources])
resources_dir = resources_dir_bytes.decode(errors='ignore')
except:
# not installed? -> not useable in any case...
return [], None, None
argv = [r_exe_name, "--slave", "-e", "IRkernel::main()", "--args", "{connection_file}"]
if not os.path.exists(resources_dir.strip()):
# Fallback to our own log, but don't get the nice js goodies...
resources_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logos", "r")
return argv, "r", resources_dir | Validates that this env contains an IRkernel kernel and returns info to start it
Returns: tuple
(ARGV, language, resource_dir) | entailment |
def find_exe(env_dir, name):
"""Finds a exe with that name in the environment path"""
if platform.system() == "Windows":
name = name + ".exe"
# find the binary
exe_name = os.path.join(env_dir, name)
if not os.path.exists(exe_name):
exe_name = os.path.join(env_dir, "bin", name)
if not os.path.exists(exe_name):
exe_name = os.path.join(env_dir, "Scripts", name)
if not os.path.exists(exe_name):
return None
return exe_name | Finds an exe with that name in the environment path | entailment |
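A hedged usage note: the lookup order is <env>/<name>, then <env>/bin/<name>, then <env>/Scripts/<name>, with '.exe' appended on Windows, so for example:
exe = find_exe('/opt/envs/py36', 'python')  # illustrative path
# POSIX:   typically '/opt/envs/py36/bin/python'
# Windows: typically '<env>\\Scripts\\python.exe'
# Returns None if no candidate exists.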
def get_virtualenv_env_data(mgr):
"""Finds kernel specs from virtualenv environments
env_data is a structure {name -> (resourcedir, kernel spec)}
"""
if not mgr.find_virtualenv_envs:
return {}
mgr.log.debug("Looking for virtualenv environments in %s...", mgr.virtualenv_env_dirs)
# find all potential env paths
env_paths = find_env_paths_in_basedirs(mgr.virtualenv_env_dirs)
mgr.log.debug("Scanning virtualenv environments for python kernels...")
env_data = convert_to_env_data(mgr=mgr,
env_paths=env_paths,
validator_func=validate_IPykernel,
activate_func=_get_env_vars_for_virtualenv_env,
name_template=mgr.virtualenv_prefix_template,
display_name_template=mgr.display_name_template,
# virtualenv has only python, so no need for a prefix
name_prefix="")
return env_data | Finds kernel specs from virtualenv environments
env_data is a structure {name -> (resourcedir, kernel spec)} | entailment |
def source_bash(args, stdin=None):
"""Simply bash-specific wrapper around source-foreign
Returns a dict to be used as a new environment"""
args = list(args)
new_args = ['bash', '--sourcer=source']
new_args.extend(args)
return source_foreign(new_args, stdin=stdin) | Simple bash-specific wrapper around source-foreign
Returns a dict to be used as a new environment | entailment |
def source_zsh(args, stdin=None):
"""Simply zsh-specific wrapper around source-foreign
Returns a dict to be used as a new environment"""
args = list(args)
new_args = ['zsh', '--sourcer=source']
new_args.extend(args)
return source_foreign(new_args, stdin=stdin) | Simple zsh-specific wrapper around source-foreign
Returns a dict to be used as a new environment | entailment |
def source_cmd(args, stdin=None):
"""Simple cmd.exe-specific wrapper around source-foreign.
Returns a dict to be used as a new environment.
"""
args = list(args)
fpath = locate_binary(args[0])
args[0] = fpath if fpath else args[0]
if not os.path.isfile(args[0]):
raise RuntimeError("Command not found: %s" % args[0])
prevcmd = 'call '
prevcmd += ' '.join([argvquote(arg, force=True) for arg in args])
prevcmd = escape_windows_cmd_string(prevcmd)
args.append('--prevcmd={}'.format(prevcmd))
args.insert(0, 'cmd')
args.append('--interactive=0')
args.append('--sourcer=call')
args.append('--envcmd=set')
args.append('--seterrpostcmd=if errorlevel 1 exit 1')
args.append('--use-tmpfile=1')
return source_foreign(args, stdin=stdin) | Simple cmd.exe-specific wrapper around source-foreign.
Returns a dict to be used as a new environment. | entailment |
def argvquote(arg, force=False):
""" Returns an argument quoted in such a way that that CommandLineToArgvW
on Windows will return the argument string unchanged.
This is the same thing Popen does when supplied with an list of arguments.
Arguments in a command line should be separated by spaces; this
function does not add these spaces. This implementation follows the
suggestions outlined here:
https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/
"""
if not force and len(arg) != 0 and not any([c in arg for c in ' \t\n\v"']):
return arg
else:
n_backslashes = 0
cmdline = '"'
for c in arg:
if c == "\\":
# first count the number of current backslashes
n_backslashes += 1
continue
if c == '"':
# Escape all backslashes and the following double quotation mark
cmdline += (n_backslashes * 2 + 1) * '\\'
else:
# backslashes are not special here
cmdline += n_backslashes * '\\'
n_backslashes = 0
cmdline += c
# Escape all backslashes, but let the terminating
# double quotation mark we add below be interpreted
# as a metacharacter
cmdline += n_backslashes * 2 * '\\' + '"'
return cmdline | Returns an argument quoted in such a way that CommandLineToArgvW
on Windows will return the argument string unchanged.
This is the same thing Popen does when supplied with a list of arguments.
Arguments in a command line should be separated by spaces; this
function does not add these spaces. This implementation follows the
suggestions outlined here:
https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/ | entailment |
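Illustrative inputs and outputs, following the MSDN rules the function implements (and assuming the backslash-count reset applies after a quoted '"' as well, per the fix above):
assert argvquote('simple') == 'simple'                   # no specials: unchanged
assert argvquote('a b') == '"a b"'                       # whitespace forces quoting
assert argvquote('say "hi"') == '"say \\"hi\\""'         # embedded quotes escaped
assert argvquote('tail\\', force=True) == '"tail\\\\"'   # trailing backslashes doubled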
def escape_windows_cmd_string(s):
"""Returns a string that is usable by the Windows cmd.exe.
The escaping is based on details here and empirical testing:
http://www.robvanderwoude.com/escapechars.php
"""
for c in '()%!^<>&|"':
s = s.replace(c, '^' + c)
s = s.replace('/?', '/.')
return s | Returns a string that is usable by the Windows cmd.exe.
The escaping is based on details here and empirical testing:
http://www.robvanderwoude.com/escapechars.php | entailment |
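For example, each cmd.exe metacharacter gains a caret escape:
assert escape_windows_cmd_string('50% off & more') == '50^% off ^& more'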
def source_foreign(args, stdin=None):
"""Sources a file written in a foreign shell language."""
parser = _ensure_source_foreign_parser()
ns = parser.parse_args(args)
if ns.prevcmd is not None:
pass # don't change prevcmd if given explicitly
elif os.path.isfile(ns.files_or_code[0]):
# we have filename to source
ns.prevcmd = '{} "{}"'.format(ns.sourcer, '" "'.join(ns.files_or_code))
elif ns.prevcmd is None:
ns.prevcmd = ' '.join(ns.files_or_code) # code to run, no files
fsenv = foreign_shell_data(shell=ns.shell, login=ns.login,
interactive=ns.interactive,
envcmd=ns.envcmd,
aliascmd=ns.aliascmd,
extra_args=ns.extra_args,
safe=ns.safe, prevcmd=ns.prevcmd,
postcmd=ns.postcmd,
funcscmd=ns.funcscmd,
sourcer=ns.sourcer,
use_tmpfile=ns.use_tmpfile,
seterrprevcmd=ns.seterrprevcmd,
seterrpostcmd=ns.seterrpostcmd)
if fsenv is None:
raise RuntimeError("Source failed: {}\n".format(ns.prevcmd), 1)
# apply results
env = os.environ.copy()
for k, v in fsenv.items():
if k in env and v == env[k]:
continue # no change from original
env[k] = v
# Remove any env-vars that were unset by the script.
for k in os.environ: # use os.environ again to prevent errors about changed size
if k not in fsenv:
env.pop(k, None)
return env | Sources a file written in a foreign shell language. | entailment |
def _is_executable_file(path):
"""Checks that path is an executable regular file, or a symlink towards one.
This is roughly ``os.path.isfile(path) and os.access(path, os.X_OK)``.
This function was forked from pexpect originally:
Copyright (c) 2013-2014, Pexpect development team
Copyright (c) 2012, Noah Spurrier <[email protected]>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
# follow symlinks,
fpath = os.path.realpath(path)
if not os.path.isfile(fpath):
# non-files (directories, fifo, etc.)
return False
return os.access(fpath, os.X_OK) | Checks that path is an executable regular file, or a symlink towards one.
This is roughly ``os.path.isfile(path) and os.access(path, os.X_OK)``.
This function was forked from pexpect originally:
Copyright (c) 2013-2014, Pexpect development team
Copyright (c) 2012, Noah Spurrier <[email protected]>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | entailment |
def foreign_shell_data(shell, interactive=True, login=False, envcmd=None,
aliascmd=None, extra_args=(), currenv=None,
safe=False, prevcmd='', postcmd='', funcscmd=None,
sourcer=None, use_tmpfile=False, tmpfile_ext=None,
runcmd=None, seterrprevcmd=None, seterrpostcmd=None):
"""Extracts data from a foreign (non-xonsh) shells. Currently this gets
the environment, aliases, and functions but may be extended in the future.
Parameters
----------
shell : str
The name of the shell, such as 'bash' or '/bin/sh'.
interactive : bool, optional
Whether the shell should be run in interactive mode.
login : bool, optional
Whether the shell should be a login shell.
envcmd : str or None, optional
The command to generate environment output with.
aliascmd : str or None, optional
The command to generate alias output with.
extra_args : tuple of str, optional
Additional command line options to pass into the shell.
currenv : tuple of items or None, optional
Manual override for the current environment.
safe : bool, optional
Flag for whether or not to safely handle exceptions and other errors.
prevcmd : str, optional
A command to run in the shell before anything else, useful for
sourcing and other commands that may require environment recovery.
postcmd : str, optional
A command to run after everything else, useful for cleaning up any
damage that the prevcmd may have caused.
funcscmd : str or None, optional
This is a command or script that can be used to determine the names
and locations of any functions that are native to the foreign shell.
This command should print *only* a JSON object that maps
function names to the filenames where the functions are defined.
If this is None, then a default script will be looked
up based on the shell name. Callable wrappers for these functions
will be returned in the aliases dictionary.
sourcer : str or None, optional
How to source a foreign shell file for purposes of calling functions
in that shell. If this is None, a default value will attempt to be
looked up based on the shell name.
use_tmpfile : bool, optional
This specifies if the commands are written to a tmp file or just
parsed directly to the shell
tmpfile_ext : str or None, optional
If use_tmpfile is True, this specifies the extension used.
runcmd : str or None, optional
Command line switches to use when running the script, such as
-c for Bash and /C for cmd.exe.
seterrprevcmd : str or None, optional
Command that enables exit-on-error for the shell that is run at the
start of the script. For example, this is "set -e" in Bash. To disable
exit-on-error behavior, simply pass in an empty string.
seterrpostcmd : str or None, optional
Command that enables exit-on-error for the shell that is run at the end
of the script. For example, this is "if errorlevel 1 exit 1" in
cmd.exe. To disable exit-on-error behavior, simply pass in an
empty string.
Returns
-------
env : dict
    Dictionary of the shell's environment, or None if the shell failed
    and ``safe`` was set.
"""
cmd = [shell]
cmd.extend(extra_args) # needs to come here for GNU long options
if interactive:
cmd.append('-i')
if login:
cmd.append('-l')
shkey = CANON_SHELL_NAMES[shell]
envcmd = DEFAULT_ENVCMDS.get(shkey, 'env') if envcmd is None else envcmd
tmpfile_ext = DEFAULT_TMPFILE_EXT.get(shkey, 'sh') if tmpfile_ext is None else tmpfile_ext
runcmd = DEFAULT_RUNCMD.get(shkey, '-c') if runcmd is None else runcmd
seterrprevcmd = DEFAULT_SETERRPREVCMD.get(shkey, '') \
if seterrprevcmd is None else seterrprevcmd
seterrpostcmd = DEFAULT_SETERRPOSTCMD.get(shkey, '') \
if seterrpostcmd is None else seterrpostcmd
command = COMMAND.format(envcmd=envcmd, prevcmd=prevcmd,
postcmd=postcmd,
seterrprevcmd=seterrprevcmd,
seterrpostcmd=seterrpostcmd).strip()
cmd.append(runcmd)
if not use_tmpfile:
cmd.append(command)
else:
tmpfile = NamedTemporaryFile(suffix=tmpfile_ext, delete=False)
tmpfile.write(command.encode('utf8'))
tmpfile.close()
cmd.append(tmpfile.name)
if currenv is None:
    currenv = os.environ
try:
s = subprocess.check_output(cmd, stderr=subprocess.PIPE, env=currenv,
# start new session to avoid hangs
start_new_session=True,
universal_newlines=True)
except (subprocess.CalledProcessError, FileNotFoundError):
if not safe:
raise
return None
finally:
    if use_tmpfile:
        os.remove(tmpfile.name)
env = parse_env(s)
return env | Extracts data from a foreign (non-xonsh) shells. Currently this gets
the environment, aliases, and functions but may be extended in the future.
Parameters
----------
shell : str
The name of the shell, such as 'bash' or '/bin/sh'.
interactive : bool, optional
Whether the shell should be run in interactive mode.
login : bool, optional
Whether the shell should be a login shell.
envcmd : str or None, optional
The command to generate environment output with.
aliascmd : str or None, optional
The command to generate alias output with.
extra_args : tuple of str, optional
Additional command line options to pass into the shell.
currenv : tuple of items or None, optional
Manual override for the current environment.
safe : bool, optional
Flag for whether or not to safely handle exceptions and other errors.
prevcmd : str, optional
A command to run in the shell before anything else, useful for
sourcing and other commands that may require environment recovery.
postcmd : str, optional
A command to run after everything else, useful for cleaning up any
damage that the prevcmd may have caused.
funcscmd : str or None, optional
This is a command or script that can be used to determine the names
and locations of any functions that are native to the foreign shell.
This command should print *only* a JSON object that maps
function names to the filenames where the functions are defined.
If this is None, then a default script will be looked
up based on the shell name. Callable wrappers for these functions
will be returned in the aliases dictionary.
sourcer : str or None, optional
How to source a foreign shell file for purposes of calling functions
in that shell. If this is None, a default value will attempt to be
looked up based on the shell name.
use_tmpfile : bool, optional
This specifies if the commands are written to a tmp file or just
parsed directly to the shell
tmpfile_ext : str or None, optional
If use_tmpfile is True, this specifies the extension used.
runcmd : str or None, optional
Command line switches to use when running the script, such as
-c for Bash and /C for cmd.exe.
seterrprevcmd : str or None, optional
Command that enables exit-on-error for the shell that is run at the
start of the script. For example, this is "set -e" in Bash. To disable
exit-on-error behavior, simply pass in an empty string.
seterrpostcmd : str or None, optional
Command that enables exit-on-error for the shell that is run at the end
of the script. For example, this is "if errorlevel 1 exit 1" in
cmd.exe. To disable exit-on-error behavior, simply pass in an
empty string.
Returns
-------
env : dict
    Dictionary of the shell's environment, or None if the shell failed
    and ``safe`` was set. | entailment |
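A hedged usage sketch (the sourced path is illustrative): this mirrors what source_bash()/source_foreign() assemble internally, capturing the environment bash ends up with after sourcing a file.
env = foreign_shell_data('bash', interactive=False, login=False,
                         prevcmd='source "/etc/profile"', safe=True)
if env is not None:
    print(env.get('PATH'))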
def to_bool(x):
""""Converts to a boolean in a semantically meaningful way."""
if isinstance(x, bool):
return x
elif isinstance(x, str):
return False if x.lower() in _FALSES else True
else:
return bool(x) | Converts to a boolean in a semantically meaningful way. | entailment |
def parse_env(s):
"""Parses the environment portion of string into a dict."""
m = ENV_RE.search(s)
if m is None:
return {}
g1 = m.group(1)
env = dict(ENV_SPLIT_RE.findall(g1))
return env | Parses the environment portion of string into a dict. | entailment |
def get_conda_env_data(mgr):
"""Finds kernel specs from conda environments
env_data is a structure {name -> (resourcedir, kernel spec)}
"""
if not mgr.find_conda_envs:
return {}
mgr.log.debug("Looking for conda environments in %s...", mgr.conda_env_dirs)
# find all potential env paths
env_paths = find_env_paths_in_basedirs(mgr.conda_env_dirs)
env_paths.extend(_find_conda_env_paths_from_conda(mgr))
env_paths = list(set(env_paths)) # remove duplicates
mgr.log.debug("Scanning conda environments for python kernels...")
env_data = convert_to_env_data(mgr=mgr,
env_paths=env_paths,
validator_func=validate_IPykernel,
activate_func=_get_env_vars_for_conda_env,
name_template=mgr.conda_prefix_template,
display_name_template=mgr.display_name_template,
name_prefix="") # lets keep the py kernels without a prefix...
if mgr.find_r_envs:
mgr.log.debug("Scanning conda environments for R kernels...")
env_data.update(convert_to_env_data(mgr=mgr,
env_paths=env_paths,
validator_func=validate_IRkernel,
activate_func=_get_env_vars_for_conda_env,
name_template=mgr.conda_prefix_template,
display_name_template=mgr.display_name_template,
name_prefix="r_"))
return env_data | Finds kernel specs from conda environments
env_data is a structure {name -> (resourcedir, kernel spec)} | entailment |
def _find_conda_env_paths_from_conda(mgr):
"""Returns a list of path as given by `conda env list --json`.
Returns empty list, if conda couldn't be called.
"""
# this is expensive, so make it configurable...
if not mgr.use_conda_directly:
return []
mgr.log.debug("Looking for conda environments by calling conda directly...")
import subprocess
import json
try:
p = subprocess.Popen(
['conda', 'env', 'list', '--json'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
comm = p.communicate()
output = comm[0].decode()
if p.returncode != 0 or len(output) == 0:
mgr.log.error(
"Couldn't call 'conda' to get the environments. "
"Output:\n%s", str(comm))
return []
except FileNotFoundError:
mgr.log.error("'conda' not found in path.")
return []
output = json.loads(output)
envs = output["envs"]
# self.log.info("Found the following kernels from conda: %s", ", ".join(envs))
return envs | Returns a list of paths as given by `conda env list --json`.
Returns an empty list if conda couldn't be called. | entailment |
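For reference, the JSON this parses looks roughly like the following (paths are illustrative); only the top-level "envs" list is used:
# $ conda env list --json
# {
#   "envs": [
#     "/home/user/miniconda3",
#     "/home/user/miniconda3/envs/py36"
#   ]
# }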
def validate_env(self, envname):
"""
Check the name of the environment against the blacklist and the
whitelist. If a whitelist is specified, only the whitelist is checked.
"""
if self.whitelist_envs and envname in self.whitelist_envs:
return True
elif self.whitelist_envs:
return False
if self.blacklist_envs and envname not in self.blacklist_envs:
return True
elif self.blacklist_envs:
# The env name is in the blacklist, so reject it
return False
else:
return True | Check the name of the environment against the blacklist and the
whitelist. If a whitelist is specified, only the whitelist is checked. | entailment |
def _get_env_data(self, reload=False):
"""Get the data about the available environments.
env_data is a structure {name -> (resourcedir, kernel spec)}
"""
# This is called much too often and the finding process is really expensive :-(
if not reload and getattr(self, "_env_data_cache", {}):
return getattr(self, "_env_data_cache")
env_data = {}
for supplyer in ENV_SUPPLYER:
env_data.update(supplyer(self))
env_data = {name: env_data[name] for name in env_data if self.validate_env(name)}
new_kernels = [env for env in list(env_data.keys()) if env not in getattr(self, "_env_data_cache", {})]
if new_kernels:
self.log.info("Found new kernels in environments: %s", ", ".join(new_kernels))
self._env_data_cache = env_data
return env_data | Get the data about the available environments.
env_data is a structure {name -> (resourcedir, kernel spec)} | entailment |
def find_kernel_specs_for_envs(self):
"""Returns a dict mapping kernel names to resource directories."""
data = self._get_env_data()
return {name: data[name][0] for name in data} | Returns a dict mapping kernel names to resource directories. | entailment |
def get_all_kernel_specs_for_envs(self):
"""Returns the dict of name -> kernel_spec for all environments"""
data = self._get_env_data()
return {name: data[name][1] for name in data} | Returns the dict of name -> kernel_spec for all environments | entailment |
def find_kernel_specs(self):
"""Returns a dict mapping kernel names to resource directories."""
# let real installed kernels overwrite envs with the same name:
# this is the same order as the get_kernel_spec way, which also prefers
# kernels from the jupyter dir over env kernels.
specs = self.find_kernel_specs_for_envs()
specs.update(super(EnvironmentKernelSpecManager,
self).find_kernel_specs())
return specs | Returns a dict mapping kernel names to resource directories. | entailment |
def get_all_specs(self):
"""Returns a dict mapping kernel names and resource directories.
"""
# This is new in 4.1 -> https://github.com/jupyter/jupyter_client/pull/93
specs = self.get_all_kernel_specs_for_envs()
specs.update(super(EnvironmentKernelSpecManager, self).get_all_specs())
return specs | Returns a dict mapping kernel names and resource directories. | entailment |
def get_kernel_spec(self, kernel_name):
"""Returns a :class:`KernelSpec` instance for the given kernel_name.
Raises :exc:`NoSuchKernel` if the given kernel name is not found.
"""
try:
return super(EnvironmentKernelSpecManager,
self).get_kernel_spec(kernel_name)
except (NoSuchKernel, FileNotFoundError):
venv_kernel_name = kernel_name.lower()
specs = self.get_all_kernel_specs_for_envs()
if venv_kernel_name in specs:
return specs[venv_kernel_name]
else:
raise NoSuchKernel(kernel_name) | Returns a :class:`KernelSpec` instance for the given kernel_name.
Raises :exc:`NoSuchKernel` if the given kernel name is not found. | entailment |
def to_latin(string_to_transliterate, lang_code='sr'):
''' Transliterate a Serbian Cyrillic string of characters to a Latin string of characters.
:param string_to_transliterate: The cyrillic string to transliterate into latin characters.
:param lang_code: Indicates the cyrillic language code we are translating from. Defaults to Serbian (sr).
:return: A string of latin characters transliterated from the given cyrillic string.
'''
# First check if we support the cyrillic alphabet we want to transliterate to latin.
if lang_code.lower() not in TRANSLIT_DICT:
# If we don't support it, then just return the original string.
return string_to_transliterate
# If we do support it, check if the implementation is not missing before proceeding.
elif not TRANSLIT_DICT[lang_code.lower()]['tolatin']:
return string_to_transliterate
# Everything checks out, proceed with transliteration.
else:
# Get the character per character transliteration dictionary
transliteration_dict = TRANSLIT_DICT[lang_code.lower()]['tolatin']
# Initialize the output latin string variable
latinized_str = ''
# Transliterate by traversing the input string character by character.
string_to_transliterate = __decode_utf8(string_to_transliterate)
for c in string_to_transliterate:
# If character is in dictionary, it means it's a cyrillic so let's transliterate that character.
if c in transliteration_dict:
# Transliterate current character.
latinized_str += transliteration_dict[c]
# If character is not in character transliteration dictionary,
# it is most likely a number or a special character so just keep it.
else:
latinized_str += c
# Return the transliterated string.
return __encode_utf8(latinized_str) | Transliterate a Serbian Cyrillic string of characters to a Latin string of characters.
:param string_to_transliterate: The cyrillic string to transliterate into latin characters.
:param lang_code: Indicates the cyrillic language code we are translating from. Defaults to Serbian (sr).
:return: A string of latin characters transliterated from the given cyrillic string. | entailment |
def to_cyrillic(string_to_transliterate, lang_code='sr'):
''' Transliterate a Serbian Latin string of characters to a Cyrillic string of characters.
:param string_to_transliterate: The latin string to transliterate into cyrillic characters.
:param lang_code: Indicates the cyrillic language code we are translating to. Defaults to Serbian (sr).
:return: A string of cyrillic characters transliterated from the given latin string.
'''
# First check if we support the language we want to transliterate to cyrillic.
if lang_code.lower() not in TRANSLIT_DICT:
# If we don't support it, then just return the original string.
return string_to_transliterate
# If we do support it, check if the implementation is not missing before proceeding.
elif not TRANSLIT_DICT[lang_code.lower()]['tocyrillic']:
return string_to_transliterate
else:
# Get the character per character transliteration dictionary
transliteration_dict = TRANSLIT_DICT[lang_code.lower()]['tocyrillic']
# Initialize the output cyrillic string variable
cyrillic_str = ''
string_to_transliterate = __decode_utf8(string_to_transliterate)
# Transliterate by traversing the input string character by character.
length_of_string_to_transliterate = len(string_to_transliterate)
index = 0
while index < length_of_string_to_transliterate:
# Grab a character from the string at the current index
c = string_to_transliterate[index]
# Watch out for Lj and lj. Don't want to interpret Lj/lj as L/l and j.
# Watch out for Nj and nj. Don't want to interpret Nj/nj as N/n and j.
# Watch out for Dž and dž. Don't want to interpret Dž/dž as D/d and ž.
c_plus_1 = u''
if index != length_of_string_to_transliterate - 1:
c_plus_1 = string_to_transliterate[index + 1]
if ((c == u'L' or c == u'l') and c_plus_1 == u'j') or \
((c == u'N' or c == u'n') and c_plus_1 == u'j') or \
((c == u'D' or c == u'd') and c_plus_1 == u'ž') or \
(lang_code == 'mk' and (c == u'D' or c == u'd') and c_plus_1 == u'z') or \
(lang_code == 'ru' and (
(c in u'Cc' and c_plus_1 in u'Hh') or # c, ch
(c in u'Ee' and c_plus_1 in u'Hh') or # eh
(c == u'i' and c_plus_1 == u'y' and
string_to_transliterate[index + 2:index + 3] not in u'aou') or # iy[^AaOoUu]
(c in u'Jj' and c_plus_1 in u'UuAaEe') or # j, ju, ja, je
(c in u'Ss' and c_plus_1 in u'HhZz') or # s, sh, sz
(c in u'Yy' and c_plus_1 in u'AaOoUu') or # y, ya, yo, yu
(c in u'Zz' and c_plus_1 in u'Hh') # z, zh
)):
index += 1
c += c_plus_1
# If character is in dictionary, it means it's a cyrillic so let's transliterate that character.
if c in transliteration_dict:
# ay, ey, iy, oy, uy
if lang_code == 'ru' and c in u'Yy' and \
cyrillic_str and cyrillic_str[-1].lower() in u"аеиоуэя":
cyrillic_str += u"й" if c == u'y' else u"Й"
else:
# Transliterate current character.
cyrillic_str += transliteration_dict[c]
# If character is not in character transliteration dictionary,
# it is most likely a number or a special character so just keep it.
else:
cyrillic_str += c
index += 1
return __encode_utf8(cyrillic_str) | Transliterate a Serbian Latin string of characters to a Cyrillic string of characters.
:param string_to_transliterate: The latin string to transliterate into cyrillic characters.
:param lang_code: Indicates the cyrillic language code we are translating to. Defaults to Serbian (sr).
:return: A string of cyrillic characters transliterated from the given latin string. | entailment |
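A quick round trip for the default Serbian mapping (illustrative values):
assert to_latin(u'Београд') == u'Beograd'
assert to_cyrillic(u'Beograd') == u'Београд'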
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as XML and returns the resulting data.
"""
assert etree, 'XMLParser requires defusedxml to be installed'
parser_context = parser_context or {}
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
parser = etree.DefusedXMLParser(encoding=encoding)
try:
tree = etree.parse(stream, parser=parser, forbid_dtd=True)
except (etree.ParseError, ValueError) as exc:
raise ParseError('XML parse error - %s' % six.text_type(exc))
data = self._xml_convert(tree.getroot())
return data | Parses the incoming bytestream as XML and returns the resulting data. | entailment |
def _xml_convert(self, element):
"""
convert the xml `element` into the corresponding python object
"""
children = list(element)
if len(children) == 0:
return self._type_convert(element.text)
else:
# if the first child's tag is list-item, all children are list items
if children[0].tag == "list-item":
data = []
for child in children:
data.append(self._xml_convert(child))
else:
data = {}
for child in children:
data[child.tag] = self._xml_convert(child)
return data | convert the xml `element` into the corresponding python object | entailment |
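The conversion rules above imply the following mapping (illustrative XML):
# <root><list-item>1</list-item><list-item>2</list-item></root>  ->  [1, 2]
# <root><a>1</a><b>text</b></root>  ->  {'a': 1, 'b': 'text'}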
def _type_convert(self, value):
"""
Converts the value returned by the XML parser into the equivalent
Python type
"""
if value is None:
return value
try:
return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
except ValueError:
pass
try:
return int(value)
except ValueError:
pass
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
pass
return value | Converts the value returned by the XML parser into the equivalent
Python type | entailment |
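The precedence is datetime, then int, then Decimal, falling back to the raw string (given a parser instance p):
# p._type_convert('2019-01-01 00:00:00') -> datetime.datetime(2019, 1, 1, 0, 0)
# p._type_convert('42')                  -> 42
# p._type_convert('3.14')                -> Decimal('3.14')
# p._type_convert('hello')               -> 'hello'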
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Renders `data` into serialized XML.
"""
if data is None:
return ''
stream = StringIO()
xml = SimplerXMLGenerator(stream, self.charset)
xml.startDocument()
xml.startElement(self.root_tag_name, {})
self._to_xml(xml, data)
xml.endElement(self.root_tag_name)
xml.endDocument()
return stream.getvalue() | Renders `data` into serialized XML. | entailment |
def open(self):
"""Open a connection to the device."""
device_type = 'cisco_ios'
if self.transport == 'telnet':
device_type = 'cisco_ios_telnet'
self.device = ConnectHandler(device_type=device_type,
host=self.hostname,
username=self.username,
password=self.password,
**self.netmiko_optional_args)
# ensure in enable mode
self.device.enable() | Open a connection to the device. | entailment |
def _create_tmp_file(config):
"""Write temp file and for use with inline config and SCP."""
tmp_dir = tempfile.gettempdir()
rand_fname = py23_compat.text_type(uuid.uuid4())
filename = os.path.join(tmp_dir, rand_fname)
with open(filename, 'wt') as fobj:
fobj.write(config)
return filename | Write a temp file for use with inline config and SCP. | entailment |
def _load_candidate_wrapper(self, source_file=None, source_config=None, dest_file=None,
file_system=None):
"""
Transfer file to remote device for either merge or replace operations
Returns (return_status, msg)
"""
return_status = False
msg = ''
if source_file and source_config:
raise ValueError("Cannot simultaneously set source_file and source_config")
if source_config:
if self.inline_transfer:
(return_status, msg) = self._inline_tcl_xfer(source_config=source_config,
dest_file=dest_file,
file_system=file_system)
else:
# Use SCP
tmp_file = self._create_tmp_file(source_config)
(return_status, msg) = self._scp_file(source_file=tmp_file, dest_file=dest_file,
file_system=file_system)
if tmp_file and os.path.isfile(tmp_file):
os.remove(tmp_file)
if source_file:
if self.inline_transfer:
(return_status, msg) = self._inline_tcl_xfer(source_file=source_file,
dest_file=dest_file,
file_system=file_system)
else:
(return_status, msg) = self._scp_file(source_file=source_file, dest_file=dest_file,
file_system=file_system)
if not return_status:
if msg == '':
msg = "Transfer to remote device failed"
return (return_status, msg) | Transfer file to remote device for either merge or replace operations
Returns (return_status, msg) | entailment |
def load_replace_candidate(self, filename=None, config=None):
"""
SCP file to device filesystem, defaults to candidate_config.
Return None or raise exception
"""
self.config_replace = True
return_status, msg = self._load_candidate_wrapper(source_file=filename,
source_config=config,
dest_file=self.candidate_cfg,
file_system=self.dest_file_system)
if not return_status:
raise ReplaceConfigException(msg) | SCP file to device filesystem, defaults to candidate_config.
Return None or raise exception | entailment |
def load_merge_candidate(self, filename=None, config=None):
"""
SCP file to remote device.
Merge configuration in: copy <file> running-config
"""
self.config_replace = False
return_status, msg = self._load_candidate_wrapper(source_file=filename,
source_config=config,
dest_file=self.merge_cfg,
file_system=self.dest_file_system)
if not return_status:
raise MergeConfigException(msg) | SCP file to remote device.
Merge configuration in: copy <file> running-config | entailment |
def _commit_hostname_handler(self, cmd):
"""Special handler for hostname change on commit operation."""
current_prompt = self.device.find_prompt().strip()
terminating_char = current_prompt[-1]
pattern = r"[>#{}]\s*$".format(terminating_char)
# Look exclusively for trailing pattern that includes '#' and '>'
output = self.device.send_command_expect(cmd, expect_string=pattern)
# Reset base prompt in case hostname changed
self.device.set_base_prompt()
return output | Special handler for hostname change on commit operation. | entailment |
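The prompt-pattern trick above can be checked in isolation: the expect string deliberately matches '>', '#', and the current terminating character, so the expect succeeds even if the hostname (and therefore the prompt) changes mid-command. A small sketch:

import re

current_prompt = 'router1#'
terminating_char = current_prompt[-1]
pattern = r"[>#{}]\s*$".format(terminating_char)
assert re.search(pattern, 'new-hostname#')  # still matches after a rename
assert re.search(pattern, 'new-hostname>')  # and in user mode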
def commit_config(self):
"""
If replacement operation, perform 'configure replace' for the entire config.
If merge operation, perform copy <file> running-config.
"""
# Always generate a rollback config on commit
self._gen_rollback_cfg()
if self.config_replace:
# Replace operation
filename = self.candidate_cfg
cfg_file = self._gen_full_path(filename)
if not self._check_file_exists(cfg_file):
raise ReplaceConfigException("Candidate config file does not exist")
if self.auto_rollback_on_error:
cmd = 'configure replace {} force revert trigger error'.format(cfg_file)
else:
cmd = 'configure replace {} force'.format(cfg_file)
output = self._commit_hostname_handler(cmd)
if ('original configuration has been successfully restored' in output) or \
('error' in output.lower()) or \
('failed' in output.lower()):
msg = "Candidate config could not be applied\n{}".format(output)
raise ReplaceConfigException(msg)
elif '%Please turn config archive on' in output:
msg = "napalm-ios replace() requires Cisco 'archive' feature to be enabled."
raise ReplaceConfigException(msg)
else:
# Merge operation
filename = self.merge_cfg
cfg_file = self._gen_full_path(filename)
if not self._check_file_exists(cfg_file):
raise MergeConfigException("Merge source config file does not exist")
cmd = 'copy {} running-config'.format(cfg_file)
self._disable_confirm()
output = self._commit_hostname_handler(cmd)
self._enable_confirm()
if 'Invalid input detected' in output:
self.rollback()
err_header = "Configuration merge failed; automatic rollback attempted"
merge_error = "{0}:\n{1}".format(err_header, output)
raise MergeConfigException(merge_error)
# Save config to startup (both replace and merge)
output += self.device.send_command_expect("write mem") | If replacement operation, perform 'configure replace' for the entire config.
If merge operation, perform copy <file> running-config. | entailment |
def discard_config(self):
"""Set candidate_cfg to current running-config. Erase the merge_cfg file."""
discard_candidate = 'copy running-config {}'.format(self._gen_full_path(self.candidate_cfg))
discard_merge = 'copy null: {}'.format(self._gen_full_path(self.merge_cfg))
self._disable_confirm()
self.device.send_command_expect(discard_candidate)
self.device.send_command_expect(discard_merge)
self._enable_confirm() | Set candidate_cfg to current running-config. Erase the merge_cfg file. | entailment |
def _xfer_file(self, source_file=None, source_config=None, dest_file=None, file_system=None,
TransferClass=FileTransfer):
"""Transfer file to remote device.
By default, this will use Secure Copy; if self.inline_transfer is set, it will use
Netmiko's InlineTransfer method to transfer the file inline over either SSH or telnet
(plus TCL on-box).
Return (status, msg)
status = boolean
msg = details on what happened
"""
if not source_file and not source_config:
raise ValueError("File source not specified for transfer.")
if not dest_file or not file_system:
raise ValueError("Destination file or file system not specified.")
if source_file:
kwargs = dict(ssh_conn=self.device, source_file=source_file, dest_file=dest_file,
direction='put', file_system=file_system)
elif source_config:
kwargs = dict(ssh_conn=self.device, source_config=source_config, dest_file=dest_file,
direction='put', file_system=file_system)
enable_scp = True
if self.inline_transfer:
enable_scp = False
with TransferClass(**kwargs) as transfer:
# Check if file already exists and has correct MD5
if transfer.check_file_exists() and transfer.compare_md5():
msg = "File already exists and has correct MD5: no SCP needed"
return (True, msg)
if not transfer.verify_space_available():
msg = "Insufficient space available on remote device"
return (False, msg)
if enable_scp:
transfer.enable_scp()
# Transfer file
transfer.transfer_file()
# Compares MD5 between local-remote files
if transfer.verify_file():
msg = "File successfully transferred to remote device"
return (True, msg)
else:
msg = "File transfer to remote device failed"
return (False, msg)
return (False, '') | Transfer file to remote device.
By default, this will use Secure Copy; if self.inline_transfer is set, it will use
Netmiko's InlineTransfer method to transfer the file inline over either SSH or telnet
(plus TCL on-box).
Return (status, msg)
status = boolean
msg = details on what happened | entailment |
def _gen_full_path(self, filename, file_system=None):
"""Generate full file path on remote device."""
if file_system is None:
return '{}/{}'.format(self.dest_file_system, filename)
else:
if ":" not in file_system:
raise ValueError("Invalid file_system specified: {}".format(file_system))
return '{}/{}'.format(file_system, filename) | Generate full file path on remote device. | entailment |
def _gen_rollback_cfg(self):
"""Save a configuration that can be used for rollback."""
cfg_file = self._gen_full_path(self.rollback_cfg)
cmd = 'copy running-config {}'.format(cfg_file)
self._disable_confirm()
self.device.send_command_expect(cmd)
self._enable_confirm() | Save a configuration that can be used for rollback. | entailment |
def _check_file_exists(self, cfg_file):
"""
Check that the file exists on remote device using full path.
cfg_file is full path i.e. flash:/file_name
For example
# dir flash:/candidate_config.txt
Directory of flash:/candidate_config.txt
33 -rw- 5592 Dec 18 2015 10:50:22 -08:00 candidate_config.txt
return boolean
"""
cmd = 'dir {}'.format(cfg_file)
success_pattern = 'Directory of {}'.format(cfg_file)
output = self.device.send_command_expect(cmd)
if 'Error opening' in output:
return False
elif success_pattern in output:
return True
return False | Check that the file exists on remote device using full path.
cfg_file is full path i.e. flash:/file_name
For example
# dir flash:/candidate_config.txt
Directory of flash:/candidate_config.txt
33 -rw- 5592 Dec 18 2015 10:50:22 -08:00 candidate_config.txt
return boolean | entailment |
def _expand_interface_name(self, interface_brief):
"""
Obtain the full interface name from the abbreviated name.
Cache mappings in self.interface_map.
"""
if self.interface_map.get(interface_brief):
return self.interface_map.get(interface_brief)
command = 'show int {}'.format(interface_brief)
output = self._send_command(command)
first_line = output.splitlines()[0]
if 'line protocol' in first_line:
full_int_name = first_line.split()[0]
self.interface_map[interface_brief] = full_int_name
return self.interface_map.get(interface_brief)
else:
return interface_brief | Obtain the full interface name from the abbreviated name.
Cache mappings in self.interface_map. | entailment |
def get_lldp_neighbors(self):
"""IOS implementation of get_lldp_neighbors."""
lldp = {}
command = 'show lldp neighbors'
output = self._send_command(command)
# Check if router supports the command
if '% Invalid input' in output:
return {}
# Process the output to obtain just the LLDP entries
try:
split_output = re.split(r'^Device ID.*$', output, flags=re.M)[1]
split_output = re.split(r'^Total entries displayed.*$', split_output, flags=re.M)[0]
except IndexError:
return {}
split_output = split_output.strip()
for lldp_entry in split_output.splitlines():
# Example, twb-sf-hpsw1 Fa4 120 B 17
try:
device_id, local_int_brief, hold_time, capability, remote_port = lldp_entry.split()
except ValueError:
if len(lldp_entry.split()) == 4:
# Four fields might be long_name or missing capability
capability_missing = (lldp_entry[46] == ' ')
if capability_missing:
device_id, local_int_brief, hold_time, remote_port = lldp_entry.split()
else:
# Might be long_name issue
tmp_field, hold_time, capability, remote_port = lldp_entry.split()
device_id = tmp_field[:20]
local_int_brief = tmp_field[20:]
# device_id might be abbreviated, try to get full name
lldp_tmp = self._lldp_detail_parser(local_int_brief)
device_id_new = lldp_tmp[3][0]
# Verify abbreviated and full name are consistent
if device_id_new[:20] == device_id:
device_id = device_id_new
else:
raise ValueError("Unable to obtain remote device name")
local_port = self._expand_interface_name(local_int_brief)
entry = {'port': remote_port, 'hostname': device_id}
lldp.setdefault(local_port, [])
lldp[local_port].append(entry)
return lldp | IOS implementation of get_lldp_neighbors. | entailment |
def get_lldp_neighbors_detail(self, interface=''):
"""
IOS implementation of get_lldp_neighbors_detail.
Calls get_lldp_neighbors.
"""
lldp = {}
lldp_neighbors = self.get_lldp_neighbors()
# Filter to specific interface
if interface:
lldp_data = lldp_neighbors.get(interface)
if lldp_data:
lldp_neighbors = {interface: lldp_data}
else:
lldp_neighbors = {}
for interface in lldp_neighbors:
local_port = interface
lldp_fields = self._lldp_detail_parser(interface)
# Convert any 'not advertised' to 'N/A'
for field in lldp_fields:
for i, value in enumerate(field):
if 'not advertised' in value:
field[i] = 'N/A'
number_entries = len(lldp_fields[0])
# re.findall will return a list. Make sure same number of entries always returned.
for test_list in lldp_fields:
if len(test_list) != number_entries:
raise ValueError("Failure processing show lldp neighbors detail")
# Standardize the fields
port_id, port_description, chassis_id, system_name, system_description, \
system_capabilities, enabled_capabilities, remote_address = lldp_fields
standardized_fields = zip(port_id, port_description, chassis_id, system_name,
system_description, system_capabilities,
enabled_capabilities, remote_address)
lldp.setdefault(local_port, [])
for entry in standardized_fields:
remote_port_id, remote_port_description, remote_chassis_id, remote_system_name, \
remote_system_description, remote_system_capab, remote_enabled_capab, \
remote_mgmt_address = entry
lldp[local_port].append({
'parent_interface': u'N/A',
'remote_port': remote_port_id,
'remote_port_description': remote_port_description,
'remote_chassis_id': remote_chassis_id,
'remote_system_name': remote_system_name,
'remote_system_description': remote_system_description,
'remote_system_capab': remote_system_capab,
'remote_system_enable_capab': remote_enabled_capab})
return lldp | IOS implementation of get_lldp_neighbors_detail.
Calls get_lldp_neighbors. | entailment |
def get_facts(self):
"""Return a set of facts from the devices."""
# default values.
vendor = u'Cisco'
uptime = -1
serial_number, fqdn, os_version, hostname, domain_name = ('Unknown',) * 5
# obtain output from device
show_ver = self._send_command('show version')
show_hosts = self._send_command('show hosts')
show_ip_int_br = self._send_command('show ip interface brief')
# uptime/serial_number/IOS version
for line in show_ver.splitlines():
if ' uptime is ' in line:
hostname, uptime_str = line.split(' uptime is ')
uptime = self.parse_uptime(uptime_str)
hostname = hostname.strip()
if 'Processor board ID' in line:
_, serial_number = line.split("Processor board ID ")
serial_number = serial_number.strip()
if re.search(r"Cisco IOS Software", line):
try:
_, os_version = line.split("Cisco IOS Software, ")
except ValueError:
# Handle 'Cisco IOS Software [Denali],'
_, os_version = re.split(r"Cisco IOS Software \[.*?\], ", line)
os_version = os_version.strip()
elif re.search(r"IOS (tm).+Software", line):
_, os_version = line.split("IOS (tm) ")
os_version = os_version.strip()
# Determine domain_name and fqdn
for line in show_hosts.splitlines():
if 'Default domain' in line:
_, domain_name = line.split("Default domain is ")
domain_name = domain_name.strip()
break
if domain_name != 'Unknown' and hostname != 'Unknown':
fqdn = u'{}.{}'.format(hostname, domain_name)
# model filter
try:
match_model = re.search(r"Cisco (.+?) .+bytes of", show_ver, flags=re.IGNORECASE)
model = match_model.group(1)
except AttributeError:
model = u'Unknown'
# interface_list filter
interface_list = []
show_ip_int_br = show_ip_int_br.strip()
for line in show_ip_int_br.splitlines():
if 'Interface ' in line:
continue
interface = line.split()[0]
interface_list.append(interface)
return {
'uptime': uptime,
'vendor': vendor,
'os_version': py23_compat.text_type(os_version),
'serial_number': py23_compat.text_type(serial_number),
'model': py23_compat.text_type(model),
'hostname': py23_compat.text_type(hostname),
'fqdn': fqdn,
'interface_list': interface_list
} | Return a set of facts from the devices. | entailment |
def get_interfaces(self):
"""
Get interface details.
last_flapped is not implemented
Example Output:
{ u'Vlan1': { 'description': u'N/A',
'is_enabled': True,
'is_up': True,
'last_flapped': -1.0,
'mac_address': u'a493.4cc1.67a7',
'speed': 100},
u'Vlan100': { 'description': u'Data Network',
'is_enabled': True,
'is_up': True,
'last_flapped': -1.0,
'mac_address': u'a493.4cc1.67a7',
'speed': 100},
u'Vlan200': { 'description': u'Voice Network',
'is_enabled': True,
'is_up': True,
'last_flapped': -1.0,
'mac_address': u'a493.4cc1.67a7',
'speed': 100}}
"""
# default values.
last_flapped = -1.0
command = 'show interfaces'
output = self._send_command(command)
interface = description = mac_address = speed = speedformat = ''
is_enabled = is_up = None
interface_dict = {}
for line in output.splitlines():
interface_regex_1 = r"^(\S+?)\s+is\s+(.+?),\s+line\s+protocol\s+is\s+(\S+)"
interface_regex_2 = r"^(\S+)\s+is\s+(up|down)"
for pattern in (interface_regex_1, interface_regex_2):
interface_match = re.search(pattern, line)
if interface_match:
interface = interface_match.group(1)
status = interface_match.group(2)
try:
protocol = interface_match.group(3)
except IndexError:
protocol = ''
if 'admin' in status.lower():
is_enabled = False
else:
is_enabled = True
if protocol:
is_up = bool('up' in protocol)
else:
is_up = bool('up' in status)
break
mac_addr_regex = r"^\s+Hardware.+address\s+is\s+({})".format(MAC_REGEX)
if re.search(mac_addr_regex, line):
mac_addr_match = re.search(mac_addr_regex, line)
mac_address = napalm_base.helpers.mac(mac_addr_match.groups()[0])
descr_regex = r"^\s+Description:\s+(.+?)$"
if re.search(descr_regex, line):
descr_match = re.search(descr_regex, line)
description = descr_match.groups()[0]
speed_regex = r"^\s+MTU\s+\d+.+BW\s+(\d+)\s+([KMG]?b)"
if re.search(speed_regex, line):
speed_match = re.search(speed_regex, line)
speed = speed_match.groups()[0]
speedformat = speed_match.groups()[1]
speed = float(speed)
if speedformat.startswith('Kb'):
speed = speed / 1000.0
elif speedformat.startswith('Gb'):
speed = speed * 1000
speed = int(round(speed))
if interface == '':
raise ValueError("Interface attributes were \
found without any known interface")
if not isinstance(is_up, bool) or not isinstance(is_enabled, bool):
raise ValueError("Did not correctly find the interface status")
interface_dict[interface] = {'is_enabled': is_enabled, 'is_up': is_up,
'description': description, 'mac_address': mac_address,
'last_flapped': last_flapped, 'speed': speed}
interface = description = mac_address = speed = speedformat = ''
is_enabled = is_up = None
return interface_dict | Get interface details.
last_flapped is not implemented
Example Output:
{ u'Vlan1': { 'description': u'N/A',
'is_enabled': True,
'is_up': True,
'last_flapped': -1.0,
'mac_address': u'a493.4cc1.67a7',
'speed': 100},
u'Vlan100': { 'description': u'Data Network',
'is_enabled': True,
'is_up': True,
'last_flapped': -1.0,
'mac_address': u'a493.4cc1.67a7',
'speed': 100},
u'Vlan200': { 'description': u'Voice Network',
'is_enabled': True,
'is_up': True,
'last_flapped': -1.0,
'mac_address': u'a493.4cc1.67a7',
'speed': 100}} | entailment |
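The bandwidth normalization above converts the parsed BW figure to Mbit/s. A hedged standalone sketch of that step (normalize_speed is a hypothetical helper; the assumption is IOS reports values such as 'BW 100000 Kbit'):

def normalize_speed(value, unit):
    """Convert a parsed BW value ('100000', 'Kbit') to integer Mbit/s."""
    speed = float(value)
    if unit.startswith('Kb'):
        speed /= 1000.0  # Kbit -> Mbit
    elif unit.startswith('Gb'):
        speed *= 1000    # Gbit -> Mbit
    return int(round(speed))

assert normalize_speed('100000', 'Kbit') == 100    # FastEthernet
assert normalize_speed('1000000', 'Kbit') == 1000  # GigabitEthernet
assert normalize_speed('10', 'Gbit') == 10000      # TenGigabitEthernet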
def get_interfaces_ip(self):
"""
Get interface ip details.
Returns a dict of dicts
Example Output:
{ u'FastEthernet8': { 'ipv4': { u'10.66.43.169': { 'prefix_length': 22}}},
u'Loopback555': { 'ipv4': { u'192.168.1.1': { 'prefix_length': 24}},
'ipv6': { u'1::1': { 'prefix_length': 64},
u'2001:DB8:1::1': { 'prefix_length': 64},
u'2::': { 'prefix_length': 64},
u'FE80::3': { 'prefix_length': 10}}},
u'Tunnel0': { 'ipv4': { u'10.63.100.9': { 'prefix_length': 24}}},
u'Tunnel1': { 'ipv4': { u'10.63.101.9': { 'prefix_length': 24}}},
u'Vlan100': { 'ipv4': { u'10.40.0.1': { 'prefix_length': 24},
u'10.41.0.1': { 'prefix_length': 24},
u'10.65.0.1': { 'prefix_length': 24}}},
u'Vlan200': { 'ipv4': { u'10.63.176.57': { 'prefix_length': 29}}}}
"""
interfaces = {}
command = 'show ip interface'
show_ip_interface = self._send_command(command)
command = 'show ipv6 interface'
show_ipv6_interface = self._send_command(command)
INTERNET_ADDRESS = r'\s+(?:Internet address is|Secondary address)'
INTERNET_ADDRESS += r' (?P<ip>{})/(?P<prefix>\d+)'.format(IPV4_ADDR_REGEX)
LINK_LOCAL_ADDRESS = r'\s+IPv6 is enabled, link-local address is (?P<ip>[a-fA-F0-9:]+)'
GLOBAL_ADDRESS = r'\s+(?P<ip>[a-fA-F0-9:]+), subnet is (?:[a-fA-F0-9:]+)/(?P<prefix>\d+)'
interfaces = {}
for line in show_ip_interface.splitlines():
if len(line.strip()) == 0:
continue
if line[0] != ' ':
ipv4 = {}
interface_name = line.split()[0]
m = re.match(INTERNET_ADDRESS, line)
if m:
ip, prefix = m.groups()
ipv4.update({ip: {"prefix_length": int(prefix)}})
interfaces[interface_name] = {'ipv4': ipv4}
for line in show_ipv6_interface.splitlines():
if len(line.strip()) == 0:
continue
if line[0] != ' ':
ifname = line.split()[0]
ipv6 = {}
if ifname not in interfaces:
interfaces[ifname] = {'ipv6': ipv6}
else:
interfaces[ifname].update({'ipv6': ipv6})
m = re.match(LINK_LOCAL_ADDRESS, line)
if m:
ip = m.group(1)
ipv6.update({ip: {"prefix_length": 10}})
m = re.match(GLOBAL_ADDRESS, line)
if m:
ip, prefix = m.groups()
ipv6.update({ip: {"prefix_length": int(prefix)}})
# Interfaces without IPv6 do not appear in 'show ipv6 interface'
return interfaces | Get interface ip details.
Returns a dict of dicts
Example Output:
{ u'FastEthernet8': { 'ipv4': { u'10.66.43.169': { 'prefix_length': 22}}},
u'Loopback555': { 'ipv4': { u'192.168.1.1': { 'prefix_length': 24}},
'ipv6': { u'1::1': { 'prefix_length': 64},
u'2001:DB8:1::1': { 'prefix_length': 64},
u'2::': { 'prefix_length': 64},
u'FE80::3': { 'prefix_length': 10}}},
u'Tunnel0': { 'ipv4': { u'10.63.100.9': { 'prefix_length': 24}}},
u'Tunnel1': { 'ipv4': { u'10.63.101.9': { 'prefix_length': 24}}},
u'Vlan100': { 'ipv4': { u'10.40.0.1': { 'prefix_length': 24},
u'10.41.0.1': { 'prefix_length': 24},
u'10.65.0.1': { 'prefix_length': 24}}},
u'Vlan200': { 'ipv4': { u'10.63.176.57': { 'prefix_length': 29}}}} | entailment |
def bgp_time_conversion(bgp_uptime):
"""
Convert string time to seconds.
Examples
00:14:23
00:13:40
00:00:21
00:00:13
00:00:49
1d11h
1d17h
1w0d
8w5d
1y28w
never
"""
bgp_uptime = bgp_uptime.strip()
uptime_letters = set(['w', 'h', 'd'])
if 'never' in bgp_uptime:
return -1
elif ':' in bgp_uptime:
times = bgp_uptime.split(":")
times = [int(x) for x in times]
hours, minutes, seconds = times
return (hours * 3600) + (minutes * 60) + seconds
# Check if any letters 'w', 'h', 'd' are in the time string
elif uptime_letters & set(bgp_uptime):
form1 = r'(\d+)d(\d+)h' # 1d17h
form2 = r'(\d+)w(\d+)d' # 8w5d
form3 = r'(\d+)y(\d+)w' # 1y28w
match = re.search(form1, bgp_uptime)
if match:
days = int(match.group(1))
hours = int(match.group(2))
return (days * DAY_SECONDS) + (hours * 3600)
match = re.search(form2, bgp_uptime)
if match:
weeks = int(match.group(1))
days = int(match.group(2))
return (weeks * WEEK_SECONDS) + (days * DAY_SECONDS)
match = re.search(form3, bgp_uptime)
if match:
years = int(match.group(1))
weeks = int(match.group(2))
return (years * YEAR_SECONDS) + (weeks * WEEK_SECONDS)
raise ValueError("Unexpected value for BGP uptime string: {}".format(bgp_uptime)) | Convert string time to seconds.
Examples
00:14:23
00:13:40
00:00:21
00:00:13
00:00:49
1d11h
1d17h
1w0d
8w5d
1y28w
never | entailment |
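Worked conversions for the sample formats, assuming the conventional constants below (an assumption; the module-level DAY_SECONDS/WEEK_SECONDS/YEAR_SECONDS values are not shown here):

DAY_SECONDS = 86400
WEEK_SECONDS = 7 * DAY_SECONDS    # 604800
YEAR_SECONDS = 365 * DAY_SECONDS  # 31536000

assert 0 * 3600 + 14 * 60 + 23 == 863                    # '00:14:23'
assert 1 * DAY_SECONDS + 11 * 3600 == 126000             # '1d11h'
assert 8 * WEEK_SECONDS + 5 * DAY_SECONDS == 5270400     # '8w5d'
assert 1 * YEAR_SECONDS + 28 * WEEK_SECONDS == 48470400  # '1y28w'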
def get_bgp_neighbors(self):
"""BGP neighbor information.
Currently no VRF support. Supports both IPv4 and IPv6.
"""
supported_afi = ['ipv4', 'ipv6']
bgp_neighbor_data = dict()
bgp_neighbor_data['global'] = {}
# get summary output from device
cmd_bgp_all_sum = 'show bgp all summary'
summary_output = self._send_command(cmd_bgp_all_sum).strip()
# get neighbor output from device
neighbor_output = ''
for afi in supported_afi:
cmd_bgp_neighbor = 'show bgp %s unicast neighbors' % afi
neighbor_output += self._send_command(cmd_bgp_neighbor).strip()
# trailing newline required for parsing
neighbor_output += "\n"
# Regular expressions used for parsing BGP summary
parse_summary = {
'patterns': [
# For address family: IPv4 Unicast
{'regexp': re.compile(r'^For address family: (?P<afi>\S+) '),
'record': False},
# Capture router_id and local_as values, e.g.:
# BGP router identifier 10.0.1.1, local AS number 65000
{'regexp': re.compile(r'^.* router identifier (?P<router_id>{}), '
r'local AS number (?P<local_as>{})'.format(
IPV4_ADDR_REGEX, ASN_REGEX
)),
'record': False},
# Match neighbor summary row, capturing useful details and
# discarding the 5 columns that we don't care about, e.g.:
# Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd
# 10.0.0.2 4 65000 1336020 64337701 1011343614 0 0 8w0d 3143
{'regexp': re.compile(r'^\*?(?P<remote_addr>({})|({}))'
r'\s+\d+\s+(?P<remote_as>{})(\s+\S+){{5}}\s+'
r'(?P<uptime>(never)|\d+\S+)'
r'\s+(?P<accepted_prefixes>\d+)'.format(
IPV4_ADDR_REGEX, IPV6_ADDR_REGEX, ASN_REGEX
)),
'record': True},
# Same as above, but for peer that are not Established, e.g.:
# Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd
# 192.168.0.2 4 65002 0 0 1 0 0 never Active
{'regexp': re.compile(r'^\*?(?P<remote_addr>({})|({}))'
r'\s+\d+\s+(?P<remote_as>{})(\s+\S+){{5}}\s+'
r'(?P<uptime>(never)|\d+\S+)\s+(?P<state>\D.*)'.format(
IPV4_ADDR_REGEX, IPV6_ADDR_REGEX, ASN_REGEX
)),
'record': True},
# ipv6 peers often wrap across rows because of the longer peer address;
# match as above, but in separate expressions, e.g.:
# Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd
# 2001:DB8::4
# 4 65004 9900690 612449 155362939 0 0 26w6d 36391
{'regexp': re.compile(r'^\*?(?P<remote_addr>({})|({}))'.format(
IPV4_ADDR_REGEX, IPV6_ADDR_REGEX
)),
'record': False},
{'regexp': re.compile(r'^\s+\d+\s+(?P<remote_as>{})(\s+\S+){{5}}\s+'
r'(?P<uptime>(never)|\d+\S+)'
r'\s+(?P<accepted_prefixes>\d+)'.format(
ASN_REGEX
)),
'record': True},
# Same as above, but for peers that are not Established, e.g.:
# Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd
# 2001:DB8::3
# 4 65003 0 0 1 0 0 never Idle (Admin)
{'regexp': re.compile(r'^\s+\d+\s+(?P<remote_as>{})(\s+\S+){{5}}\s+'
r'(?P<uptime>(never)|\d+\S+)\s+(?P<state>\D.*)'.format(
ASN_REGEX
)),
'record': True}
],
'no_fill_fields': ['accepted_prefixes', 'state', 'uptime', 'remote_as', 'remote_addr']
}
parse_neighbors = {
'patterns': [
# Capture BGP neighbor is 10.0.0.2, remote AS 65000, internal link
{'regexp': re.compile(r'^BGP neighbor is (?P<remote_addr>({})|({})),'
r'\s+remote AS (?P<remote_as>{}).*'.format(
IPV4_ADDR_REGEX, IPV6_ADDR_REGEX, ASN_REGEX
)),
'record': False},
# Capture description
{'regexp': re.compile(r'^\s+Description: (?P<description>.+)'),
'record': False},
# Capture remote_id, e.g.:
# BGP version 4, remote router ID 10.0.1.2
{'regexp': re.compile(r'^\s+BGP version \d+, remote router ID '
r'(?P<remote_id>{})'.format(IPV4_ADDR_REGEX)),
'record': False},
# Capture AFI and SAFI names, e.g.:
# For address family: IPv4 Unicast
{'regexp': re.compile(r'^\s+For address family: (?P<afi>\S+) '),
'record': False},
# Capture current sent and accepted prefixes, e.g.:
# Prefixes Current: 637213 3142 (Consumes 377040 bytes)
{'regexp': re.compile(r'^\s+Prefixes Current:\s+(?P<sent_prefixes>\d+)\s+'
r'(?P<accepted_prefixes>\d+).*'),
'record': False},
# Capture received_prefixes if soft-reconfig is enabled for the peer
{'regexp': re.compile(r'^\s+Saved \(soft-reconfig\):.+?(?P<received_prefixes>\d+).*'),
'record': True},
# Otherwise, use the following as an end of row marker
{'regexp': re.compile(r'^\s+Local Policy Denied Prefixes:.+'),
'record': True}
],
# fields that should not be "filled down" across table rows
'no_fill_fields': ['received_prefixes', 'accepted_prefixes', 'sent_prefixes']
}
# Parse outputs into a list of dicts
summary_data = []
summary_data_entry = {}
for line in summary_output.splitlines():
# check for matches against each pattern
for item in parse_summary['patterns']:
match = item['regexp'].match(line)
if match:
# a match was found, so update the temp entry with the match's groupdict
summary_data_entry.update(match.groupdict())
if item['record']:
# Record indicates the last piece of data has been obtained; move
# on to next entry
summary_data.append(copy.deepcopy(summary_data_entry))
# remove keys that are listed in no_fill_fields before the next pass
for field in parse_summary['no_fill_fields']:
try:
del summary_data_entry[field]
except KeyError:
pass
break
neighbor_data = []
neighbor_data_entry = {}
for line in neighbor_output.splitlines():
# check for matches against each pattern
for item in parse_neighbors['patterns']:
match = item['regexp'].match(line)
if match:
# a match was found, so update the temp entry with the match's groupdict
neighbor_data_entry.update(match.groupdict())
if item['record']:
# Record indicates the last piece of data has been obtained; move
# on to next entry
neighbor_data.append(copy.deepcopy(neighbor_data_entry))
# remove keys that are listed in no_fill_fields before the next pass
for field in parse_neighbors['no_fill_fields']:
try:
del neighbor_data_entry[field]
except KeyError:
pass
break
router_id = None
for entry in summary_data:
if not router_id:
router_id = entry['router_id']
elif entry['router_id'] != router_id:
raise ValueError
# check the router_id looks like an ipv4 address
router_id = napalm_base.helpers.ip(router_id, version=4)
# add parsed data to output dict
bgp_neighbor_data['global']['router_id'] = router_id
bgp_neighbor_data['global']['peers'] = {}
for entry in summary_data:
remote_addr = napalm_base.helpers.ip(entry['remote_addr'])
afi = entry['afi'].lower()
# check that we're looking at a supported afi
if afi not in supported_afi:
continue
# get neighbor_entry out of neighbor data
neighbor_entry = None
for neighbor in neighbor_data:
if (neighbor['afi'].lower() == afi and
napalm_base.helpers.ip(neighbor['remote_addr']) == remote_addr):
neighbor_entry = neighbor
break
if not isinstance(neighbor_entry, dict):
raise ValueError("Couldn't find neighbor data for %s in afi %s" %
(remote_addr, afi))
# check for admin down state
try:
if "(Admin)" in entry['state']:
is_enabled = False
else:
is_enabled = True
except KeyError:
is_enabled = True
# parse uptime value
uptime = self.bgp_time_conversion(entry['uptime'])
# Uptime should be -1 if BGP session not up
is_up = uptime >= 0
# check whether session is up for address family and get prefix count
try:
accepted_prefixes = int(entry['accepted_prefixes'])
except (ValueError, KeyError):
accepted_prefixes = -1
# Only parse neighbor detailed data if BGP session is-up
if is_up:
try:
# override accepted_prefixes with neighbor data if possible (since that's newer)
accepted_prefixes = int(neighbor_entry['accepted_prefixes'])
except (ValueError, KeyError):
pass
# try to get received prefix count, otherwise set to accepted_prefixes
received_prefixes = neighbor_entry.get('received_prefixes', accepted_prefixes)
# try to get sent prefix count and convert to int, otherwise set to -1
sent_prefixes = int(neighbor_entry.get('sent_prefixes', -1))
else:
received_prefixes = -1
sent_prefixes = -1
# get description
try:
description = py23_compat.text_type(neighbor_entry['description'])
except KeyError:
description = ''
# check the remote router_id looks like an ipv4 address
remote_id = napalm_base.helpers.ip(neighbor_entry['remote_id'], version=4)
if remote_addr not in bgp_neighbor_data['global']['peers']:
bgp_neighbor_data['global']['peers'][remote_addr] = {
'local_as': napalm_base.helpers.as_number(entry['local_as']),
'remote_as': napalm_base.helpers.as_number(entry['remote_as']),
'remote_id': remote_id,
'is_up': is_up,
'is_enabled': is_enabled,
'description': description,
'uptime': uptime,
'address_family': {
afi: {
'received_prefixes': received_prefixes,
'accepted_prefixes': accepted_prefixes,
'sent_prefixes': sent_prefixes
}
}
}
else:
# found previous data for matching remote_addr, but for different afi
existing = bgp_neighbor_data['global']['peers'][remote_addr]
assert afi not in existing['address_family']
# compare with existing values and croak if they don't match
assert existing['local_as'] == napalm_base.helpers.as_number(entry['local_as'])
assert existing['remote_as'] == napalm_base.helpers.as_number(entry['remote_as'])
assert existing['remote_id'] == remote_id
assert existing['is_enabled'] == is_enabled
assert existing['description'] == description
# merge other values in a sane manner
existing['is_up'] = existing['is_up'] or is_up
existing['uptime'] = max(existing['uptime'], uptime)
existing['address_family'][afi] = {
'received_prefixes': received_prefixes,
'accepted_prefixes': accepted_prefixes,
'sent_prefixes': sent_prefixes
}
return bgp_neighbor_data | BGP neighbor information.
Currently no VRF support. Supports both IPv4 and IPv6. | entailment |
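The summary/neighbor parsing above is an instance of a reusable "fill-down" table parser: each regexp merges its named groups into a rolling entry, 'record' flushes a row, and no_fill_fields are cleared so row-specific values never leak into the next row. A minimal sketch with hypothetical patterns:

import copy
import re

patterns = [
    {'regexp': re.compile(r'^host (?P<host>\S+)'), 'record': False},
    {'regexp': re.compile(r'^uptime (?P<uptime>\d+)'), 'record': True},
]
no_fill = ['uptime']  # must not carry over between rows

rows, entry = [], {}
for line in "host r1\nuptime 10\nuptime 20\nhost r2\nuptime 5\n".splitlines():
    for item in patterns:
        match = item['regexp'].match(line)
        if match:
            entry.update(match.groupdict())
            if item['record']:
                rows.append(copy.deepcopy(entry))
                for field in no_fill:
                    entry.pop(field, None)
            break

print(rows)
# [{'host': 'r1', 'uptime': '10'}, {'host': 'r1', 'uptime': '20'},
#  {'host': 'r2', 'uptime': '5'}]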
def get_environment(self):
"""
Get environment facts.
power and fan are currently not implemented
cpu is using 1-minute average
cpu hard-coded to cpu0 (i.e. only a single CPU)
"""
environment = {}
cpu_cmd = 'show proc cpu'
mem_cmd = 'show memory statistics'
temp_cmd = 'show env temperature status'
output = self._send_command(cpu_cmd)
environment.setdefault('cpu', {})
environment['cpu'][0] = {}
environment['cpu'][0]['%usage'] = 0.0
for line in output.splitlines():
if 'CPU utilization' in line:
# CPU utilization for five seconds: 2%/0%; one minute: 2%; five minutes: 1%
cpu_regex = r'^.*one minute: (\d+)%; five.*$'
match = re.search(cpu_regex, line)
environment['cpu'][0]['%usage'] = float(match.group(1))
break
output = self._send_command(mem_cmd)
for line in output.splitlines():
if 'Processor' in line:
_, _, _, proc_used_mem, proc_free_mem = line.split()[:5]
elif 'I/O' in line or 'io' in line:
_, _, _, io_used_mem, io_free_mem = line.split()[:5]
used_mem = int(proc_used_mem) + int(io_used_mem)
free_mem = int(proc_free_mem) + int(io_free_mem)
environment.setdefault('memory', {})
environment['memory']['used_ram'] = used_mem
environment['memory']['available_ram'] = free_mem
environment.setdefault('temperature', {})
# The 'show env temperature status' is not ubiquitous in Cisco IOS
output = self._send_command(temp_cmd)
if '% Invalid' not in output:
for line in output.splitlines():
if 'System Temperature Value' in line:
system_temp = float(line.split(':')[1].split()[0])
elif 'Yellow Threshold' in line:
system_temp_alert = float(line.split(':')[1].split()[0])
elif 'Red Threshold' in line:
system_temp_crit = float(line.split(':')[1].split()[0])
env_value = {'is_alert': system_temp >= system_temp_alert,
'is_critical': system_temp >= system_temp_crit, 'temperature': system_temp}
environment['temperature']['system'] = env_value
else:
env_value = {'is_alert': False, 'is_critical': False, 'temperature': -1.0}
environment['temperature']['invalid'] = env_value
# Initialize 'power' and 'fan' to default values (not implemented)
environment.setdefault('power', {})
environment['power']['invalid'] = {'status': True, 'output': -1.0, 'capacity': -1.0}
environment.setdefault('fans', {})
environment['fans']['invalid'] = {'status': True}
return environment | Get environment facts.
power and fan are currently not implemented
cpu is using 1-minute average
cpu hard-coded to cpu0 (i.e. only a single CPU) | entailment |
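The one-minute CPU extraction in isolation (sample line taken from the comment above):

import re

line = 'CPU utilization for five seconds: 2%/0%; one minute: 2%; five minutes: 1%'
match = re.search(r'^.*one minute: (\d+)%; five.*$', line)
assert float(match.group(1)) == 2.0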
def get_arp_table(self):
"""
Get arp table information.
Return a list of dictionaries having the following set of keys:
* interface (string)
* mac (string)
* ip (string)
* age (float)
For example::
[
{
'interface' : 'MgmtEth0/RSP0/CPU0/0',
'mac' : '5c:5e:ab:da:3c:f0',
'ip' : '172.17.17.1',
'age' : 1454496274.84
},
{
'interface': 'MgmtEth0/RSP0/CPU0/0',
'mac' : '66:0e:94:96:e0:ff',
'ip' : '172.17.17.2',
'age' : 1435641582.49
}
]
"""
arp_table = []
command = 'show arp | exclude Incomplete'
output = self._send_command(command)
# Skip the first line which is a header
output = output.split('\n')
output = output[1:]
for line in output:
# Skip blank lines rather than returning early with the wrong type
if len(line) == 0:
continue
if len(line.split()) == 5:
# Static ARP entries have no interface
# Internet 10.0.0.1 - 0010.2345.1cda ARPA
interface = ''
protocol, address, age, mac, eth_type = line.split()
elif len(line.split()) == 6:
protocol, address, age, mac, eth_type, interface = line.split()
else:
raise ValueError("Unexpected output from: {}".format(line.split()))
try:
if age == '-':
age = 0
age = float(age)
except ValueError:
raise ValueError("Unable to convert age value to float: {}".format(age))
# Validate we matched correctly
if not re.search(RE_IPADDR, address):
raise ValueError("Invalid IP Address detected: {}".format(address))
if not re.search(RE_MAC, mac):
raise ValueError("Invalid MAC Address detected: {}".format(mac))
entry = {
'interface': interface,
'mac': napalm_base.helpers.mac(mac),
'ip': address,
'age': age
}
arp_table.append(entry)
return arp_table | Get arp table information.
Return a list of dictionaries having the following set of keys:
* interface (string)
* mac (string)
* ip (string)
* age (float)
For example::
[
{
'interface' : 'MgmtEth0/RSP0/CPU0/0',
'mac' : '5c:5e:ab:da:3c:f0',
'ip' : '172.17.17.1',
'age' : 1454496274.84
},
{
'interface': 'MgmtEth0/RSP0/CPU0/0',
'mac' : '66:0e:94:96:e0:ff',
'ip' : '172.17.17.2',
'age' : 1435641582.49
}
] | entailment |
def cli(self, commands):
"""
Execute a list of commands and return the output in a dictionary format using the command
as the key.
Example input:
['show clock', 'show calendar']
Output example:
{ 'show calendar': u'22:02:01 UTC Thu Feb 18 2016',
'show clock': u'*22:01:51.165 UTC Thu Feb 18 2016'}
"""
cli_output = dict()
if not isinstance(commands, list):
raise TypeError('Please enter a valid list of commands!')
for command in commands:
output = self._send_command(command)
if 'Invalid input detected' in output:
raise ValueError('Unable to execute command "{}"'.format(command))
cli_output.setdefault(command, {})
cli_output[command] = output
return cli_output | Execute a list of commands and return the output in a dictionary format using the command
as the key.
Example input:
['show clock', 'show calendar']
Output example:
{ 'show calendar': u'22:02:01 UTC Thu Feb 18 2016',
'show clock': u'*22:01:51.165 UTC Thu Feb 18 2016'} | entailment |
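Typical usage, with `driver` standing in for a connected instance of this class (hypothetical; the device output is invented for illustration):

# Hypothetical: 'driver' is an already-opened instance of this driver class.
output = driver.cli(['show clock', 'show calendar'])
print(output['show clock'])     # '*22:01:51.165 UTC Thu Feb 18 2016'
print(output['show calendar'])  # '22:02:01 UTC Thu Feb 18 2016'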
def get_mac_address_table(self):
"""
Returns a lists of dictionaries. Each dictionary represents an entry in the MAC Address
Table, having the following keys
* mac (string)
* interface (string)
* vlan (int)
* active (boolean)
* static (boolean)
* moves (int)
* last_move (float)
Format1:
Destination Address Address Type VLAN Destination Port
------------------- ------------ ---- --------------------
6400.f1cf.2cc6 Dynamic 1 Wlan-GigabitEthernet0
Cat 6500:
Legend: * - primary entry
age - seconds since last seen
n/a - not available
vlan mac address type learn age ports
------+----------------+--------+-----+----------+--------------------------
* 999 1111.2222.3333 dynamic Yes 0 Port-channel1
999 1111.2222.3333 dynamic Yes 0 Port-channel1
Cat 4948
Unicast Entries
vlan mac address type protocols port
-------+---------------+--------+---------------------+--------------------
999 1111.2222.3333 dynamic ip Port-channel1
Cat 2960
Mac Address Table
-------------------------------------------
Vlan Mac Address Type Ports
---- ----------- -------- -----
All 1111.2222.3333 STATIC CPU
"""
RE_MACTABLE_DEFAULT = r"^" + MAC_REGEX
RE_MACTABLE_6500_1 = r"^\*\s+{}\s+{}\s+".format(VLAN_REGEX, MAC_REGEX) # 7 fields
RE_MACTABLE_6500_2 = r"^{}\s+{}\s+".format(VLAN_REGEX, MAC_REGEX) # 6 fields
RE_MACTABLE_6500_3 = r"^\s{51}\S+" # Fill down from prior
RE_MACTABLE_4500_1 = r"^{}\s+{}\s+".format(VLAN_REGEX, MAC_REGEX) # 5 fields
RE_MACTABLE_4500_2 = r"^\s{32}\S+" # Fill down from prior
RE_MACTABLE_2960_1 = r"^All\s+{}".format(MAC_REGEX)
RE_MACTABLE_GEN_1 = r"^{}\s+{}\s+".format(VLAN_REGEX, MAC_REGEX) # 4 fields (2960/4500)
def process_mac_fields(vlan, mac, mac_type, interface):
"""Return proper data for mac address fields."""
if mac_type.lower() in ['self', 'static', 'system']:
static = True
if vlan.lower() == 'all':
vlan = 0
if interface.lower() == 'cpu' or re.search(r'router', interface.lower()) or \
re.search(r'switch', interface.lower()):
interface = ''
else:
static = False
if mac_type.lower() in ['dynamic']:
active = True
else:
active = False
return {
'mac': napalm_base.helpers.mac(mac),
'interface': interface,
'vlan': int(vlan),
'static': static,
'active': active,
'moves': -1,
'last_move': -1.0
}
mac_address_table = []
command = IOS_COMMANDS['show_mac_address']
output = self._send_command(command)
# Skip the header lines
output = re.split(r'^----.*', output, flags=re.M)[1:]
output = "\n".join(output).strip()
# Strip any leading asterisks
output = re.sub(r"^\*", "", output, flags=re.M)
fill_down_vlan = fill_down_mac = fill_down_mac_type = ''
for line in output.splitlines():
# Cat6500 one-off and 4500 multicast format
if (re.search(RE_MACTABLE_6500_3, line) or re.search(RE_MACTABLE_4500_2, line)):
interface = line.strip()
if ',' in interface:
interfaces = interface.split(',')
else:
interfaces = []
interfaces.append(interface)
for single_interface in interfaces:
mac_address_table.append(process_mac_fields(fill_down_vlan, fill_down_mac,
fill_down_mac_type,
single_interface))
continue
line = line.strip()
if line == '':
continue
if re.search(r"^---", line):
# Convert any '---' to VLAN 0
line = re.sub(r"^---", "0", line, flags=re.M)
# Format1
if re.search(RE_MACTABLE_DEFAULT, line):
if len(line.split()) == 4:
mac, mac_type, vlan, interface = line.split()
mac_address_table.append(process_mac_fields(vlan, mac, mac_type, interface))
else:
raise ValueError("Unexpected output from: {}".format(line.split()))
# Cat6500 format
elif (re.search(RE_MACTABLE_6500_1, line) or re.search(RE_MACTABLE_6500_2, line)) and \
len(line.split()) >= 6:
if len(line.split()) == 7:
_, vlan, mac, mac_type, _, _, interface = line.split()
elif len(line.split()) == 6:
vlan, mac, mac_type, _, _, interface = line.split()
if ',' in interface:
interfaces = interface.split(',')
fill_down_vlan = vlan
fill_down_mac = mac
fill_down_mac_type = mac_type
for single_interface in interfaces:
mac_address_table.append(process_mac_fields(vlan, mac, mac_type,
single_interface))
else:
mac_address_table.append(process_mac_fields(vlan, mac, mac_type, interface))
# Cat4500 format
elif re.search(RE_MACTABLE_4500_1, line) and len(line.split()) == 5:
vlan, mac, mac_type, _, interface = line.split()
mac_address_table.append(process_mac_fields(vlan, mac, mac_type, interface))
# Cat2960 format - ignore extra header line
elif re.search(r"^Vlan\s+Mac Address\s+", line):
continue
# Cat2960 format (Cat4500 format multicast entries)
elif (re.search(RE_MACTABLE_2960_1, line) or re.search(RE_MACTABLE_GEN_1, line)) and \
len(line.split()) == 4:
vlan, mac, mac_type, interface = line.split()
if ',' in interface:
interfaces = interface.split(',')
fill_down_vlan = vlan
fill_down_mac = mac
fill_down_mac_type = mac_type
for single_interface in interfaces:
mac_address_table.append(process_mac_fields(vlan, mac, mac_type,
single_interface))
else:
mac_address_table.append(process_mac_fields(vlan, mac, mac_type, interface))
elif re.search(r"Total Mac Addresses", line):
continue
elif re.search(r"Multicast Entries", line):
continue
elif re.search(r"vlan.*mac.*address.*type.*", line):
continue
else:
raise ValueError("Unexpected output from: {}".format(repr(line)))
return mac_address_table | Returns a lists of dictionaries. Each dictionary represents an entry in the MAC Address
Table, having the following keys
* mac (string)
* interface (string)
* vlan (int)
* active (boolean)
* static (boolean)
* moves (int)
* last_move (float)
Format1:
Destination Address Address Type VLAN Destination Port
------------------- ------------ ---- --------------------
6400.f1cf.2cc6 Dynamic 1 Wlan-GigabitEthernet0
Cat 6500:
Legend: * - primary entry
age - seconds since last seen
n/a - not available
vlan mac address type learn age ports
------+----------------+--------+-----+----------+--------------------------
* 999 1111.2222.3333 dynamic Yes 0 Port-channel1
999 1111.2222.3333 dynamic Yes 0 Port-channel1
Cat 4948
Unicast Entries
vlan mac address type protocols port
-------+---------------+--------+---------------------+--------------------
999 1111.2222.3333 dynamic ip Port-channel1
Cat 2960
Mac Address Table
-------------------------------------------
Vlan Mac Address Type Ports
---- ----------- -------- -----
All 1111.2222.3333 STATIC CPU | entailment |
def traceroute(self, destination, source=C.TRACEROUTE_SOURCE,
ttl=C.TRACEROUTE_TTL, timeout=C.TRACEROUTE_TIMEOUT, vrf=C.TRACEROUTE_VRF):
"""
Executes traceroute on the device and returns a dictionary with the result.
:param destination: Host or IP Address of the destination
:param source (optional): Use a specific IP Address to execute the traceroute
:param ttl (optional): Maximum number of hops -> int (0-255)
:param timeout (optional): Number of seconds to wait for response -> int (1-3600)
Output dictionary has one of the following keys:
* success
* error
In case of success, the keys of the dictionary represent the hop ID, while values are
dictionaries containing the probes results:
* rtt (float)
* ip_address (str)
* host_name (str)
"""
# vrf needs to be right after the traceroute command
if vrf:
command = "traceroute vrf {} {}".format(vrf, destination)
else:
command = "traceroute {}".format(destination)
if source:
command += " source {}".format(source)
if ttl:
if isinstance(ttl, int) and 0 <= ttl <= 255:
command += " ttl 0 {}".format(str(ttl))
if timeout:
# Timeout should be an integer between 1 and 3600
if isinstance(timeout, int) and 1 <= timeout <= 3600:
command += " timeout {}".format(str(timeout))
# Calculation to leave enough time for traceroute to complete assumes send_command
# delay of .2 seconds.
max_loops = (5 * ttl * timeout) + 150
if max_loops < 500: # Make sure max_loops isn't set artificially low
max_loops = 500
output = self.device.send_command(command, max_loops=max_loops)
# Prepare return dict
traceroute_dict = dict()
if re.search('Unrecognized host or address', output):
traceroute_dict['error'] = 'unknown host %s' % destination
return traceroute_dict
else:
traceroute_dict['success'] = dict()
results = dict()
# Find all hops
hops = re.findall(r'\n\s+[0-9]{1,3}\s', output)
for hop in hops:
# Search for hop in the output
hop_match = re.search(hop, output)
# Find the start index for hop
start_index = hop_match.start()
# If this is last hop
if hops.index(hop) + 1 == len(hops):
# Set the stop index for hop to len of output
stop_index = len(output)
# else, find the start index for next hop
else:
next_hop_match = re.search(hops[hops.index(hop) + 1], output)
stop_index = next_hop_match.start()
# Now you have the start and stop index for each hop
# and you can parse the probes
# Set the hop_variable, and remove spaces between msec for easier matching
hop_string = output[start_index:stop_index].replace(' msec', 'msec')
hop_list = hop_string.split()
current_hop = int(hop_list.pop(0))
# Prepare dictionary for each hop (assuming there are 3 probes in each hop)
results[current_hop] = dict()
results[current_hop]['probes'] = dict()
results[current_hop]['probes'][1] = {'rtt': float(),
'ip_address': '',
'host_name': ''}
results[current_hop]['probes'][2] = {'rtt': float(),
'ip_address': '',
'host_name': ''}
results[current_hop]['probes'][3] = {'rtt': float(),
'ip_address': '',
'host_name': ''}
current_probe = 1
ip_address = ''
host_name = ''
while hop_list:
current_element = hop_list.pop(0)
# If current_element is * move index in dictionary to next probe
if current_element == '*':
current_probe += 1
# If current_element contains msec record the entry for probe
elif 'msec' in current_element:
ip_address = py23_compat.text_type(ip_address)
host_name = py23_compat.text_type(host_name)
rtt = float(current_element.replace('msec', ''))
results[current_hop]['probes'][current_probe]['ip_address'] = ip_address
results[current_hop]['probes'][current_probe]['host_name'] = host_name
results[current_hop]['probes'][current_probe]['rtt'] = rtt
# After recording the entry move the index to next probe
current_probe += 1
# If element contains '(' and ')', the output format is 'FQDN (IP_ADDRESS)'
# Save the IP address
elif '(' in current_element:
ip_address = current_element.replace('(', '').replace(')', '')
# Save the probe's ip_address and host_name
else:
host_name = current_element
ip_address = current_element
traceroute_dict['success'] = results
return traceroute_dict | Executes traceroute on the device and returns a dictionary with the result.
:param destination: Host or IP Address of the destination
:param source (optional): Use a specific IP Address to execute the traceroute
:param ttl (optional): Maximum number of hops -> int (0-255)
:param timeout (optional): Number of seconds to wait for response -> int (1-3600)
Output dictionary has one of the following keys:
* success
* error
In case of success, the keys of the dictionary represent the hop ID, while values are
dictionaries containing the probes results:
* rtt (float)
* ip_address (str)
* host_name (str) | entailment |
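For reference, the shape of a successful return value, with invented numbers (each hop carries exactly three probes, matching the pre-initialized dictionaries above):

traceroute_result = {
    'success': {
        1: {'probes': {
            1: {'rtt': 1.123, 'ip_address': '10.0.0.1', 'host_name': 'r1.example.net'},
            2: {'rtt': 1.9,   'ip_address': '10.0.0.1', 'host_name': 'r1.example.net'},
            3: {'rtt': 2.01,  'ip_address': '10.0.0.1', 'host_name': 'r1.example.net'},
        }},
    }
}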
def get_config(self, retrieve='all'):
"""Implementation of get_config for IOS.
Returns the startup or/and running configuration as dictionary.
The keys of the dictionary represent the type of configuration
(startup or running). The candidate is always empty string,
since IOS does not support candidate configuration.
"""
configs = {
'startup': '',
'running': '',
'candidate': '',
}
if retrieve in ('startup', 'all'):
command = 'show startup-config'
output = self._send_command(command)
configs['startup'] = output
if retrieve in ('running', 'all'):
command = 'show running-config'
output = self._send_command(command)
configs['running'] = output
return configs | Implementation of get_config for IOS.
Returns the startup or/and running configuration as dictionary.
The keys of the dictionary represent the type of configuration
(startup or running). The candidate is always empty string,
since IOS does not support candidate configuration. | entailment |
def set_range(self, value):
"""Set the range of the accelerometer to the provided value. Range value
should be one of these constants:
- ADXL345_RANGE_2_G = +/-2G
- ADXL345_RANGE_4_G = +/-4G
- ADXL345_RANGE_8_G = +/-8G
- ADXL345_RANGE_16_G = +/-16G
"""
# Read the data format register to preserve its other bits. Update the
# range bits and make sure the FULL-RES bit is enabled for range scaling.
format_reg = self._device.readU8(ADXL345_REG_DATA_FORMAT) & ~0x0F
format_reg |= value
format_reg |= 0x08 # FULL-RES bit enabled
# Write the updated format register.
self._device.write8(ADXL345_REG_DATA_FORMAT, format_reg) | Set the range of the accelerometer to the provided value. Range value
should be one of these constants:
- ADXL345_RANGE_2_G = +/-2G
- ADXL345_RANGE_4_G = +/-4G
- ADXL345_RANGE_8_G = +/-8G
- ADXL345_RANGE_16_G = +/-16G | entailment |
def read(self):
"""Read the current value of the accelerometer and return it as a tuple
of signed 16-bit X, Y, Z axis values.
"""
raw = self._device.readList(ADXL345_REG_DATAX0, 6)
return struct.unpack('<hhh', raw) | Read the current value of the accelerometer and return it as a tuple
of signed 16-bit X, Y, Z axis values. | entailment |
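A hedged sketch for converting the raw counts to g: with the FULL-RES bit forced on in set_range above, the ADXL345 keeps a roughly constant scale factor across ranges (the datasheet's nominal figure is 3.9 mg/LSB; 4 mg/LSB is a common approximation):

SCALE_MULTIPLIER = 0.004  # g per LSB in full-resolution mode (approximation)

def raw_to_g(raw_xyz):
    """Convert a (x, y, z) tuple of signed counts to g."""
    return tuple(axis * SCALE_MULTIPLIER for axis in raw_xyz)

print(raw_to_g((0, 0, 256)))  # (0.0, 0.0, 1.024) -> board lying flat, z ~= 1 g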
def deinit(bus=DEFAULT_SPI_BUS,
chip_select=DEFAULT_SPI_CHIP_SELECT):
"""Stops interrupts on all boards. Only required when using
:func:`digital_read` and :func:`digital_write`.
:param bus: SPI bus /dev/spidev<bus>.<chipselect> (default: {bus})
:type bus: int
:param chip_select: SPI chip select /dev/spidev<bus>.<chipselect>
(default: {chip})
:type chip_select: int
"""
global _pifacedigitals
for pfd in _pifacedigitals:
try:
pfd.deinit_board()
except AttributeError:
pass | Stops interrupts on all boards. Only required when using
:func:`digital_read` and :func:`digital_write`.
:param bus: SPI bus /dev/spidev<bus>.<chipselect> (default: {bus})
:type bus: int
:param chip_select: SPI chip select /dev/spidev<bus>.<chipselect>
(default: {chip})
:type chip_select: int | entailment |
def digital_write(pin_num, value, hardware_addr=0):
"""Writes the value to the input pin specified.
.. note:: This function is for familiarality with users of other types of
IO board. Consider accessing the ``output_pins`` attribute of a
PiFaceDigital object:
>>> pfd = PiFaceDigital(hardware_addr)
>>> pfd.output_pins[pin_num].value = 1
:param pin_num: The pin number to write to.
:type pin_num: int
:param value: The value to write.
:type value: int
:param hardware_addr: The board to write to (default: 0)
:type hardware_addr: int
"""
_get_pifacedigital(hardware_addr).output_pins[pin_num].value = value | Writes the value to the output pin specified.
.. note:: This function is provided for familiarity to users of other
types of IO board. Consider accessing the ``output_pins`` attribute of a
PiFaceDigital object:
>>> pfd = PiFaceDigital(hardware_addr)
>>> pfd.output_pins[pin_num].value = 1
:param pin_num: The pin number to write to.
:type pin_num: int
:param value: The value to write.
:type value: int
:param hardware_addr: The board to write to (default: 0)
:type hardware_addr: int | entailment |
def digital_write_pullup(pin_num, value, hardware_addr=0):
"""Writes the value to the input pullup specified.
.. note:: This function is provided for familiarity to users of other
types of IO board. Consider accessing the ``gppub`` attribute of a
PiFaceDigital object:
>>> pfd = PiFaceDigital(hardware_addr)
>>> hex(pfd.gppub.value)
0xff
>>> pfd.gppub.bits[pin_num].value = 1
:param pin_num: The pin number to write to.
:type pin_num: int
:param value: The value to write.
:type value: int
:param hardware_addr: The board to write to (default: 0)
:type hardware_addr: int
"""
_get_pifacedigital(hardware_addr).gppub.bits[pin_num].value = value | Writes the value to the input pullup specified.
.. note:: This function is provided for familiarity to users of other
types of IO board. Consider accessing the ``gppub`` attribute of a
PiFaceDigital object:
>>> pfd = PiFaceDigital(hardware_addr)
>>> hex(pfd.gppub.value)
0xff
>>> pfd.gppub.bits[pin_num].value = 1
:param pin_num: The pin number to write to.
:type pin_num: int
:param value: The value to write.
:type value: int
:param hardware_addr: The board to read from (default: 0)
:type hardware_addr: int | entailment |
def get_my_ip():
"""Returns this computers IP address as a string."""
ip = subprocess.check_output(GET_IP_CMD, shell=True).decode('utf-8')[:-1]
return ip.strip() | Returns this computer's IP address as a string. | entailment |
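GET_IP_CMD is shell-dependent; a more portable sketch (an alternative, not this module's approach) discovers the outbound address with a UDP socket, which selects a route without sending any packets:

import socket

def get_my_ip_portable():
    """Best-effort local IP discovery without shelling out."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 80))  # no traffic is sent for a UDP connect
        return s.getsockname()[0]
    finally:
        s.close()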
def set_output_port(self, new_value, old_value=0):
"""Sets the output port value to new_value, defaults to old_value."""
print("Setting output port to {}.".format(new_value))
port_value = old_value
try:
port_value = int(new_value) # dec
except ValueError:
port_value = int(new_value, 16) # hex
finally:
self.pifacedigital.output_port.value = port_value
return port_value | Set the output port to new_value, falling back to old_value if parsing fails. | entailment |
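The dec-then-hex parse in isolation, as a slightly more defensive hypothetical variant (the method above lets a second ValueError propagate, after the finally block has written old_value):

def parse_port_value(text, fallback=0):
    try:
        return int(text)          # decimal, e.g. '170'
    except ValueError:
        try:
            return int(text, 16)  # hex, e.g. 'aa' or '0xAA'
        except ValueError:
            return fallback

assert parse_port_value('170') == 170
assert parse_port_value('0xAA') == 170
assert parse_port_value('junk') == 0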
def _request_api(self, **kwargs):
"""Wrap the calls the url, with the given arguments.
:param str url: Url to call with the given arguments
:param str method: [POST | GET] Method to use on the request
:param int status: Expected status code
"""
_url = kwargs.get('url')
_method = kwargs.get('method', 'GET')
_status = kwargs.get('status', 200)
counter = 0
if _method not in ['GET', 'POST']:
raise ValueError('Method is not GET or POST')
while True:
try:
res = REQ[_method](_url, cookies=self._cookie)
if res.status_code == _status:
break
else:
raise BadStatusException(res.content)
except requests.exceptions.BaseHTTPError:
if counter < self._retries:
counter += 1
continue
raise MaxRetryError
self._last_result = res
return res | Wrap the call to the given url with the given arguments.
:param str url: Url to call with the given arguments
:param str method: [POST | GET] Method to use on the request
:param int status: Expected status code | entailment |
def get_infos_with_id(self, uid):
"""Get info about a user based on his id.
:return: JSON
"""
_logid = uid
_user_info_url = USER_INFO_URL.format(logid=_logid)
return self._request_api(url=_user_info_url).json() | Get info about a user based on his id.
:return: JSON | entailment |
def get_current_activities(self, login=None, **kwargs):
"""Get the current activities of user.
Either use the `login` param, or the client's login if unset.
:return: JSON
"""
_login = kwargs.get(
'login',
login or self._login
)
_activity_url = ACTIVITY_URL.format(login=_login)
return self._request_api(url=_activity_url).json() | Get the current activities of user.
Either use the `login` param, or the client's login if unset.
:return: JSON | entailment |
def get_notifications(self, login=None, **kwargs):
"""Get the current notifications of a user.
:return: JSON
"""
_login = kwargs.get(
'login',
login or self._login
)
_notif_url = NOTIF_URL.format(login=_login)
return self._request_api(url=_notif_url).json() | Get the current notifications of a user.
:return: JSON | entailment |
def get_grades(self, login=None, promotion=None, **kwargs):
"""Get a user's grades on a single promotion based on his login.
Either use the `login` param, or the client's login if unset.
:return: JSON
"""
_login = kwargs.get(
'login',
login or self._login
)
_promotion_id = kwargs.get('promotion', promotion)
_grades_url = GRADES_URL.format(login=_login, promo_id=_promotion_id)
return self._request_api(url=_grades_url).json() | Get a user's grades on a single promotion based on his login.
Either use the `login` param, or the client's login if unset.
:return: JSON | entailment |
def get_picture(self, login=None, **kwargs):
"""Get a user's picture.
:param str login: Login of the user to check
:return: JSON
"""
_login = kwargs.get(
'login',
login or self._login
)
_activities_url = PICTURE_URL.format(login=_login)
return self._request_api(url=_activities_url).content | Get a user's picture.
:param str login: Login of the user to check
:return: JSON | entailment |
def get_projects(self, **kwargs):
"""Get a user's project.
:param str login: User's login (Default: self._login)
:return: JSON
"""
_login = kwargs.get('login', self._login)
search_url = SEARCH_URL.format(login=_login)
return self._request_api(url=search_url).json() | Get a user's projects.
:param str login: User's login (Default: self._login)
:return: JSON | entailment |
def get_activities_for_project(self, module=None, **kwargs):
"""Get the related activities of a project.
:param str module: Stages of a given module
:return: JSON
"""
_module_id = kwargs.get('module', module)
_activities_url = ACTIVITIES_URL.format(module_id=_module_id)
return self._request_api(url=_activities_url).json() | Get the related activities of a project.
:param str module: Stages of a given module
:return: JSON | entailment |
def get_group_for_activity(self, module=None, project=None, **kwargs):
"""Get groups for activity.
:param str module: Base module
:param str project: Project which contains the requested group
:return: JSON
"""
_module_id = kwargs.get('module', module)
_project_id = kwargs.get('project', project)
_url = GROUPS_URL.format(module_id=_module_id, project_id=_project_id)
return self._request_api(url=_url).json() | Get groups for activity.
:param str module: Base module
:param str project: Project which contains the requested group
:return: JSON | entailment |
def get_students(self, **kwargs):
"""Get users by promotion id.
:param int promotion: Promotion ID
:return: JSON
"""
_promotion_id = kwargs.get('promotion')
_url = PROMOTION_URL.format(promo_id=_promotion_id)
return self._request_api(url=_url).json() | Get users by promotion id.
:param int promotion: Promotion ID
:return: JSON | entailment |
def get_log_events(self, login=None, **kwargs):
"""Get a user's log events.
:param str login: User's login (Default: self._login)
:return: JSON
"""
_login = kwargs.get(
'login',
login
)
log_events_url = GSA_EVENTS_URL.format(login=_login)
return self._request_api(url=log_events_url).json() | Get a user's log events.
:param str login: User's login (Default: self._login)
:return: JSON | entailment |
def get_events(self, login=None, start_date=None, end_date=None, **kwargs):
"""Get a user's events.
:param str login: User's login (Default: self._login)
:param str start_date: Start date
:param str end_date: To date
:return: JSON
"""
_login = kwargs.get(
'login',
login
)
log_events_url = EVENTS_URL.format(
login=_login,
start_date=start_date,
end_date=end_date,
)
return self._request_api(url=log_events_url).json() | Get a user's events.
:param str login: User's login (Default: self._login)
:param str start_date: Start date
:param str end_date: To date
:return: JSON | entailment |