repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_documentation_string
stringlengths
1
47.2k
func_code_url
stringlengths
85
339
sloria/flask-konch
flask_konch/cli.py
cli
def cli():
    """An improved shell command, based on konch.

    Builds a shell context from the active Flask app's config (falling
    back to DEFAULTS), optionally merged with Flask's own shell context
    and user-supplied KONCH_CONTEXT variables, then starts konch.
    """
    # flask.globals._app_ctx_stack is a private API that was removed in
    # Flask 2.3; current_app is the supported way to reach the active app.
    from flask import current_app

    app = current_app._get_current_object()
    # Each KONCH_* option comes from app.config, defaulting to DEFAULTS.
    options = {key: app.config.get(key, default) for key, default in DEFAULTS.items()}
    base_context = {"app": app}
    if options["KONCH_FLASK_IMPORTS"]:
        base_context.update(get_flask_imports())
    context = dict(base_context)
    if options["KONCH_FLASK_SHELL_CONTEXT"]:
        flask_context = app.make_shell_context()
        context.update(flask_context)
    # User-defined variables take precedence over everything else.
    context.update(options["KONCH_CONTEXT"])

    def context_formatter(ctx):
        # Render the banner as labeled sections: base Flask objects,
        # the Flask shell context, then user-defined KONCH_CONTEXT.
        formatted_base = ", ".join(sorted(base_context.keys()))
        ret = "\n{FLASK}\n{base_context}\n".format(
            FLASK=click.style("Flask:", bold=True), base_context=formatted_base
        )
        if options["KONCH_FLASK_SHELL_CONTEXT"]:
            variables = ", ".join(sorted(flask_context.keys()))
            ret += "\n{ADDITIONAL}\n{variables}\n".format(
                ADDITIONAL=click.style(
                    "Flask shell context (see shell_context_processor()):", bold=True
                ),
                variables=variables,
            )
        if options["KONCH_CONTEXT"]:
            variables = ", ".join(sorted(options["KONCH_CONTEXT"].keys()))
            ret += "\n{ADDITIONAL}\n{variables}".format(
                ADDITIONAL=click.style(
                    "Additional variables (see KONCH_CONTEXT):", bold=True
                ),
                variables=variables,
            )
        return ret

    context_format = options["KONCH_CONTEXT_FORMAT"] or context_formatter
    konch.start(
        context=context,
        shell=options["KONCH_SHELL"],
        banner=options["KONCH_BANNER"],
        prompt=options["KONCH_PROMPT"],
        output=options["KONCH_OUTPUT"],
        ptpy_vi_mode=options["KONCH_PTPY_VI_MODE"],
        context_format=context_format,
        ipy_extensions=options["KONCH_IPY_EXTENSIONS"],
        ipy_autoreload=options["KONCH_IPY_AUTORELOAD"],
        ipy_colors=options["KONCH_IPY_COLORS"],
        ipy_highlighting_style=options["KONCH_IPY_HIGHLIGHTING_STYLE"],
    )
python
def cli(): """An improved shell command, based on konch.""" from flask.globals import _app_ctx_stack app = _app_ctx_stack.top.app options = {key: app.config.get(key, DEFAULTS[key]) for key in DEFAULTS.keys()} base_context = {"app": app} if options["KONCH_FLASK_IMPORTS"]: base_context.update(get_flask_imports()) context = dict(base_context) if options["KONCH_FLASK_SHELL_CONTEXT"]: flask_context = app.make_shell_context() context.update(flask_context) context.update(options["KONCH_CONTEXT"]) def context_formatter(ctx): formatted_base = ", ".join(sorted(base_context.keys())) ret = "\n{FLASK}\n{base_context}\n".format( FLASK=click.style("Flask:", bold=True), base_context=formatted_base ) if options["KONCH_FLASK_SHELL_CONTEXT"]: variables = ", ".join(sorted(flask_context.keys())) ret += "\n{ADDITIONAL}\n{variables}\n".format( ADDITIONAL=click.style( "Flask shell context (see shell_context_processor()):", bold=True ), variables=variables, ) if options["KONCH_CONTEXT"]: variables = ", ".join(sorted(options["KONCH_CONTEXT"].keys())) ret += "\n{ADDITIONAL}\n{variables}".format( ADDITIONAL=click.style( "Additional variables (see KONCH_CONTEXT):", bold=True ), variables=variables, ) return ret context_format = options["KONCH_CONTEXT_FORMAT"] or context_formatter konch.start( context=context, shell=options["KONCH_SHELL"], banner=options["KONCH_BANNER"], prompt=options["KONCH_PROMPT"], output=options["KONCH_OUTPUT"], ptpy_vi_mode=options["KONCH_PTPY_VI_MODE"], context_format=context_format, ipy_extensions=options["KONCH_IPY_EXTENSIONS"], ipy_autoreload=options["KONCH_IPY_AUTORELOAD"], ipy_colors=options["KONCH_IPY_COLORS"], ipy_highlighting_style=options["KONCH_IPY_HIGHLIGHTING_STYLE"], )
An improved shell command, based on konch.
https://github.com/sloria/flask-konch/blob/c1829220bcdb3d0a9b41b2016266bee9940cc5e3/flask_konch/cli.py#L35-L89
robgolding/tasklib
tasklib/lazy.py
LazyUUIDTask.replace
def replace(self):
    """
    Performs conversion to the regular Task object, referenced by the
    stored UUID.
    """
    # Look up the concrete task, then take over both its class and its
    # state so this object transparently becomes a regular Task.
    concrete = self._tw.tasks.get(uuid=self._uuid)
    self.__class__, self.__dict__ = concrete.__class__, concrete.__dict__
python
def replace(self): """ Performs conversion to the regular Task object, referenced by the stored UUID. """ replacement = self._tw.tasks.get(uuid=self._uuid) self.__class__ = replacement.__class__ self.__dict__ = replacement.__dict__
Performs conversion to the regular Task object, referenced by the stored UUID.
https://github.com/robgolding/tasklib/blob/0ad882377639865283021041f19add5aeb10126a/tasklib/lazy.py#L69-L77
robgolding/tasklib
tasklib/lazy.py
LazyUUIDTaskSet.replace
def replace(self):
    """
    Performs conversion to the regular TaskQuerySet object, referenced
    by the stored UUIDs.
    """
    # Build one space-separated filter over all stored UUIDs, then
    # morph this object into the resulting query set.
    query = ' '.join(self._uuids)
    concrete = self._tw.tasks.filter(query)
    self.__class__ = concrete.__class__
    self.__dict__ = concrete.__dict__
python
def replace(self): """ Performs conversion to the regular TaskQuerySet object, referenced by the stored UUIDs. """ replacement = self._tw.tasks.filter(' '.join(self._uuids)) self.__class__ = replacement.__class__ self.__dict__ = replacement.__dict__
Performs conversion to the regular TaskQuerySet object, referenced by the stored UUIDs.
https://github.com/robgolding/tasklib/blob/0ad882377639865283021041f19add5aeb10126a/tasklib/lazy.py#L227-L235
robgolding/tasklib
tasklib/backends.py
TaskWarrior.save_task
def save_task(self, task):
    """
    Save a task into the TaskWarrior database using an add/modify call.

    For an already-saved task, issues ``<uuid> modify`` with the
    modified fields; for a new task, issues ``add`` and then parses
    TaskWarrior's "Created task ..." output to recover the new ID/UUID.

    Raises TaskWarriorException if the creation output cannot be parsed.
    """
    # Existing tasks are addressed by UUID and modified; new ones added.
    args = [task['uuid'], 'modify'] if task.saved else ['add']
    args.extend(self._get_modified_task_fields_as_args(task))
    output = self.execute_command(args)

    # Parse out the new ID, if the task is being added for the first time
    if not task.saved:
        id_lines = [l for l in output if l.startswith('Created task ')]

        # Complain loudly if it seems that more tasks were created
        # Should not happen.
        # Expected output: Created task 1.
        #                  Created task 1 (recurrence template).
        # (3 or 5 words respectively, hence the length check below.)
        if len(id_lines) != 1 or len(id_lines[0].split(' ')) not in (3, 5):
            raise TaskWarriorException(
                'Unexpected output when creating '
                'task: %s' % '\n'.join(id_lines),
            )

        # Circumvent the ID storage, since ID is considered read-only
        identifier = id_lines[0].split(' ')[2].rstrip('.')

        # Identifier can be either ID or UUID for completed tasks;
        # int() succeeding means it was a numeric ID.
        try:
            task._data['id'] = int(identifier)
        except ValueError:
            task._data['uuid'] = identifier

    # Refreshing is very important here, as not only modification time
    # is updated, but arbitrary attribute may have changed due hooks
    # altering the data before saving
    task.refresh(after_save=True)
python
def save_task(self, task): """Save a task into TaskWarrior database using add/modify call""" args = [task['uuid'], 'modify'] if task.saved else ['add'] args.extend(self._get_modified_task_fields_as_args(task)) output = self.execute_command(args) # Parse out the new ID, if the task is being added for the first time if not task.saved: id_lines = [l for l in output if l.startswith('Created task ')] # Complain loudly if it seems that more tasks were created # Should not happen. # Expected output: Created task 1. # Created task 1 (recurrence template). if len(id_lines) != 1 or len(id_lines[0].split(' ')) not in (3, 5): raise TaskWarriorException( 'Unexpected output when creating ' 'task: %s' % '\n'.join(id_lines), ) # Circumvent the ID storage, since ID is considered read-only identifier = id_lines[0].split(' ')[2].rstrip('.') # Identifier can be either ID or UUID for completed tasks try: task._data['id'] = int(identifier) except ValueError: task._data['uuid'] = identifier # Refreshing is very important here, as not only modification time # is updated, but arbitrary attribute may have changed due hooks # altering the data before saving task.refresh(after_save=True)
Save a task into TaskWarrior database using add/modify call
https://github.com/robgolding/tasklib/blob/0ad882377639865283021041f19add5aeb10126a/tasklib/backends.py#L330-L363
robgolding/tasklib
tasklib/serializing.py
SerializingObject._normalize
def _normalize(self, key, value):
    """
    Use normalize_<key> methods to normalize user input. Any user input
    will be normalized at the moment it is used as filter, or entered
    as a value of Task attribute.
    """
    # None bypasses normalization entirely.
    if value is None:
        return None
    normalizer = getattr(self, 'normalize_{0}'.format(key), None)
    if normalizer is None:
        # No normalize_<key> method defined; pass the value through.
        return value
    return normalizer(value)
python
def _normalize(self, key, value): """ Use normalize_<key> methods to normalize user input. Any user input will be normalized at the moment it is used as filter, or entered as a value of Task attribute. """ # None value should not be converted by normalizer if value is None: return None normalize_func = getattr(self, 'normalize_{0}'.format(key), lambda x: x) return normalize_func(value)
Use normalize_<key> methods to normalize user input. Any user input will be normalized at the moment it is used as filter, or entered as a value of Task attribute.
https://github.com/robgolding/tasklib/blob/0ad882377639865283021041f19add5aeb10126a/tasklib/serializing.py#L55-L69
robgolding/tasklib
tasklib/serializing.py
SerializingObject.datetime_normalizer
def datetime_normalizer(self, value):
    """
    Normalizes date/datetime value (considered to come from user input)
    to localized datetime value.

    Following conversions happen:
        naive date -> localized datetime with the same date, time=midnight
        naive datetime -> localized datetime with the same value
        localized datetime -> localized datetime (no conversion)
    """
    date_only = (isinstance(value, datetime.date)
                 and not isinstance(value, datetime.datetime))
    if date_only:
        # A bare date becomes midnight of that day in the local zone.
        midnight = datetime.datetime.combine(value, datetime.time.min)
        return local_zone.localize(midnight)
    if isinstance(value, datetime.datetime):
        if value.tzinfo is None:
            # Naive datetime: attach the local time zone.
            return local_zone.localize(value)
        # Already localized; leave the time zone untouched.
        return value
    if isinstance(value, six.string_types):
        # Let the backend interpret date strings.
        return self.backend.convert_datetime_string(value)
    raise ValueError("Provided value could not be converted to "
                     "datetime, its type is not supported: {}"
                     .format(type(value)))
python
def datetime_normalizer(self, value): """ Normalizes date/datetime value (considered to come from user input) to localized datetime value. Following conversions happen: naive date -> localized datetime with the same date, and time=midnight naive datetime -> localized datetime with the same value localized datetime -> localized datetime (no conversion) """ if ( isinstance(value, datetime.date) and not isinstance(value, datetime.datetime) ): # Convert to local midnight value_full = datetime.datetime.combine(value, datetime.time.min) localized = local_zone.localize(value_full) elif isinstance(value, datetime.datetime): if value.tzinfo is None: # Convert to localized datetime object localized = local_zone.localize(value) else: # If the value is already localized, there is no need to change # time zone at this point. Also None is a valid value too. localized = value elif isinstance(value, six.string_types): localized = self.backend.convert_datetime_string(value) else: raise ValueError("Provided value could not be converted to " "datetime, its type is not supported: {}" .format(type(value))) return localized
Normalizes date/datetime value (considered to come from user input) to localized datetime value. Following conversions happen: naive date -> localized datetime with the same date, and time=midnight naive datetime -> localized datetime with the same value localized datetime -> localized datetime (no conversion)
https://github.com/robgolding/tasklib/blob/0ad882377639865283021041f19add5aeb10126a/tasklib/serializing.py#L214-L246
robgolding/tasklib
tasklib/task.py
TaskResource._update_data
def _update_data(self, data, update_original=False, remove_missing=False):
    """
    Low level update of the internal _data dict. Data which are coming
    as updates should already be serialized. If update_original is True,
    the original_data dict is updated as well.
    """
    # Deserialize each incoming value before merging it in.
    self._data.update({key: self._deserialize(key, value)
                       for key, value in data.items()})

    # In certain situations, we want to treat missing keys as removals
    if remove_missing:
        for absent_key in set(self._data.keys()) - set(data.keys()):
            self._data[absent_key] = None

    if update_original:
        # Deep copy so later mutations of _data do not leak into it.
        self._original_data = copy.deepcopy(self._data)
python
def _update_data(self, data, update_original=False, remove_missing=False): """ Low level update of the internal _data dict. Data which are coming as updates should already be serialized. If update_original is True, the original_data dict is updated as well. """ self._data.update(dict((key, self._deserialize(key, value)) for key, value in data.items())) # In certain situations, we want to treat missing keys as removals if remove_missing: for key in set(self._data.keys()) - set(data.keys()): self._data[key] = None if update_original: self._original_data = copy.deepcopy(self._data)
Low level update of the internal _data dict. Data which are coming as updates should already be serialized. If update_original is True, the original_data dict is updated as well.
https://github.com/robgolding/tasklib/blob/0ad882377639865283021041f19add5aeb10126a/tasklib/task.py#L69-L84
robgolding/tasklib
tasklib/task.py
TaskResource.export_data
def export_data(self):
    """
    Exports current data contained in the Task as JSON.

    Values are serialized via _serialize; entries whose serialized form
    is the empty string are omitted from the output.
    """
    # We need to remove spaces for TW-1504, use custom separators
    data_tuples = ((key, self._serialize(key, value))
                   for key, value in self._data.items())

    # Empty string denotes empty serialized value, we do not want
    # to pass that to TaskWarrior. Compare with != rather than the
    # original `is not ''`: identity comparison with a str literal is
    # implementation-dependent (and a SyntaxWarning since Python 3.8).
    data_tuples = filter(lambda t: t[1] != '', data_tuples)
    data = dict(data_tuples)
    return json.dumps(data, separators=(',', ':'))
python
def export_data(self): """ Exports current data contained in the Task as JSON """ # We need to remove spaces for TW-1504, use custom separators data_tuples = ((key, self._serialize(key, value)) for key, value in six.iteritems(self._data)) # Empty string denotes empty serialized value, we do not want # to pass that to TaskWarrior. data_tuples = filter(lambda t: t[1] is not '', data_tuples) data = dict(data_tuples) return json.dumps(data, separators=(',', ':'))
Exports current data contained in the Task as JSON
https://github.com/robgolding/tasklib/blob/0ad882377639865283021041f19add5aeb10126a/tasklib/task.py#L117-L130
robgolding/tasklib
tasklib/task.py
Task.from_input
def from_input(cls, input_file=sys.stdin, modify=None, backend=None): """ Creates a Task object, directly from the stdin, by reading one line. If modify=True, two lines are used, first line interpreted as the original state of the Task object, and second line as its new, modified value. This is consistent with the TaskWarrior's hook system. Object created by this method should not be saved, deleted or refreshed, as t could create a infinite loop. For this reason, TaskWarrior instance is set to None. Input_file argument can be used to specify the input file, but defaults to sys.stdin. """ # Detect the hook type if not given directly name = os.path.basename(sys.argv[0]) modify = name.startswith('on-modify') if modify is None else modify # Create the TaskWarrior instance if none passed if backend is None: backends = importlib.import_module('tasklib.backends') hook_parent_dir = os.path.dirname(os.path.dirname(sys.argv[0])) backend = backends.TaskWarrior(data_location=hook_parent_dir) # TaskWarrior instance is set to None task = cls(backend) # Load the data from the input task._load_data(json.loads(input_file.readline().strip())) # If this is a on-modify event, we are provided with additional # line of input, which provides updated data if modify: task._update_data(json.loads(input_file.readline().strip()), remove_missing=True) return task
python
def from_input(cls, input_file=sys.stdin, modify=None, backend=None): """ Creates a Task object, directly from the stdin, by reading one line. If modify=True, two lines are used, first line interpreted as the original state of the Task object, and second line as its new, modified value. This is consistent with the TaskWarrior's hook system. Object created by this method should not be saved, deleted or refreshed, as t could create a infinite loop. For this reason, TaskWarrior instance is set to None. Input_file argument can be used to specify the input file, but defaults to sys.stdin. """ # Detect the hook type if not given directly name = os.path.basename(sys.argv[0]) modify = name.startswith('on-modify') if modify is None else modify # Create the TaskWarrior instance if none passed if backend is None: backends = importlib.import_module('tasklib.backends') hook_parent_dir = os.path.dirname(os.path.dirname(sys.argv[0])) backend = backends.TaskWarrior(data_location=hook_parent_dir) # TaskWarrior instance is set to None task = cls(backend) # Load the data from the input task._load_data(json.loads(input_file.readline().strip())) # If this is a on-modify event, we are provided with additional # line of input, which provides updated data if modify: task._update_data(json.loads(input_file.readline().strip()), remove_missing=True) return task
Creates a Task object, directly from the stdin, by reading one line. If modify=True, two lines are used, first line interpreted as the original state of the Task object, and second line as its new, modified value. This is consistent with the TaskWarrior's hook system. Object created by this method should not be saved, deleted or refreshed, as t could create a infinite loop. For this reason, TaskWarrior instance is set to None. Input_file argument can be used to specify the input file, but defaults to sys.stdin.
https://github.com/robgolding/tasklib/blob/0ad882377639865283021041f19add5aeb10126a/tasklib/task.py#L215-L253
robgolding/tasklib
tasklib/task.py
TaskQuerySet.filter
def filter(self, *args, **kwargs):
    """
    Returns a new TaskQuerySet with the given filters added.
    """
    # Work on a clone so the current query set stays untouched.
    new_qs = self._clone()
    for positional in args:
        new_qs.filter_obj.add_filter(positional)
    for name, value in kwargs.items():
        new_qs.filter_obj.add_filter_param(name, value)
    return new_qs
python
def filter(self, *args, **kwargs): """ Returns a new TaskQuerySet with the given filters added. """ clone = self._clone() for f in args: clone.filter_obj.add_filter(f) for key, value in kwargs.items(): clone.filter_obj.add_filter_param(key, value) return clone
Returns a new TaskQuerySet with the given filters added.
https://github.com/robgolding/tasklib/blob/0ad882377639865283021041f19add5aeb10126a/tasklib/task.py#L540-L549
robgolding/tasklib
tasklib/task.py
TaskQuerySet.get
def get(self, **kwargs):
    """
    Performs the query and returns a single object matching the given
    keyword arguments.

    Raises Task.DoesNotExist for no match, ValueError for more than one.
    """
    matched = self.filter(**kwargs)
    count = len(matched)
    if count == 1:
        return matched._result_cache[0]
    elif count == 0:
        raise Task.DoesNotExist(
            'Task matching query does not exist. '
            'Lookup parameters were {0}'.format(kwargs),
        )
    raise ValueError(
        'get() returned more than one Task -- it returned {0}! '
        'Lookup parameters were {1}'.format(count, kwargs),
    )
python
def get(self, **kwargs): """ Performs the query and returns a single object matching the given keyword arguments. """ clone = self.filter(**kwargs) num = len(clone) if num == 1: return clone._result_cache[0] if not num: raise Task.DoesNotExist( 'Task matching query does not exist. ' 'Lookup parameters were {0}'.format(kwargs), ) raise ValueError( 'get() returned more than one Task -- it returned {0}! ' 'Lookup parameters were {1}'.format(num, kwargs), )
Performs the query and returns a single object matching the given keyword arguments.
https://github.com/robgolding/tasklib/blob/0ad882377639865283021041f19add5aeb10126a/tasklib/task.py#L551-L568
NiklasRosenstein/py-localimport
localimport.py
is_local
def is_local(filename, pathlist):
    '''
    Returns True if *filename* is a subpath of any of the paths in
    *pathlist*.
    '''
    # Absolutize both sides so the subpath test is meaningful.
    abs_filename = os.path.abspath(filename)
    return any(is_subpath(abs_filename, os.path.abspath(candidate))
               for candidate in pathlist)
python
def is_local(filename, pathlist): ''' Returns True if *filename* is a subpath of any of the paths in *pathlist*. ''' filename = os.path.abspath(filename) for path_name in pathlist: path_name = os.path.abspath(path_name) if is_subpath(filename, path_name): return True return False
Returns True if *filename* is a subpath of any of the paths in *pathlist*.
https://github.com/NiklasRosenstein/py-localimport/blob/69af71c37f8bd3b2121ec39083dff18a9a2d04a1/localimport.py#L48-L58
NiklasRosenstein/py-localimport
localimport.py
is_subpath
def is_subpath(path, parent):
    '''
    Returns True if *path* points to the same or a subpath of *parent*.
    '''
    try:
        rel = os.path.relpath(path, parent)
    except ValueError:
        # On Windows, relpath() raises when drive letters differ.
        return False
    if rel == os.curdir:
        # Same directory.
        return True
    # Anything that does not climb upward is inside *parent*.
    return not rel.startswith(os.pardir)
python
def is_subpath(path, parent): ''' Returns True if *path* points to the same or a subpath of *parent*. ''' try: relpath = os.path.relpath(path, parent) except ValueError: return False # happens on Windows if drive letters don't match return relpath == os.curdir or not relpath.startswith(os.pardir)
Returns True if *path* points to the same or a subpath of *parent*.
https://github.com/NiklasRosenstein/py-localimport/blob/69af71c37f8bd3b2121ec39083dff18a9a2d04a1/localimport.py#L61-L70
NiklasRosenstein/py-localimport
localimport.py
eval_pth
def eval_pth(filename, sitedir, dest=None, imports=None):
    '''
    Evaluates a `.pth` file (including support for `import` statements),
    and appends the result to the list *dest*. If *dest* is #None, it
    will fall back to `sys.path`.

    If *imports* is specified, it must be a list. `import` statements
    will not be executed but instead appended to that list in tuples of
    (*filename*, *line*, *stmt*).

    The *sitedir* argument is accepted for interface compatibility;
    relative entries are resolved against *filename*'s directory.

    Returns *dest* (also when *filename* does not exist).
    '''
    if dest is None:
        dest = sys.path
    if not os.path.isfile(filename):
        # BUG FIX: originally returned None here, contradicting the
        # documented return value.
        return dest
    with open(filename, 'r') as fp:
        for index, line in enumerate(fp):
            if line.startswith('import'):
                if imports is None:
                    exec_pth_import(filename, index+1, line)
                else:
                    imports.append((filename, index+1, line))
            else:
                # Strip trailing comments. BUG FIX: the original used
                # `find('#') > 0`, which let full-line comments through
                # and then treated them as path entries.
                hash_index = line.find('#')
                if hash_index >= 0:
                    line = line[:hash_index]
                line = line.strip()
                # BUG FIX: skip blank lines early; the original joined
                # '' onto the dirname and inserted the directory itself.
                if not line:
                    continue
                if not os.path.isabs(line):
                    line = os.path.join(os.path.dirname(filename), line)
                line = os.path.normpath(line)
                if line not in dest:
                    dest.insert(0, line)
    return dest
python
def eval_pth(filename, sitedir, dest=None, imports=None): ''' Evaluates a `.pth` file (including support for `import` statements), and appends the result to the list *dest*. If *dest* is #None, it will fall back to `sys.path`. If *imports* is specified, it must be a list. `import` statements will not executed but instead appended to that list in tuples of (*filename*, *line*, *stmt*). Returns a tuple of (*dest*, *imports*). ''' if dest is None: dest = sys.path if not os.path.isfile(filename): return with open(filename, 'r') as fp: for index, line in enumerate(fp): if line.startswith('import'): if imports is None: exec_pth_import(filename, index+1, line) else: imports.append((filename, index+1, line)) else: index = line.find('#') if index > 0: line = line[:index] line = line.strip() if not os.path.isabs(line): line = os.path.join(os.path.dirname(filename), line) line = os.path.normpath(line) if line and line not in dest: dest.insert(0, line) return dest
Evaluates a `.pth` file (including support for `import` statements), and appends the result to the list *dest*. If *dest* is #None, it will fall back to `sys.path`. If *imports* is specified, it must be a list. `import` statements will not executed but instead appended to that list in tuples of (*filename*, *line*, *stmt*). Returns a tuple of (*dest*, *imports*).
https://github.com/NiklasRosenstein/py-localimport/blob/69af71c37f8bd3b2121ec39083dff18a9a2d04a1/localimport.py#L73-L107
NiklasRosenstein/py-localimport
localimport.py
extend_path
def extend_path(pth, name):
    '''
    Better implementation of #pkgutil.extend_path() which adds support
    for zipped Python eggs. The original #pkgutil.extend_path() gets
    mocked by this function inside the #localimport context.
    '''

    def zip_isfile(z, name):
        # BUG FIX: the original called name.rstrip('/') and discarded
        # the result (str.rstrip returns a new string).
        name = name.rstrip('/')
        return name in z.namelist()

    pname = os.path.join(*name.split('.'))
    zname = '/'.join(name.split('.'))
    init_py = '__init__' + os.extsep + 'py'
    init_pyc = '__init__' + os.extsep + 'pyc'
    init_pyo = '__init__' + os.extsep + 'pyo'

    mod_path = list(pth)
    for path in sys.path:
        if zipfile.is_zipfile(path):
            try:
                egg = zipfile.ZipFile(path, 'r')
                addpath = (
                    zip_isfile(egg, zname + '/__init__.py') or
                    zip_isfile(egg, zname + '/__init__.pyc') or
                    zip_isfile(egg, zname + '/__init__.pyo'))
                # BUG FIX: the original joined *path* with itself
                # (os.path.join(path, path, zname)), which was only
                # correct by accident for absolute paths and doubled
                # relative ones.
                fpath = os.path.join(path, zname)
                if addpath and fpath not in mod_path:
                    mod_path.append(fpath)
            except (zipfile.BadZipfile, zipfile.LargeZipFile):
                pass  # xxx: Show a warning at least?
        else:
            path = os.path.join(path, pname)
            if os.path.isdir(path) and path not in mod_path:
                # Only add directories that actually contain a package
                # __init__ file.
                addpath = (
                    os.path.isfile(os.path.join(path, init_py)) or
                    os.path.isfile(os.path.join(path, init_pyc)) or
                    os.path.isfile(os.path.join(path, init_pyo)))
                if addpath and path not in mod_path:
                    mod_path.append(path)

    return [os.path.normpath(x) for x in mod_path]
python
def extend_path(pth, name): ''' Better implementation of #pkgutil.extend_path() which adds support for zipped Python eggs. The original #pkgutil.extend_path() gets mocked by this function inside the #localimport context. ''' def zip_isfile(z, name): name.rstrip('/') return name in z.namelist() pname = os.path.join(*name.split('.')) zname = '/'.join(name.split('.')) init_py = '__init__' + os.extsep + 'py' init_pyc = '__init__' + os.extsep + 'pyc' init_pyo = '__init__' + os.extsep + 'pyo' mod_path = list(pth) for path in sys.path: if zipfile.is_zipfile(path): try: egg = zipfile.ZipFile(path, 'r') addpath = ( zip_isfile(egg, zname + '/__init__.py') or zip_isfile(egg, zname + '/__init__.pyc') or zip_isfile(egg, zname + '/__init__.pyo')) fpath = os.path.join(path, path, zname) if addpath and fpath not in mod_path: mod_path.append(fpath) except (zipfile.BadZipfile, zipfile.LargeZipFile): pass # xxx: Show a warning at least? else: path = os.path.join(path, pname) if os.path.isdir(path) and path not in mod_path: addpath = ( os.path.isfile(os.path.join(path, init_py)) or os.path.isfile(os.path.join(path, init_pyc)) or os.path.isfile(os.path.join(path, init_pyo))) if addpath and path not in mod_path: mod_path.append(path) return [os.path.normpath(x) for x in mod_path]
Better implementation of #pkgutil.extend_path() which adds support for zipped Python eggs. The original #pkgutil.extend_path() gets mocked by this function inside the #localimport context.
https://github.com/NiklasRosenstein/py-localimport/blob/69af71c37f8bd3b2121ec39083dff18a9a2d04a1/localimport.py#L118-L159
NiklasRosenstein/py-localimport
localimport.py
localimport._declare_namespace
def _declare_namespace(self, package_name):
    '''
    Mock for #pkg_resources.declare_namespace() which calls
    #pkgutil.extend_path() afterwards as the original implementation
    doesn't seem to properly find all available namespace paths.
    '''
    # Delegate to the saved original implementation first.
    original = self.state['declare_namespace']
    original(package_name)
    # Then re-extend the package's search path ourselves.
    module = sys.modules[package_name]
    module.__path__ = pkgutil.extend_path(module.__path__, package_name)
python
def _declare_namespace(self, package_name): ''' Mock for #pkg_resources.declare_namespace() which calls #pkgutil.extend_path() afterwards as the original implementation doesn't seem to properly find all available namespace paths. ''' self.state['declare_namespace'](package_name) mod = sys.modules[package_name] mod.__path__ = pkgutil.extend_path(mod.__path__, package_name)
Mock for #pkg_resources.declare_namespace() which calls #pkgutil.extend_path() afterwards as the original implementation doesn't seem to properly find all available namespace paths.
https://github.com/NiklasRosenstein/py-localimport/blob/69af71c37f8bd3b2121ec39083dff18a9a2d04a1/localimport.py#L329-L338
hubo1016/namedstruct
misc/ethernet.py
ip_frag
def ip_frag(packet):
    '''
    Classify the fragmentation state of an IP packet:

    Not fragments:

        ip_frag(packet) == 0

        not ip_frag(packet)

    First packet of fragments:

        ip_frag(packet) == IP_FRAG_ANY

    Not first packet of fragments:

        ip_frag(packet) & IP_FRAG_LATER

    All fragments:

        ip_frag(packet) & IP_FRAG_ANY
    '''
    frag_off = packet.frag_off
    # A non-zero fragment offset marks a later (non-first) fragment.
    later_flag = IP_FRAG_LATER if frag_off & IP_OFFMASK else 0
    # Either a non-zero offset or the MF bit marks any fragment.
    any_flag = IP_FRAG_ANY if frag_off & (IP_OFFMASK | IP_MF) else 0
    return later_flag | any_flag
python
def ip_frag(packet): ''' Not fragments: ip_frag(packet) == 0 not ip_frag(packet) First packet of fragments: ip_frag(packet) == IP_FRAG_ANY Not first packet of fragments: ip_frag(packet) & IP_FRAG_LATER All fragments: ip_frag(packet) & IP_FRAG_ANY ''' return ((packet.frag_off & IP_OFFMASK) and IP_FRAG_LATER) | ((packet.frag_off & (IP_OFFMASK | IP_MF)) and IP_FRAG_ANY)
Not fragments: ip_frag(packet) == 0 not ip_frag(packet) First packet of fragments: ip_frag(packet) == IP_FRAG_ANY Not first packet of fragments: ip_frag(packet) & IP_FRAG_LATER All fragments: ip_frag(packet) & IP_FRAG_ANY
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/misc/ethernet.py#L342-L358
hubo1016/namedstruct
misc/openflow/nicira_ext.py
create_extension
def create_extension(namespace, nicira_header, nx_action, nx_stats_request, nx_stats_reply, msg_subtype, action_subtype, stats_subtype): ''' /* This command enables or disables an Open vSwitch extension that allows a * controller to specify the OpenFlow table to which a flow should be added, * instead of having the switch decide which table is most appropriate as * required by OpenFlow 1.0. Because NXM was designed as an extension to * OpenFlow 1.0, the extension applies equally to ofp10_flow_mod and * nx_flow_mod. By default, the extension is disabled. * * When this feature is enabled, Open vSwitch treats struct ofp10_flow_mod's * and struct nx_flow_mod's 16-bit 'command' member as two separate fields. * The upper 8 bits are used as the table ID, the lower 8 bits specify the * command as usual. A table ID of 0xff is treated like a wildcarded table ID. * * The specific treatment of the table ID depends on the type of flow mod: * * - OFPFC_ADD: Given a specific table ID, the flow is always placed in that * table. If an identical flow already exists in that table only, then it * is replaced. If the flow cannot be placed in the specified table, * either because the table is full or because the table cannot support * flows of the given type, the switch replies with an OFPFMFC_TABLE_FULL * error. (A controller can distinguish these cases by comparing the * current and maximum number of entries reported in ofp_table_stats.) * * If the table ID is wildcarded, the switch picks an appropriate table * itself. If an identical flow already exist in the selected flow table, * then it is replaced. The choice of table might depend on the flows * that are already in the switch; for example, if one table fills up then * the switch might fall back to another one. * * - OFPFC_MODIFY, OFPFC_DELETE: Given a specific table ID, only flows * within that table are matched and modified or deleted. 
If the table ID * is wildcarded, flows within any table may be matched and modified or * deleted. * * - OFPFC_MODIFY_STRICT, OFPFC_DELETE_STRICT: Given a specific table ID, * only a flow within that table may be matched and modified or deleted. * If the table ID is wildcarded and exactly one flow within any table * matches, then it is modified or deleted; if flows in more than one * table match, then none is modified or deleted. */ ''' with _warnings.catch_warnings(): _warnings.filterwarnings('ignore', '^padding', StructDefWarning) nx_flow_mod_table_id = nstruct( (uint8, 'set'), # /* Nonzero to enable, zero to disable. */ (uint8[7],), name = 'nx_flow_mod_table_id', base = nicira_header, criteria = lambda x: getattr(x, msg_subtype) == NXT_FLOW_MOD_TABLE_ID, classifyby = (NXT_FLOW_MOD_TABLE_ID,), init = packvalue(NXT_FLOW_MOD_TABLE_ID, msg_subtype) ) namespace['nx_flow_mod_table_id'] = nx_flow_mod_table_id ''' /* NXT_SET_PACKET_IN_FORMAT request. */ ''' nx_set_packet_in_format = nstruct( (uint32, 'format'), # /* One of NXPIF_*. */ name = 'nx_set_packet_in_format', base = nicira_header, criteria = lambda x: getattr(x, msg_subtype) == NXT_SET_PACKET_IN_FORMAT, classifyby = (NXT_SET_PACKET_IN_FORMAT,), init = packvalue(NXT_SET_PACKET_IN_FORMAT, msg_subtype) ) namespace['nx_set_packet_in_format'] = nx_set_packet_in_format ''' /* NXT_PACKET_IN (analogous to OFPT_PACKET_IN). * * NXT_PACKET_IN is similar to the OpenFlow 1.2 OFPT_PACKET_IN. The * differences are: * * - NXT_PACKET_IN includes the cookie of the rule that triggered the * message. (OpenFlow 1.3 OFPT_PACKET_IN also includes the cookie.) * * - The metadata fields use NXM (instead of OXM) field numbers. * * Open vSwitch 1.9.0 and later omits metadata fields that are zero (as allowed * by OpenFlow 1.2). Earlier versions included all implemented metadata * fields. * * Open vSwitch does not include non-metadata in the nx_match, because by * definition that information can be found in the packet itself. 
The format * and the standards allow this, however, so controllers should be prepared to * tolerate future changes. * * The NXM format is convenient for reporting metadata values, but it is * important not to interpret the format as matching against a flow, because it * does not. Nothing is being matched; arbitrary metadata masks would not be * meaningful. * * Whereas in most cases a controller can expect to only get back NXM fields * that it set up itself (e.g. flow dumps will ordinarily report only NXM * fields from flows that the controller added), NXT_PACKET_IN messages might * contain fields that the controller does not understand, because the switch * might support fields (new registers, new protocols, etc.) that the * controller does not. The controller must prepared to tolerate these. * * The 'cookie' field has no meaning when 'reason' is OFPR_NO_MATCH. In this * case it should be UINT64_MAX. */ ''' if 'ofp_oxm' in namespace: nx_match = namespace['ofp_oxm'] namespace['nx_match'] = nx_match nx_match_mask = namespace['ofp_oxm_mask'] namespace['nx_match_mask'] = nx_match_mask nx_match_nomask = namespace['ofp_oxm_nomask'] namespace['nx_match_nomask'] = nx_match_nomask create_nxm = namespace['create_oxm'] namespace['create_nxm'] = create_nxm nx_match_nomask_ext = nstruct( base = nx_match_nomask, criteria = lambda x: NXM_VENDOR(x.header) <= 1, extend = {'header': nxm_header}, name = 'nx_match_nomask_ext' ) namespace['nx_match_nomask_ext'] = nx_match_nomask_ext nx_match_mask_ext = nstruct( base = nx_match_mask, criteria = lambda x: NXM_VENDOR(x.header) <= 1, extend = {'header': nxm_header}, name = 'nx_match_mask_ext' ) namespace['nx_match_mask_ext'] = nx_match_mask_ext else: nx_match = nstruct( (nxm_header, 'header'), name = 'nx_match', padding = 1, size = lambda x: NXM_LENGTH(x.header) + 4 ) namespace['nx_match'] = nx_match nx_match_nomask = nstruct( (raw, 'value'), base = nx_match, criteria = lambda x: not NXM_HASMASK(x.header), init = packvalue(NXM_OF_IN_PORT, 
'header'), name = 'nx_match_nomask' ) namespace['nx_match_nomask'] = nx_match_nomask _nxm_mask_value = nstruct( (raw, 'value'), name = 'nxm_mask_value', size = lambda x: NXM_LENGTH(x.header) // 2, padding = 1 ) nx_match_mask = nstruct( (_nxm_mask_value,), (raw, 'mask'), base = nx_match, criteria = lambda x: NXM_HASMASK(x.header), init = packvalue(NXM_OF_ETH_SRC_W, 'header'), name = 'nx_match_mask', ) namespace['nx_match_mask'] = nx_match_mask def create_nxm(header, value = None, mask = None): if NXM_HASMASK(header): nxm = nx_match_mask.new() size = NXM_LENGTH(header) // 2 else: nxm = nx_match_nomask.new() size = NXM_LENGTH(header) nxm.header = header nxm.value = common.create_binary(value, size) if NXM_HASMASK(header): nxm.mask = common.create_binary(mask, size) nxm._pack() nxm._autosubclass() return nxm namespace['create_nxm'] = create_nxm nx_match_nomask_ext = nx_match_nomask nx_match_mask_ext = nx_match_mask namespace['nx_match_nomask_ext'] = nx_match_nomask_ext namespace['nx_match_mask_ext'] = nx_match_mask_ext from namedstruct.namedstruct import rawtype as _rawtype import socket as _socket if 'ip4_addr_bytes' in namespace: ip4_addr_bytes = namespace['ip4_addr_bytes'] else: ip4_addr_bytes = prim('4s', 'ip4_addr_bytes') ip4_addr_bytes.formatter = lambda x: _socket.inet_ntoa(x) namespace['ip4_addr_bytes'] = ip4_addr_bytes nxm_mask_ipv4 = nstruct(name = 'nxm_mask_ipv4', base = nx_match_mask_ext, criteria = lambda x: x.header in (NXM_OF_IP_SRC_W, NXM_OF_IP_DST_W, NXM_OF_ARP_SPA_W, NXM_OF_ARP_TPA_W, NXM_NX_TUN_IPV4_SRC_W, NXM_NX_TUN_IPV4_DST_W), init = packvalue(NXM_OF_IP_SRC_W, 'header'), extend = {'value' : ip4_addr_bytes, 'mask' : ip4_addr_bytes} ) namespace['nxm_mask_ipv4'] = nxm_mask_ipv4 nxm_nomask_ipv4 = nstruct(name = 'nxm_nomask_ipv4', base = nx_match_nomask_ext, criteria = lambda x: x.header in (NXM_OF_IP_SRC, NXM_OF_IP_DST, NXM_OF_ARP_SPA, NXM_OF_ARP_TPA, NXM_NX_TUN_IPV4_SRC, NXM_NX_TUN_IPV4_DST), init = packvalue(NXM_OF_IP_SRC, 'header'), extend = 
{'value' : ip4_addr_bytes} ) namespace['nxm_nomask_ipv4'] = nxm_nomask_ipv4 if 'mac_addr_bytes' in namespace: mac_addr_bytes = namespace['mac_addr_bytes'] else: mac_addr_bytes = _rawtype() mac_addr_bytes.formatter = lambda x: ':'.join('%02X' % (c,) for c in bytearray(x)) namespace['mac_addr_bytes'] = mac_addr_bytes nxm_mask_eth = nstruct(name = 'nxm_mask_eth', base = nx_match_mask_ext, criteria = lambda x: x.header in (NXM_OF_ETH_SRC_W, NXM_OF_ETH_DST_W), init = packvalue(NXM_OF_ETH_SRC_W, 'header'), extend = {'value' : mac_addr_bytes, 'mask' : mac_addr_bytes}) namespace['nxm_mask_eth'] = nxm_mask_eth nxm_nomask_eth = nstruct(name = 'nxm_nomask_eth', base = nx_match_nomask_ext, criteria = lambda x: x.header in (NXM_OF_ETH_SRC, NXM_OF_ETH_DST, NXM_NX_ND_SLL, NXM_NX_ND_TLL, NXM_NX_ARP_SHA, NXM_NX_ARP_THA), init = packvalue(NXM_OF_ETH_SRC, 'header'), extend = {'value' : mac_addr_bytes}) namespace['nxm_nomask_eth'] = nxm_nomask_eth ofp_port_no = namespace['ofp_port_no'] nx_port_no = enum('nx_port_no', None, uint16, **dict((k, v & 0xffff) for k,v in ofp_port_no.getDict().items()) ) nxm_port_no_raw = _rawtype() nxm_port_no_raw.formatter = lambda x: nx_port_no.formatter(nx_port_no.parse(x)[0]) namespace['nx_port_no'] = nx_port_no namespace['nxm_port_no_raw'] = nxm_port_no_raw nxm_nomask_port = nstruct(name = 'nxm_nomask_port', base = nx_match_nomask_ext, criteria = lambda x: x.header == NXM_OF_IN_PORT, init = packvalue(NXM_OF_IN_PORT, 'header'), extend = {'value': nxm_port_no_raw} ) namespace['nxm_nomask_port'] = nxm_nomask_port if 'ethtype_raw' in namespace: ethtype_raw = namespace['ethtype_raw'] else: ethtype_raw = _rawtype() ethtype_raw.formatter = lambda x: ethertype.formatter(ethertype.parse(x)[0]) namespace['ethtype_raw'] = ethtype_raw nxm_nomask_ethertype = nstruct(name = 'nxm_nomask_ethertype', base = nx_match_nomask_ext, criteria = lambda x: x.header == NXM_OF_ETH_TYPE, init = packvalue(NXM_OF_ETH_TYPE, 'header'), extend = {'value': ethtype_raw}) 
namespace['nxm_nomask_ethertype'] = nxm_nomask_ethertype if 'arpop_raw' in namespace: arpop_raw = namespace['arpop_raw'] else: arpop_raw = _rawtype() arpop_raw.formatter = lambda x: arp_op_code.formatter(arp_op_code.parse(x)[0]) namespace['arpop_raw'] = arpop_raw nxm_nomask_arpopcode = nstruct(name = 'nxm_nomask_arpopcode', base = nx_match_nomask_ext, criteria = lambda x: x.header == NXM_OF_ARP_OP, init = packvalue(NXM_OF_ARP_OP, 'header'), extend = {'value': arpop_raw}) namespace['nxm_nomask_arpopcode'] = nxm_nomask_arpopcode if 'ip_protocol_raw' in namespace: ip_protocol_raw = namespace['ip_protocol_raw'] else: ip_protocol_raw = _rawtype() ip_protocol_raw.formatter = lambda x: ip_protocol.formatter(ip_protocol.parse(x)[0]) namespace['ip_protocol_raw'] = ip_protocol_raw nxm_nomask_ip_protocol = nstruct(name = 'nxm_nomask_ip_protocol', base = nx_match_nomask_ext, criteria = lambda x: x.header == NXM_OF_IP_PROTO, init = packvalue(NXM_OF_IP_PROTO, 'header'), extend = {'value': ip_protocol_raw}) namespace['nxm_nomask_ip_protocol'] = nxm_nomask_ip_protocol if 'ip6_addr_bytes' in namespace: nxm_nomask_ipv6 = nstruct(name = 'nxm_nomask_ipv6', base = nx_match_nomask_ext, criteria = lambda x: x.header in (NXM_NX_IPV6_SRC, NXM_NX_IPV6_DST, NXM_NX_ND_TARGET), init = packvalue(NXM_NX_IPV6_SRC, 'header'), extend = {'value': ip6_addr_bytes}) namespace['nxm_nomask_ipv6'] = nxm_nomask_ipv6 nxm_mask_ipv6 = nstruct(name = 'nxm_mask_ipv6', base = nx_match_mask_ext, criteria = lambda x: x.header in (NXM_NX_IPV6_SRC_W, NXM_NX_IPV6_DST_W), init = packvalue(NXM_NX_IPV6_SRC_W, 'header'), extend = {'value': ip6_addr_bytes, 'mask': ip6_addr_bytes}) namespace['nxm_mask_ipv6'] = nxm_mask_ipv6 nx_ip_frag_raw = _rawtype() nx_ip_frag_raw.formatter = lambda x: nx_ip_frag.formatter(nx_ip_frag.parse(x)[0]) nxm_nomask_ipfrag = nstruct(name = 'nxm_nomask_ipfrag', base = nx_match_nomask_ext, criteria = lambda x: x.header == NXM_NX_IP_FRAG, init = packvalue(NXM_NX_IP_FRAG, 'header'), extend = 
{'value': nx_ip_frag_raw}) namespace['nxm_nomask_ipfrag'] = nxm_nomask_ipfrag nxm_mask_ipfrag = nstruct(name = 'nxm_mask_ipfrag', base = nx_match_mask_ext, criteria = lambda x: x.header == NXM_NX_IP_FRAG_W, init = packvalue(NXM_NX_IP_FRAG_W, 'header'), extend = {'value': nx_ip_frag_raw, 'mask': nx_ip_frag_raw}) namespace['nxm_mask_ipfrag'] = nxm_mask_ipfrag nx_matches = nstruct( (nx_match[0], 'matches'), name = 'nx_matches', size = sizefromlen(65536, 'match_len'), prepack = packrealsize('match_len'), padding = 8 ) namespace['nx_matches'] = nx_matches nx_packet_in = nstruct( (uint32, 'buffer_id'), # /* ID assigned by datapath. */ (uint16, 'total_len'), # /* Full length of frame. */ (uint8, 'reason'), # /* Reason packet is sent (one of OFPR_*). */ (uint8, 'table_id'), # /* ID of the table that was looked up. */ (uint64, 'cookie'), # /* Cookie of the rule that was looked up. */ (uint16, 'match_len'), # /* Size of nx_match. */ (uint8[6],), # /* Align to 64-bits. */ (nx_matches,), (uint8[2],), (raw, 'data'), name = 'nx_packet_in', base = nicira_header, classifyby = (NXT_PACKET_IN,), criteria = lambda x: getattr(x, msg_subtype) == NXT_PACKET_IN, init = packvalue(NXT_PACKET_IN, msg_subtype) ) namespace['nx_packet_in'] = nx_packet_in ''' /* Configures the "role" of the sending controller. The default role is: * * - Other (NX_ROLE_OTHER), which allows the controller access to all * OpenFlow features. * * The other possible roles are a related pair: * * - Master (NX_ROLE_MASTER) is equivalent to Other, except that there may * be at most one Master controller at a time: when a controller * configures itself as Master, any existing Master is demoted to the * Slave role. * * - Slave (NX_ROLE_SLAVE) allows the controller read-only access to * OpenFlow features. In particular attempts to modify the flow table * will be rejected with an OFPBRC_EPERM error. 
* * Slave controllers do not receive OFPT_PACKET_IN or OFPT_FLOW_REMOVED * messages, but they do receive OFPT_PORT_STATUS messages. */ ''' nx_role_request = nstruct( (nx_role, 'role'), # /* One of NX_ROLE_*. */ name = 'nx_role_request', base = nicira_header, classifyby = (NXT_ROLE_REQUEST, NXT_ROLE_REPLY), criteria = lambda x: getattr(x, msg_subtype) == NXT_ROLE_REQUEST or getattr(x, msg_subtype) == NXT_ROLE_REPLY, init = packvalue(NXT_ROLE_REQUEST, msg_subtype) ) namespace['nx_role_request'] = nx_role_request ''' /* NXT_SET_ASYNC_CONFIG. * * Sent by a controller, this message configures the asynchronous messages that * the controller wants to receive. Element 0 in each array specifies messages * of interest when the controller has an "other" or "master" role; element 1, * when the controller has a "slave" role. * * Each array element is a bitmask in which a 0-bit disables receiving a * particular message and a 1-bit enables receiving it. Each bit controls the * message whose 'reason' corresponds to the bit index. For example, the bit * with value 1<<2 == 4 in port_status_mask[1] determines whether the * controller will receive OFPT_PORT_STATUS messages with reason OFPPR_MODIFY * (value 2) when the controller has a "slave" role. * * As a side effect, for service controllers, this message changes the * miss_send_len from default of zero to OFP_DEFAULT_MISS_SEND_LEN (128). 
*/ ''' ofp_packet_in_reason = namespace['ofp_packet_in_reason'] if 'ofp_packet_in_reason_bitwise' in namespace: ofp_packet_in_reason_bitwise = namespace['ofp_packet_in_reason_bitwise'] else: ofp_packet_in_reason_bitwise = enum('ofp_packet_in_reason_bitwise', None, uint32, **dict((k, 1<<v) for k,v in ofp_packet_in_reason.getDict().items())) namespace['ofp_packet_in_reason_bitwise'] = ofp_packet_in_reason_bitwise ofp_port_reason = namespace['ofp_port_reason'] if 'ofp_port_reason_bitwise' in namespace: ofp_port_reason_bitwise = namespace['ofp_port_reason_bitwise'] else: ofp_port_reason_bitwise = enum('ofp_port_reason_bitwise', None, uint32, **dict((k, 1<<v) for k,v in ofp_port_reason.getDict().items())) namespace['ofp_port_reason_bitwise'] = ofp_port_reason_bitwise ofp_flow_removed_reason = namespace['ofp_flow_removed_reason'] if 'ofp_flow_removed_reason_bitwise' in namespace: ofp_flow_removed_reason_bitwise = namespace['ofp_flow_removed_reason_bitwise'] else: ofp_flow_removed_reason_bitwise = enum('ofp_flow_removed_reason_bitwise', None, uint32, **dict((k, 1<<v) for k,v in ofp_flow_removed_reason.getDict().items())) namespace['ofp_flow_removed_reason_bitwise'] = ofp_flow_removed_reason_bitwise nx_async_config = nstruct( (ofp_packet_in_reason_bitwise[2], 'packet_in_mask'), # /* Bitmasks of OFPR_* values. */ (ofp_port_reason_bitwise[2], 'port_status_mask'), # /* Bitmasks of OFPRR_* values. */ (ofp_flow_removed_reason_bitwise[2], 'flow_removed_mask'), #/* Bitmasks of OFPPR_* values. */ name = 'nx_async_config', base = nicira_header, classifyby = (NXT_SET_ASYNC_CONFIG,), criteria = lambda x: getattr(x, msg_subtype) == NXT_SET_ASYNC_CONFIG, init = packvalue(NXT_SET_ASYNC_CONFIG, msg_subtype) ) namespace['nx_async_config'] = nx_async_config ''' /* Nicira vendor flow actions. */ ''' ''' /* Action structures for NXAST_RESUBMIT and NXAST_RESUBMIT_TABLE. 
* * These actions search one of the switch's flow tables: * * - For NXAST_RESUBMIT_TABLE only, if the 'table' member is not 255, then * it specifies the table to search. * * - Otherwise (for NXAST_RESUBMIT_TABLE with a 'table' of 255, or for * NXAST_RESUBMIT regardless of 'table'), it searches the current flow * table, that is, the OpenFlow flow table that contains the flow from * which this action was obtained. If this action did not come from a * flow table (e.g. it came from an OFPT_PACKET_OUT message), then table 0 * is the current table. * * The flow table lookup uses a flow that may be slightly modified from the * original lookup: * * - For NXAST_RESUBMIT, the 'in_port' member of struct nx_action_resubmit * is used as the flow's in_port. * * - For NXAST_RESUBMIT_TABLE, if the 'in_port' member is not OFPP_IN_PORT, * then its value is used as the flow's in_port. Otherwise, the original * in_port is used. * * - If actions that modify the flow (e.g. OFPAT_SET_VLAN_VID) precede the * resubmit action, then the flow is updated with the new values. * * Following the lookup, the original in_port is restored. * * If the modified flow matched in the flow table, then the corresponding * actions are executed. Afterward, actions following the resubmit in the * original set of actions, if any, are executed; any changes made to the * packet (e.g. changes to VLAN) by secondary actions persist when those * actions are executed, although the original in_port is restored. * * Resubmit actions may be used any number of times within a set of actions. * * Resubmit actions may nest to an implementation-defined depth. Beyond this * implementation-defined depth, further resubmit actions are simply ignored. * * NXAST_RESUBMIT ignores 'table' and 'pad'. NXAST_RESUBMIT_TABLE requires * 'pad' to be all-bits-zero. * * Open vSwitch 1.0.1 and earlier did not support recursion. Open vSwitch * before 1.2.90 did not support NXAST_RESUBMIT_TABLE. 
*/ ''' nx_action_resubmit = nstruct( (nx_port_no, 'in_port'), # /* New in_port for checking flow table. */ (uint8, 'table'), # /* NXAST_RESUBMIT_TABLE: table to use. */ (uint8[3],), base = nx_action, criteria = lambda x: getattr(x, action_subtype) == NXAST_RESUBMIT_TABLE or getattr(x, action_subtype) == NXAST_RESUBMIT, classifyby = (NXAST_RESUBMIT_TABLE, NXAST_RESUBMIT), name = 'nx_action_resubmit', init = packvalue(NXAST_RESUBMIT_TABLE, action_subtype) ) namespace['nx_action_resubmit'] = nx_action_resubmit ''' /* Action structure for NXAST_SET_TUNNEL. * * Sets the encapsulating tunnel ID to a 32-bit value. The most-significant 32 * bits of the tunnel ID are set to 0. */ ''' nx_action_set_tunnel = nstruct( (uint8[2],), (uint32, 'tun_id'), # /* Tunnel ID. */ name = 'nx_action_set_tunnel', base = nx_action, classifyby = (NXAST_SET_TUNNEL,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SET_TUNNEL, init = packvalue(NXAST_SET_TUNNEL, action_subtype) ) namespace['nx_action_set_tunnel'] = nx_action_set_tunnel ''' /* Action structure for NXAST_SET_TUNNEL64. * * Sets the encapsulating tunnel ID to a 64-bit value. */ ''' nx_action_set_tunnel64 = nstruct( (uint8[6],), (uint64, 'tun_id'), # /* Tunnel ID. */ name = 'nx_action_set_tunnel64', base = nx_action, classifyby = (NXAST_SET_TUNNEL64,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SET_TUNNEL64, init = packvalue(NXAST_SET_TUNNEL64, action_subtype) ) namespace['nx_action_set_tunnel64'] = nx_action_set_tunnel64 ''' /* Action structure for NXAST_SET_QUEUE. * * Set the queue that should be used when packets are output. This is similar * to the OpenFlow OFPAT_ENQUEUE action, but does not take the output port as * an argument. This allows the queue to be defined before the port is * known. */ ''' nx_action_set_queue = nstruct( (uint8[2],), (uint32, 'queue_id'), # /* Where to enqueue packets. 
*/ name = 'nx_action_set_queue', base = nx_action, classifyby = (NXAST_SET_QUEUE,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SET_QUEUE, init = packvalue(NXAST_SET_QUEUE, action_subtype) ) namespace['nx_action_set_queue'] = nx_action_set_queue ''' /* Action structure for NXAST_POP_QUEUE. * * Restores the queue to the value it was before any NXAST_SET_QUEUE actions * were used. Only the original queue can be restored this way; no stack is * maintained. */ ''' nx_action_pop_queue = nstruct( (uint8[6],), name = 'nx_action_pop_queue', base = nx_action, classifyby = (NXAST_POP_QUEUE,), criteria = lambda x: getattr(x, action_subtype) == NXAST_POP_QUEUE, init = packvalue(NXAST_POP_QUEUE, action_subtype) ) namespace['nx_action_pop_queue'] = nx_action_pop_queue ''' /* Action structure for NXAST_REG_MOVE. * * Copies src[src_ofs:src_ofs+n_bits] to dst[dst_ofs:dst_ofs+n_bits], where * a[b:c] denotes the bits within 'a' numbered 'b' through 'c' (not including * bit 'c'). Bit numbering starts at 0 for the least-significant bit, 1 for * the next most significant bit, and so on. * * 'src' and 'dst' are nxm_header values with nxm_hasmask=0. (It doesn't make * sense to use nxm_hasmask=1 because the action does not do any kind of * matching; it uses the actual value of a field.) * * The following nxm_header values are potentially acceptable as 'src': * * - NXM_OF_IN_PORT * - NXM_OF_ETH_DST * - NXM_OF_ETH_SRC * - NXM_OF_ETH_TYPE * - NXM_OF_VLAN_TCI * - NXM_OF_IP_TOS * - NXM_OF_IP_PROTO * - NXM_OF_IP_SRC * - NXM_OF_IP_DST * - NXM_OF_TCP_SRC * - NXM_OF_TCP_DST * - NXM_OF_UDP_SRC * - NXM_OF_UDP_DST * - NXM_OF_ICMP_TYPE * - NXM_OF_ICMP_CODE * - NXM_OF_ARP_OP * - NXM_OF_ARP_SPA * - NXM_OF_ARP_TPA * - NXM_NX_TUN_ID * - NXM_NX_ARP_SHA * - NXM_NX_ARP_THA * - NXM_NX_ICMPV6_TYPE * - NXM_NX_ICMPV6_CODE * - NXM_NX_ND_SLL * - NXM_NX_ND_TLL * - NXM_NX_REG(idx) for idx in the switch's accepted range. 
* - NXM_NX_PKT_MARK * - NXM_NX_TUN_IPV4_SRC * - NXM_NX_TUN_IPV4_DST * * The following nxm_header values are potentially acceptable as 'dst': * * - NXM_OF_ETH_DST * - NXM_OF_ETH_SRC * - NXM_OF_IP_TOS * - NXM_OF_IP_SRC * - NXM_OF_IP_DST * - NXM_OF_TCP_SRC * - NXM_OF_TCP_DST * - NXM_OF_UDP_SRC * - NXM_OF_UDP_DST * - NXM_NX_ARP_SHA * - NXM_NX_ARP_THA * - NXM_OF_ARP_OP * - NXM_OF_ARP_SPA * - NXM_OF_ARP_TPA * Modifying any of the above fields changes the corresponding packet * header. * * - NXM_OF_IN_PORT * * - NXM_NX_REG(idx) for idx in the switch's accepted range. * * - NXM_NX_PKT_MARK * * - NXM_OF_VLAN_TCI. Modifying this field's value has side effects on the * packet's 802.1Q header. Setting a value with CFI=0 removes the 802.1Q * header (if any), ignoring the other bits. Setting a value with CFI=1 * adds or modifies the 802.1Q header appropriately, setting the TCI field * to the field's new value (with the CFI bit masked out). * * - NXM_NX_TUN_ID, NXM_NX_TUN_IPV4_SRC, NXM_NX_TUN_IPV4_DST. Modifying * any of these values modifies the corresponding tunnel header field used * for the packet's next tunnel encapsulation, if allowed by the * configuration of the output tunnel port. * * A given nxm_header value may be used as 'src' or 'dst' only on a flow whose * nx_match satisfies its prerequisites. For example, NXM_OF_IP_TOS may be * used only if the flow's nx_match includes an nxm_entry that specifies * nxm_type=NXM_OF_ETH_TYPE, nxm_hasmask=0, and nxm_value=0x0800. * * The switch will reject actions for which src_ofs+n_bits is greater than the * width of 'src' or dst_ofs+n_bits is greater than the width of 'dst' with * error type OFPET_BAD_ACTION, code OFPBAC_BAD_ARGUMENT. * * This action behaves properly when 'src' overlaps with 'dst', that is, it * behaves as if 'src' were copied out to a temporary buffer, then the * temporary buffer copied to 'dst'. */ ''' nx_action_reg_move = nstruct( (uint16, 'n_bits'), # /* Number of bits. 
*/ (uint16, 'src_ofs'), # /* Starting bit offset in source. */ (uint16, 'dst_ofs'), # /* Starting bit offset in destination. */ (nxm_header, 'src'), # /* Source register. */ (nxm_header, 'dst'), # /* Destination register. */ name = 'nx_action_reg_move', base = nx_action, classifyby = (NXAST_REG_MOVE,), criteria = lambda x: getattr(x, action_subtype) == NXAST_REG_MOVE, init = packvalue(NXAST_REG_MOVE, action_subtype), formatter = _createdesc(lambda x:'move:%s[%d..%d]->%s[%d..%d]' % (x['src'], x['src_ofs'], x['src_ofs'] + x['n_bits'] - 1, x['dst'], x['dst_ofs'], x['dst_ofs'] + x['n_bits'] - 1)) ) namespace['nx_action_reg_move'] = nx_action_reg_move ''' /* Action structure for NXAST_REG_LOAD. * * Copies value[0:n_bits] to dst[ofs:ofs+n_bits], where a[b:c] denotes the bits * within 'a' numbered 'b' through 'c' (not including bit 'c'). Bit numbering * starts at 0 for the least-significant bit, 1 for the next most significant * bit, and so on. * * 'dst' is an nxm_header with nxm_hasmask=0. See the documentation for * NXAST_REG_MOVE, above, for the permitted fields and for the side effects of * loading them. * * The 'ofs' and 'n_bits' fields are combined into a single 'ofs_nbits' field * to avoid enlarging the structure by another 8 bytes. To allow 'n_bits' to * take a value between 1 and 64 (inclusive) while taking up only 6 bits, it is * also stored as one less than its true value: * * 15 6 5 0 * +------------------------------+------------------+ * | ofs | n_bits - 1 | * +------------------------------+------------------+ * * The switch will reject actions for which ofs+n_bits is greater than the * width of 'dst', or in which any bits in 'value' with value 2**n_bits or * greater are set to 1, with error type OFPET_BAD_ACTION, code * OFPBAC_BAD_ARGUMENT. */ ''' nx_action_reg_load = nstruct( (uint16, 'ofs_nbits'), # /* (ofs << 6) | (n_bits - 1). */ (nxm_header, 'dst'), # /* Destination register. */ (uint64, 'value'), # /* Immediate value. 
*/ name = 'nx_action_reg_load', base = nx_action, classifyby = (NXAST_REG_LOAD,), criteria = lambda x: getattr(x, action_subtype) == NXAST_REG_LOAD, init = packvalue(NXAST_REG_LOAD, action_subtype), formatter = _createdesc(lambda x: 'load:0x%x->%s[%d..%d]' % (x['value'], x['dst'], x['ofs_nbits'] >> 6, (x['ofs_nbits'] >> 6) + (x['ofs_nbits'] & 0x3f))) ) namespace['nx_action_reg_load'] = nx_action_reg_load ''' /* Action structure for NXAST_STACK_PUSH and NXAST_STACK_POP. * * Pushes (or pops) field[offset: offset + n_bits] to (or from) * top of the stack. */ ''' nx_action_stack = nstruct( (uint16, 'offset'), # /* Bit offset into the field. */ (nxm_header, 'field'), # /* The field used for push or pop. */ (uint16, 'n_bits'), # /* (n_bits + 1) bits of the field. */ (uint8[6],), # /* Reserved, must be zero. */ name = 'nx_action_stack', base = nx_action, classifyby = (NXAST_STACK_PUSH, NXAST_STACK_POP), criteria = lambda x: getattr(x, action_subtype) == NXAST_STACK_PUSH or getattr(x, action_subtype) == NXAST_STACK_POP, init = packvalue(NXAST_STACK_PUSH, action_subtype), formatter = _createdesc(lambda x: '%s:%s[%d..%d]' % ('push' if x[action_subtype] == 'NXAST_STACK_PUSH' else 'pop', x['field'], x['offset'], (x['offset'] + x['n_bits'] - 1))) ) namespace['nx_action_stack'] = nx_action_stack ''' /* Action structure for NXAST_NOTE. * * This action has no effect. It is variable length. The switch does not * attempt to interpret the user-defined 'note' data in any way. A controller * can use this action to attach arbitrary metadata to a flow. * * This action might go away in the future. */ ''' nx_action_note = nstruct( (varchr, 'note'), name = 'nx_action_note', base = nx_action, classifyby = (NXAST_NOTE,), criteria = lambda x: getattr(x, action_subtype) == NXAST_NOTE, init = packvalue(NXAST_NOTE, action_subtype) ) namespace['nx_action_note'] = nx_action_note ''' /* Action structure for NXAST_MULTIPATH. * * This action performs the following steps in sequence: * * 1. 
Hashes the fields designated by 'fields', one of NX_HASH_FIELDS_*. * Refer to the definition of "enum nx_mp_fields" for details. * * The 'basis' value is used as a universal hash parameter, that is, * different values of 'basis' yield different hash functions. The * particular universal hash function used is implementation-defined. * * The hashed fields' values are drawn from the current state of the * flow, including all modifications that have been made by actions up to * this point. * * 2. Applies the multipath link choice algorithm specified by 'algorithm', * one of NX_MP_ALG_*. Refer to the definition of "enum nx_mp_algorithm" * for details. * * The output of the algorithm is 'link', an unsigned integer less than * or equal to 'max_link'. * * Some algorithms use 'arg' as an additional argument. * * 3. Stores 'link' in dst[ofs:ofs+n_bits]. The format and semantics of * 'dst' and 'ofs_nbits' are similar to those for the NXAST_REG_LOAD * action. * * The switch will reject actions that have an unknown 'fields', or an unknown * 'algorithm', or in which ofs+n_bits is greater than the width of 'dst', or * in which 'max_link' is greater than or equal to 2**n_bits, with error type * OFPET_BAD_ACTION, code OFPBAC_BAD_ARGUMENT. */ ''' nx_action_multipath = nstruct( #/* What fields to hash and how. */ (nx_hash_fields, 'fields'), # /* One of NX_HASH_FIELDS_*. */ (uint16, 'basis'), # /* Universal hash parameter. */ (uint16,), #/* Multipath link choice algorithm to apply to hash value. */ (nx_mp_algorithm, 'algorithm'), # /* One of NX_MP_ALG_*. */ (uint16, 'max_link'), # /* Number of output links, minus 1. */ (uint32, 'arg'), # /* Algorithm-specific argument. */ (uint16,), # /* Where to store the result. */ (uint16, 'ofs_nbits'), # /* (ofs << 6) | (n_bits - 1). */ (nxm_header, 'dst'), # /* Destination. 
*/ name = 'nx_action_multipath', base = nx_action, classifyby = (NXAST_MULTIPATH,), criteria = lambda x: getattr(x, action_subtype) == NXAST_MULTIPATH, init = packvalue(NXAST_MULTIPATH, action_subtype), formatter = _createdesc(lambda x: 'multipath(%s,%d,%s,%d,%d,%s[%d..%d])' % (x['fields'], x['basis'], x['algorithm'],x['max_link'] + 1, x['arg'], x['dst'], x['ofs_nbits'] >> 6, (x['ofs_nbits'] >> 6) + (x['ofs_nbits'] & 0x3f))) ) namespace['nx_action_multipath'] = nx_action_multipath ''' /* Action structure for NXAST_LEARN. * * This action adds or modifies a flow in an OpenFlow table, similar to * OFPT_FLOW_MOD with OFPFC_MODIFY_STRICT as 'command'. The new flow has the * specified idle timeout, hard timeout, priority, cookie, and flags. The new * flow's match criteria and actions are built by applying each of the series * of flow_mod_spec elements included as part of the action. * * A flow_mod_spec starts with a 16-bit header. A header that is all-bits-0 is * a no-op used for padding the action as a whole to a multiple of 8 bytes in * length. Otherwise, the flow_mod_spec can be thought of as copying 'n_bits' * bits from a source to a destination. In this case, the header contains * multiple fields: * * 15 14 13 12 11 10 0 * +------+---+------+---------------------------------+ * | 0 |src| dst | n_bits | * +------+---+------+---------------------------------+ * * The meaning and format of a flow_mod_spec depends on 'src' and 'dst'. The * following table summarizes the meaning of each possible combination. * Details follow the table: * * src dst meaning * --- --- ---------------------------------------------------------- * 0 0 Add match criteria based on value in a field. * 1 0 Add match criteria based on an immediate value. * 0 1 Add NXAST_REG_LOAD action to copy field into a different field. * 1 1 Add NXAST_REG_LOAD action to load immediate value into a field. * 0 2 Add OFPAT_OUTPUT action to output to port from specified field. 
* All other combinations are undefined and not allowed. * * The flow_mod_spec header is followed by a source specification and a * destination specification. The format and meaning of the source * specification depends on 'src': * * - If 'src' is 0, the source bits are taken from a field in the flow to * which this action is attached. (This should be a wildcarded field. If * its value is fully specified then the source bits being copied have * constant values.) * * The source specification is an ovs_be32 'field' and an ovs_be16 'ofs'. * 'field' is an nxm_header with nxm_hasmask=0, and 'ofs' the starting bit * offset within that field. The source bits are field[ofs:ofs+n_bits-1]. * 'field' and 'ofs' are subject to the same restrictions as the source * field in NXAST_REG_MOVE. * * - If 'src' is 1, the source bits are a constant value. The source * specification is (n_bits+15)/16*2 bytes long. Taking those bytes as a * number in network order, the source bits are the 'n_bits' * least-significant bits. The switch will report an error if other bits * in the constant are nonzero. * * The flow_mod_spec destination specification, for 'dst' of 0 or 1, is an * ovs_be32 'field' and an ovs_be16 'ofs'. 'field' is an nxm_header with * nxm_hasmask=0 and 'ofs' is a starting bit offset within that field. The * meaning of the flow_mod_spec depends on 'dst': * * - If 'dst' is 0, the flow_mod_spec specifies match criteria for the new * flow. The new flow matches only if bits field[ofs:ofs+n_bits-1] in a * packet equal the source bits. 'field' may be any nxm_header with * nxm_hasmask=0 that is allowed in NXT_FLOW_MOD. * * Order is significant. Earlier flow_mod_specs must satisfy any * prerequisites for matching fields specified later, by copying constant * values into prerequisite fields. * * The switch will reject flow_mod_specs that do not satisfy NXM masking * restrictions. * * - If 'dst' is 1, the flow_mod_spec specifies an NXAST_REG_LOAD action for * the new flow. 
The new flow copies the source bits into * field[ofs:ofs+n_bits-1]. Actions are executed in the same order as the * flow_mod_specs. * * A single NXAST_REG_LOAD action writes no more than 64 bits, so n_bits * greater than 64 yields multiple NXAST_REG_LOAD actions. * * The flow_mod_spec destination spec for 'dst' of 2 (when 'src' is 0) is * empty. It has the following meaning: * * - The flow_mod_spec specifies an OFPAT_OUTPUT action for the new flow. * The new flow outputs to the OpenFlow port specified by the source field. * Of the special output ports with value OFPP_MAX or larger, OFPP_IN_PORT, * OFPP_FLOOD, OFPP_LOCAL, and OFPP_ALL are supported. Other special ports * may not be used. * * Resource Management * ------------------- * * A switch has a finite amount of flow table space available for learning. * When this space is exhausted, no new learning table entries will be learned * until some existing flow table entries expire. The controller should be * prepared to handle this by flooding (which can be implemented as a * low-priority flow). * * If a learned flow matches a single TCP stream with a relatively long * timeout, one may make the best of resource constraints by setting * 'fin_idle_timeout' or 'fin_hard_timeout' (both measured in seconds), or * both, to shorter timeouts. When either of these is specified as a nonzero * value, OVS adds a NXAST_FIN_TIMEOUT action, with the specified timeouts, to * the learned flow. * * Examples * -------- * * The following examples give a prose description of the flow_mod_specs along * with informal notation for how those would be represented and a hex dump of * the bytes that would be required. * * These examples could work with various nx_action_learn parameters. Typical * values would be idle_timeout=OFP_FLOW_PERMANENT, hard_timeout=60, * priority=OFP_DEFAULT_PRIORITY, flags=0, table_id=10. * * 1. 
Learn input port based on the source MAC, with lookup into * NXM_NX_REG1[16:31] by resubmit to in_port=99: * * Match on in_port=99: * ovs_be16(src=1, dst=0, n_bits=16), 20 10 * ovs_be16(99), 00 63 * ovs_be32(NXM_OF_IN_PORT), ovs_be16(0) 00 00 00 02 00 00 * * Match Ethernet destination on Ethernet source from packet: * ovs_be16(src=0, dst=0, n_bits=48), 00 30 * ovs_be32(NXM_OF_ETH_SRC), ovs_be16(0) 00 00 04 06 00 00 * ovs_be32(NXM_OF_ETH_DST), ovs_be16(0) 00 00 02 06 00 00 * * Set NXM_NX_REG1[16:31] to the packet's input port: * ovs_be16(src=0, dst=1, n_bits=16), 08 10 * ovs_be32(NXM_OF_IN_PORT), ovs_be16(0) 00 00 00 02 00 00 * ovs_be32(NXM_NX_REG1), ovs_be16(16) 00 01 02 04 00 10 * * Given a packet that arrived on port A with Ethernet source address B, * this would set up the flow "in_port=99, dl_dst=B, * actions=load:A->NXM_NX_REG1[16..31]". * * In syntax accepted by ovs-ofctl, this action is: learn(in_port=99, * NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[], * load:NXM_OF_IN_PORT[]->NXM_NX_REG1[16..31]) * * 2. Output to input port based on the source MAC and VLAN VID, with lookup * into NXM_NX_REG1[16:31]: * * Match on same VLAN ID as packet: * ovs_be16(src=0, dst=0, n_bits=12), 00 0c * ovs_be32(NXM_OF_VLAN_TCI), ovs_be16(0) 00 00 08 02 00 00 * ovs_be32(NXM_OF_VLAN_TCI), ovs_be16(0) 00 00 08 02 00 00 * * Match Ethernet destination on Ethernet source from packet: * ovs_be16(src=0, dst=0, n_bits=48), 00 30 * ovs_be32(NXM_OF_ETH_SRC), ovs_be16(0) 00 00 04 06 00 00 * ovs_be32(NXM_OF_ETH_DST), ovs_be16(0) 00 00 02 06 00 00 * * Output to the packet's input port: * ovs_be16(src=0, dst=2, n_bits=16), 10 10 * ovs_be32(NXM_OF_IN_PORT), ovs_be16(0) 00 00 00 02 00 00 * * Given a packet that arrived on port A with Ethernet source address B in * VLAN C, this would set up the flow "dl_dst=B, vlan_vid=C, * actions=output:A". * * In syntax accepted by ovs-ofctl, this action is: * learn(NXM_OF_VLAN_TCI[0..11], NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[], * output:NXM_OF_IN_PORT[]) * * 3. 
Here's a recipe for a very simple-minded MAC learning switch. It uses a * 10-second MAC expiration time to make it easier to see what's going on * * ovs-vsctl del-controller br0 * ovs-ofctl del-flows br0 * ovs-ofctl add-flow br0 "table=0 actions=learn(table=1, \ hard_timeout=10, NXM_OF_VLAN_TCI[0..11], \ NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[], \ output:NXM_OF_IN_PORT[]), resubmit(,1)" * ovs-ofctl add-flow br0 "table=1 priority=0 actions=flood" * * You can then dump the MAC learning table with: * * ovs-ofctl dump-flows br0 table=1 * * Usage Advice * ------------ * * For best performance, segregate learned flows into a table that is not used * for any other flows except possibly for a lowest-priority "catch-all" flow * (a flow with no match criteria). If different learning actions specify * different match criteria, use different tables for the learned flows. * * The meaning of 'hard_timeout' and 'idle_timeout' can be counterintuitive. * These timeouts apply to the flow that is added, which means that a flow with * an idle timeout will expire when no traffic has been sent *to* the learned * address. This is not usually the intent in MAC learning; instead, we want * the MAC learn entry to expire when no traffic has been sent *from* the * learned address. Use a hard timeout for that. 
 */
'''

def _nx_flow_mod_spec_formatter(x):
    # Build the human-readable '_desc' for a parsed flow_mod_spec, in the same
    # style ovs-ofctl uses: "dst[lo..hi]=src", "load:src->dst[lo..hi]" or
    # "output:port".
    if NX_FLOWMODSPEC_SRC(x['header']):
        # Immediate source: render the raw bytes as one hex constant.
        srcdesc = '0x' + ''.join('%02x' % (c,) for c in bytearray(x['value']))
    else:
        srcdesc = '%s[%d..%d]' % (x['src'], x['src_ofs'], x['src_ofs'] + NX_FLOWMODSPEC_NBITS(x['header']) - 1)
    dstv = NX_FLOWMODSPEC_DST(x['header'])
    if dstv != NX_LEARN_DST_OUTPUT:
        # OUTPUT specs have no destination field; all others do.
        dstdesc = '%s[%d..%d]' % (x['dst'], x['dst_ofs'], x['dst_ofs'] + NX_FLOWMODSPEC_NBITS(x['header']) - 1)
    if dstv == NX_LEARN_DST_MATCH:
        x['_desc'] = '%s=%s' % (dstdesc, srcdesc)
    elif dstv == NX_LEARN_DST_LOAD:
        x['_desc'] = 'load:%s->%s' % (srcdesc, dstdesc)
    elif NX_FLOWMODSPEC_SRC(x['header']):
        # Output to an immediate value: the 2-byte constant is a port number.
        x['_desc'] = 'output:%s' % nxm_port_no_raw.formatter(common.create_binary(x['value'], 2))
    else:
        x['_desc'] = 'output:%s' % (srcdesc,)
    x['header'] = nx_flow_mod_spec_header.formatter(x['header'])
    return x

# A single flow_mod_spec element of an NXAST_LEARN action; see the wire-format
# description in the comment above.
nx_flow_mod_spec = nstruct(
    (uint16, 'header'),
    (_nx_flow_mod_spec_src,),
    (_nx_flow_mod_spec_dst,),
    name = 'nx_flow_mod_spec',
    padding = 1,
    formatter = _nx_flow_mod_spec_formatter,
    lastextra = False
    # if x.header == 0, size is 14, the padding should not be so large so it will not be successfully parsed
)

namespace['nx_flow_mod_spec'] = nx_flow_mod_spec

def create_nxfms_matchfield(src, dst, src_ofs = 0, dst_ofs = 0, n_bits = None):
    # Build a flow_mod_spec matching dst[dst_ofs:] against the flow's
    # src[src_ofs:] field (src = field, dst = match).
    if n_bits is None:
        n_bits = min(NXM_LENGTH(dst) * 8 - dst_ofs, NXM_LENGTH(src) * 8 - src_ofs)
    if n_bits <= 0:
        raise ValueError('Cannot create flow mod spec with 0 bits')
    return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_FIELD, NX_LEARN_DST_MATCH, n_bits) + _create_field(src, src_ofs) + _create_field(dst, dst_ofs))[0]

namespace['create_nxfms_matchfield'] = create_nxfms_matchfield

def create_nxfms_matchvalue(dst, value, dst_ofs, n_bits = None):
    # Build a flow_mod_spec matching dst[dst_ofs:] against an immediate value
    # (src = immediate, dst = match).  The immediate is padded to
    # (n_bits+15)/16*2 bytes as the wire format requires.
    if n_bits is None:
        n_bits = NXM_LENGTH(dst) * 8 - dst_ofs
    if n_bits <= 0:
        raise ValueError('Cannot create flow mod spec with 0 bits')
    return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_IMMEDIATE, NX_LEARN_DST_MATCH, n_bits) + common.create_binary(value, (n_bits + 15) // 16 * 2) + _create_field(dst, dst_ofs))[0]

namespace['create_nxfms_matchvalue'] = create_nxfms_matchvalue

def create_nxfms_loadfield(src, dst, src_ofs = 0, dst_ofs = 0, n_bits = None):
    # Build a flow_mod_spec that loads the flow's src[src_ofs:] field into
    # dst[dst_ofs:] (src = field, dst = load).
    if n_bits is None:
        n_bits = min(NXM_LENGTH(dst) * 8 - dst_ofs, NXM_LENGTH(src) * 8 - src_ofs)
    if n_bits <= 0:
        raise ValueError('Cannot create flow mod spec with 0 bits')
    return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_FIELD, NX_LEARN_DST_LOAD, n_bits) + _create_field(src, src_ofs) + _create_field(dst, dst_ofs))[0]

namespace['create_nxfms_loadfield'] = create_nxfms_loadfield

def create_nxfms_loadvalue(dst, value, dst_ofs, n_bits = None):
    # Build a flow_mod_spec that loads an immediate value into dst[dst_ofs:]
    # (src = immediate, dst = load).
    if n_bits is None:
        n_bits = NXM_LENGTH(dst) * 8 - dst_ofs
    if n_bits <= 0:
        raise ValueError('Cannot create flow mod spec with 0 bits')
    return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_IMMEDIATE, NX_LEARN_DST_LOAD, n_bits) + common.create_binary(value, (n_bits + 15) // 16 * 2) + _create_field(dst, dst_ofs))[0]

namespace['create_nxfms_loadvalue'] = create_nxfms_loadvalue

def create_nxfms_outputfield(src, src_ofs = 0, n_bits = None):
    # Build a flow_mod_spec that outputs to the port stored in the flow's
    # src[src_ofs:] field (src = field, dst = output; no destination spec).
    if n_bits is None:
        n_bits = NXM_LENGTH(src) * 8 - src_ofs
    if n_bits <= 0:
        raise ValueError('Cannot create flow mod spec with 0 bits')
    return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_FIELD, NX_LEARN_DST_OUTPUT, n_bits) + _create_field(src, src_ofs))[0]

namespace['create_nxfms_outputfield'] = create_nxfms_outputfield

def create_nxfms_outputvalue(dst, value):
    # Build a flow_mod_spec that outputs to a fixed port number
    # (src = immediate, dst = output, always 16 bits).
    return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_IMMEDIATE, NX_LEARN_DST_OUTPUT, 16) + common.create_binary(value, 2))[0]

namespace['create_nxfms_outputvalue'] = create_nxfms_outputvalue

ofp_flow_mod_flags = namespace['ofp_flow_mod_flags']

nx_action_learn = nstruct(
    (uint16, 'idle_timeout'),       # /* Idle time before discarding (seconds). */
    (uint16, 'hard_timeout'),       # /* Max time before discarding (seconds). */
    (uint16, 'priority'),           # /* Priority level of flow entry. */
    (uint64, 'cookie'),             # /* Cookie for new flow. */
    (ofp_flow_mod_flags, 'flags'),  # /* Either 0 or OFPFF_SEND_FLOW_REM. */
    (uint8, 'table_id'),            # /* Table to insert flow entry. */
    (uint8,),                       # /* Must be zero. */
    (uint16, 'fin_idle_timeout'),   # /* Idle timeout after FIN, if nonzero. */
    (uint16, 'fin_hard_timeout'),   # /* Hard timeout after FIN, if nonzero. */
    (nx_flow_mod_spec[0], 'specs'),
    base = nx_action,
    name = 'nx_action_learn',
    classifyby = (NXAST_LEARN,),
    criteria = lambda x: getattr(x, action_subtype) == NXAST_LEARN,
    init = packvalue(NXAST_LEARN, action_subtype),
)
namespace['nx_action_learn'] = nx_action_learn

'''
/* Action structure for NXAST_FIN_TIMEOUT.
 *
 * This action changes the idle timeout or hard timeout, or both, of this
 * OpenFlow rule when the rule matches a TCP packet with the FIN or RST flag.
 * When such a packet is observed, the action reduces the rule's idle timeout
 * to 'fin_idle_timeout' and its hard timeout to 'fin_hard_timeout'.  This
 * action has no effect on an existing timeout that is already shorter than the
 * one that the action specifies.  A 'fin_idle_timeout' or 'fin_hard_timeout'
 * of zero has no effect on the respective timeout.
 *
 * 'fin_idle_timeout' and 'fin_hard_timeout' are measured in seconds.
 * 'fin_hard_timeout' specifies time since the flow's creation, not since the
 * receipt of the FIN or RST.
 *
 * This is useful for quickly discarding learned TCP flows that otherwise will
 * take a long time to expire.
 *
 * This action is intended for use with an OpenFlow rule that matches only a
 * single TCP flow.  If the rule matches multiple TCP flows (e.g. it wildcards
 * all TCP traffic, or all TCP traffic to a particular port), then any FIN or
 * RST in any of those flows will cause the entire OpenFlow rule to expire
 * early, which is not normally desirable.
 */
'''
nx_action_fin_timeout = nstruct(
    (uint16, 'fin_idle_timeout'),   # /* New idle timeout, if nonzero. */
    (uint16, 'fin_hard_timeout'),   # /* New hard timeout, if nonzero. */
    (uint16,),
    base = nx_action,
    name = 'nx_action_fin_timeout',
    criteria = lambda x: getattr(x, action_subtype) == NXAST_FIN_TIMEOUT,
    classifyby = (NXAST_FIN_TIMEOUT,),
    init = packvalue(NXAST_FIN_TIMEOUT, action_subtype)
)
namespace['nx_action_fin_timeout'] = nx_action_fin_timeout

'''
/* Action structure for NXAST_BUNDLE and NXAST_BUNDLE_LOAD.
 *
 * The bundle actions choose a slave from a supplied list of options.
 * NXAST_BUNDLE outputs to its selection.  NXAST_BUNDLE_LOAD writes its
 * selection to a register.
 *
 * The list of possible slaves follows the nx_action_bundle structure. The size
 * of each slave is governed by its type as indicated by the 'slave_type'
 * parameter. The list of slaves should be padded at its end with zeros to make
 * the total length of the action a multiple of 8.
 *
 * Switches infer from the 'slave_type' parameter the size of each slave.  All
 * implementations must support the NXM_OF_IN_PORT 'slave_type' which indicates
 * that the slaves are OpenFlow port numbers with NXM_LENGTH(NXM_OF_IN_PORT) ==
 * 2 byte width.  Switches should reject actions which indicate unknown or
 * unsupported slave types.
 *
 * Switches use a strategy dictated by the 'algorithm' parameter to choose a
 * slave.  If the switch does not support the specified 'algorithm' parameter,
 * it should reject the action.
 *
 * Several algorithms take into account liveness when selecting slaves.  The
 * liveness of a slave is implementation defined (with one exception), but will
 * generally take into account things like its carrier status and the results
 * of any link monitoring protocols which happen to be running on it.  In order
 * to give controllers a place-holder value, the OFPP_NONE port is always
 * considered live.
 *
 * Some slave selection strategies require the use of a hash function, in which
 * case the 'fields' and 'basis' parameters should be populated.  The 'fields'
 * parameter (one of NX_HASH_FIELDS_*) designates which parts of the flow to
 * hash.
Refer to the definition of "enum nx_hash_fields" for details. The * 'basis' parameter is used as a universal hash parameter. Different values * of 'basis' yield different hash results. * * The 'zero' parameter at the end of the action structure is reserved for * future use. Switches are required to reject actions which have nonzero * bytes in the 'zero' field. * * NXAST_BUNDLE actions should have 'ofs_nbits' and 'dst' zeroed. Switches * should reject actions which have nonzero bytes in either of these fields. * * NXAST_BUNDLE_LOAD stores the OpenFlow port number of the selected slave in * dst[ofs:ofs+n_bits]. The format and semantics of 'dst' and 'ofs_nbits' are * similar to those for the NXAST_REG_LOAD action. */ ''' nx_action_bundle = nstruct( # /* Slave choice algorithm to apply to hash value. */ (nx_bd_algorithm, 'algorithm'), # /* One of NX_BD_ALG_*. */ # /* What fields to hash and how. */ (nx_hash_fields, 'fields'), # /* One of NX_HASH_FIELDS_*. */ (uint16, 'basis'), # /* Universal hash parameter. */ (nxm_header, 'slave_type'), # /* NXM_OF_IN_PORT. */ (uint16, 'n_slaves'), # /* Number of slaves. */ (uint16, 'ofs_nbits'), # /* (ofs << 6) | (n_bits - 1). */ (nxm_header, 'dst'), # /* Destination. */ (uint8[4],), # /* Reserved. Must be zero. 
*/ name = 'nx_action_bundle', base = nx_action, criteria = lambda x: getattr(x, action_subtype) == NXAST_BUNDLE or getattr(x, action_subtype) == NXAST_BUNDLE_LOAD, classifyby = (NXAST_BUNDLE, NXAST_BUNDLE_LOAD), init = packvalue(NXAST_BUNDLE, action_subtype) ) namespace['nx_action_bundle'] = nx_action_bundle def _nx_slave_ports_prepack(x): x.n_slaves = len(x.bundles) _nx_slave_ports = nstruct( (nx_port_no[0], 'bundles'), name = '_nx_slave_ports', size = lambda x: x.n_slaves * 2, prepack = _nx_slave_ports_prepack, padding = 1 ) nx_action_bundle_port = nstruct( (_nx_slave_ports,), base = nx_action_bundle, name = 'nx_action_bundle_port', criteria = lambda x: x.slave_type == NXM_OF_IN_PORT, init = packvalue(NXM_OF_IN_PORT, 'slave_type'), lastextra = False, formatter = _createdesc(lambda x: 'bundle_load(%s,%d,%s,%s,%s[%d..%d],slaves:%r)' % \ (x['fields'], x['basis'], x['algorithm'], x['slave_type'], x['dst'], x['ofs_nbits'] >> 6, (x['ofs_nbits'] >> 6) + (x['ofs_nbits'] & 0x3f), x['bundles']) \ if x[action_subtype] == 'NXAST_BUNDLE_LOAD' else 'bundle(%s,%d,%s,%s,slaves:%r)' % (x['fields'], x['basis'], x['algorithm'], x['slave_type'], x['bundles'])) ) namespace['nx_action_bundle_port'] = nx_action_bundle_port def _nx_slave_others_prepack(x): x.n_slaves = len(x.bundlesraw) // NXM_LENGTH(x.slave_type) _nx_slave_others = nstruct( (raw, 'bundlesraw'), name = '_nx_slave_others', size = lambda x: x.n_slaves * NXM_LENGTH(x.slave_type), prepack = _nx_slave_others_prepack, padding = 1 ) nx_action_bundle_others = nstruct( (_nx_slave_others,), base = nx_action_bundle, name = 'nx_action_bundle_others', criteria = lambda x: x.slave_type != NXM_OF_IN_PORT, lastextra = False, init = packvalue(NXM_OF_ETH_DST, 'slave_type'), formatter = _createdesc(lambda x: 'bundle_load(%s,%d,%s,%s,%s[%d..%d],slaves:%r)' % \ (x['fields'], x['basis'], x['algorithm'], x['slave_type'], x['dst'], x['ofs_nbits'] >> 6, (x['ofs_nbits'] >> 6) + (x['ofs_nbits'] & 0x3f), x['bundleraw']) \ if x[action_subtype] == 
'NXAST_BUNDLE_LOAD' else 'bundle(%s,%d,%s,%s,slaves:%r)' % (x['fields'], x['basis'], x['algorithm'], x['slave_type'], x['bundleraw'])) ) namespace['nx_action_bundle_others'] = nx_action_bundle_others ''' /* Action structure for NXAST_DEC_TTL_CNT_IDS. * * If the packet is not IPv4 or IPv6, does nothing. For IPv4 or IPv6, if the * TTL or hop limit is at least 2, decrements it by 1. Otherwise, if TTL or * hop limit is 0 or 1, sends a packet-in to the controllers with each of the * 'n_controllers' controller IDs specified in 'cnt_ids'. * * (This differs from NXAST_DEC_TTL in that for NXAST_DEC_TTL the packet-in is * sent only to controllers with id 0.) */ ''' def _nx_action_cnt_ids_ids_prepack(x): x.n_controllers = len(x.cnt_ids) _nx_action_cnt_ids_ids = nstruct( (uint16[0], 'cnt_ids'), name = '_nx_action_cnt_ids_ids', size = lambda x: 2 * x.n_controllers, prepack = _nx_action_cnt_ids_ids_prepack ) nx_action_cnt_ids = nstruct( (uint16, 'n_controllers'), # /* Number of controllers. */ (uint8[4],), # /* Must be zero. */ (_nx_action_cnt_ids_ids,), base = nx_action, classifyby = (NXAST_DEC_TTL_CNT_IDS,), criteria = lambda x: getattr(x, action_subtype) == NXAST_DEC_TTL_CNT_IDS, init = packvalue(NXAST_DEC_TTL_CNT_IDS, action_subtype), lastextra = False, name = 'nx_action_cnt_ids' ) namespace['nx_action_cnt_ids'] = nx_action_cnt_ids ''' /* Action structure for NXAST_OUTPUT_REG. * * Outputs to the OpenFlow port number written to src[ofs:ofs+nbits]. * * The format and semantics of 'src' and 'ofs_nbits' are similar to those for * the NXAST_REG_LOAD action. * * The acceptable nxm_header values for 'src' are the same as the acceptable * nxm_header values for the 'src' field of NXAST_REG_MOVE. * * The 'max_len' field indicates the number of bytes to send when the chosen * port is OFPP_CONTROLLER. Its semantics are equivalent to the 'max_len' * field of OFPAT_OUTPUT. * * The 'zero' field is required to be zeroed for forward compatibility. 
 */
'''
nx_action_output_reg = nstruct(
    (uint16, 'ofs_nbits'),      # /* (ofs << 6) | (n_bits - 1). */
    (nxm_header, 'src'),        # /* Source. */
    (uint16, 'max_len'),        # /* Max length to send to controller. */
    (uint8[6],),                # /* Reserved, must be zero. */
    base = nx_action,
    classifyby = (NXAST_OUTPUT_REG,),
    criteria = lambda x: getattr(x, action_subtype) == NXAST_OUTPUT_REG,
    init = packvalue(NXAST_OUTPUT_REG, action_subtype),
    name = 'nx_action_output_reg',
    formatter = _createdesc(lambda x: 'output:%s[%d..%d]' % (x['src'], x['ofs_nbits'] >> 6, (x['ofs_nbits'] >> 6) + (x['ofs_nbits'] & 0x3f)))
)
namespace['nx_action_output_reg'] = nx_action_output_reg

'''
/* NXAST_EXIT
 *
 * Discontinues action processing.
 *
 * The NXAST_EXIT action causes the switch to immediately halt processing
 * actions for the flow.  Any actions which have already been processed are
 * executed by the switch.  However, any further actions, including those which
 * may be in different tables, or different levels of the NXAST_RESUBMIT
 * hierarchy, will be ignored.
 *
 * Uses the nx_action_header structure. */

/* ## --------------------- ## */
/* ## Requests and replies. ## */
/* ## --------------------- ## */
'''

'''
/* NXT_SET_FLOW_FORMAT request. */
'''
nx_set_flow_format = nstruct(
    (nx_flow_format, 'format'),     # /* One of NXFF_*. */
    name = 'nx_set_flow_format',
    base = nicira_header,
    criteria = lambda x: getattr(x, msg_subtype) == NXT_SET_FLOW_FORMAT,
    classifyby = (NXT_SET_FLOW_FORMAT,),
    init = packvalue(NXT_SET_FLOW_FORMAT, msg_subtype)
)
namespace['nx_set_flow_format'] = nx_set_flow_format

'''
/* NXT_FLOW_MOD (analogous to OFPT_FLOW_MOD).
 *
 * It is possible to limit flow deletions and modifications to certain
 * cookies by using the NXM_NX_COOKIE(_W) matches.  The "cookie" field
 * is used only to add or modify flow cookies.
 */
'''
ofp_flow_mod_command = namespace['ofp_flow_mod_command']

nx_flow_mod = nstruct(
    (uint64, 'cookie'),                 # /* Opaque controller-issued identifier. */
    (ofp_flow_mod_command, 'command'),  # /* OFPFC_* + possibly a table ID (see comment
                                        #  * on struct nx_flow_mod_table_id). */
    (uint16, 'idle_timeout'),           # /* Idle time before discarding (seconds). */
    (uint16, 'hard_timeout'),           # /* Max time before discarding (seconds). */
    (uint16, 'priority'),               # /* Priority level of flow entry. */
    (uint32, 'buffer_id'),              # /* Buffered packet to apply to (or -1).
                                        #    Not meaningful for OFPFC_DELETE*. */
    (nx_port_no, 'out_port'),           # /* For OFPFC_DELETE* commands, require
                                        #    matching entries to include this as an
                                        #    output port.  A value of OFPP_NONE
                                        #    indicates no restriction. */
    (ofp_flow_mod_flags, 'flags'),      # /* One of OFPFF_*. */
    (uint16, 'match_len'),              # /* Size of nx_match. */
    (uint8[6],),                        # /* Align to 64-bits. */
    (nx_matches,),
    base = nicira_header,
    criteria = lambda x: getattr(x, msg_subtype) == NXT_FLOW_MOD,
    classifyby = (NXT_FLOW_MOD,),
    init = packvalue(NXT_FLOW_MOD, msg_subtype),
    name = 'nx_flow_mod'
)
namespace['nx_flow_mod'] = nx_flow_mod

'''
/* NXT_FLOW_REMOVED (analogous to OFPT_FLOW_REMOVED).
 *
 * 'table_id' is present only in Open vSwitch 1.11 and later.  In earlier
 * versions of Open vSwitch, this is a padding byte that is always zeroed.
 * Therefore, a 'table_id' value of 0 indicates that the table ID is not known,
 * and other values may be interpreted as one more than the flow's former table
 * ID. */
'''
nx_flow_removed = nstruct(
    (uint64, 'cookie'),         # /* Opaque controller-issued identifier. */
    (uint16, 'priority'),       # /* Priority level of flow entry. */
    (ofp_flow_removed_reason, 'reason'),    # /* One of OFPRR_*. */
    (uint8, 'table_id'),        # /* Flow's former table ID, plus one. */
    (uint32, 'duration_sec'),   # /* Time flow was alive in seconds. */
    (uint32, 'duration_nsec'),  # /* Time flow was alive in nanoseconds beyond
                                #    duration_sec. */
    (uint16, 'idle_timeout'),   # /* Idle timeout from original flow mod. */
    (uint16, 'match_len'),      # /* Size of nx_match. */
    (uint64, 'packet_count'),
    (uint64, 'byte_count'),
    (nx_matches,),
    base = nicira_header,
    criteria = lambda x: getattr(x, msg_subtype) == NXT_FLOW_REMOVED,
    classifyby = (NXT_FLOW_REMOVED,),
    init = packvalue(NXT_FLOW_REMOVED, msg_subtype),
    name = 'nx_flow_removed'
)
namespace['nx_flow_removed'] = nx_flow_removed

'''
/* Nicira vendor stats request of type NXST_FLOW (analogous to OFPST_FLOW
 * request).
 *
 * It is possible to limit matches to certain cookies by using the
 * NXM_NX_COOKIE and NXM_NX_COOKIE_W matches.
 */
'''
nx_flow_stats_request = nstruct(
    (nx_port_no, 'out_port'),   # /* Require matching entries to include this
                                #    as an output port.  A value of OFPP_NONE
                                #    indicates no restriction. */
    (uint16, 'match_len'),      # /* Length of nx_match. */
    (uint8, 'table_id'),        # /* ID of table to read (from ofp_table_stats)
                                #    or 0xff for all tables. */
    (uint8[3],),                # /* Align to 64 bits. */
    (nx_matches,),
    base = nx_stats_request,
    criteria = lambda x: getattr(x, stats_subtype) == NXST_FLOW,
    classifyby = (NXST_FLOW,),
    init = packvalue(NXST_FLOW, stats_subtype),
    name = 'nx_flow_stats_request'
)
namespace['nx_flow_stats_request'] = nx_flow_stats_request

'''
/* Body for Nicira vendor stats reply of type NXST_FLOW (analogous to
 * OFPST_FLOW reply).
 *
 * The values of 'idle_age' and 'hard_age' are only meaningful when talking to
 * a switch that implements the NXT_FLOW_AGE extension.  Zero means that the
 * true value is unknown, perhaps because hardware does not track the value.
 * (Zero is also the value that one should ordinarily expect to see talking to
 * a switch that does not implement NXT_FLOW_AGE, since those switches zero the
 * padding bytes that these fields replaced.)  A nonzero value X represents X-1
 * seconds.  A value of 65535 represents 65534 or more seconds.
 *
 * 'idle_age' is the number of seconds that the flow has been idle, that is,
 * the number of seconds since a packet passed through the flow.  'hard_age' is
 * the number of seconds since the flow was last modified (e.g.
 OFPFC_MODIFY or
 * OFPFC_MODIFY_STRICT).  (The 'duration_*' fields are the elapsed time since
 * the flow was added, regardless of subsequent modifications.)
 *
 * For a flow with an idle or hard timeout, 'idle_age' or 'hard_age',
 * respectively, will ordinarily be smaller than the timeout, but flow
 * expiration times are only approximate and so one must be prepared to
 * tolerate expirations that occur somewhat early or late.
 */
'''
ofp_action = namespace['ofp_action']

nx_flow_stats = nstruct(
    (uint16, 'length'),         # /* Length of this entry. */
    (uint8, 'table_id'),        # /* ID of table flow came from. */
    (uint8,),
    (uint32, 'duration_sec'),   # /* Time flow has been alive in seconds. */
    (uint32, 'duration_nsec'),  # /* Time flow has been alive in nanoseconds
                                #    beyond duration_sec. */
    (uint16, 'priority'),       # /* Priority of the entry. */
    (uint16, 'idle_timeout'),   # /* Number of seconds idle before expiration. */
    (uint16, 'hard_timeout'),   # /* Number of seconds before expiration. */
    (uint16, 'match_len'),      # /* Length of nx_match. */
    (uint16, 'idle_age'),       # /* Seconds since last packet, plus one. */
    (uint16, 'hard_age'),       # /* Seconds since last modification, plus one. */
    (uint64, 'cookie'),         # /* Opaque controller-issued identifier. */
    (uint64, 'packet_count'),   # /* Number of packets, UINT64_MAX if unknown. */
    (uint64, 'byte_count'),     # /* Number of bytes, UINT64_MAX if unknown. */
    #=======================================================================
    # /* Followed by:
    #  *   - Exactly match_len (possibly 0) bytes containing the nx_match, then
    #  *   - Exactly (match_len + 7)/8*8 - match_len (between 0 and 7) bytes of
    #  *     all-zero bytes, then
    #  *   - Actions to fill out the remainder 'length' bytes (always a multiple
    #  *     of 8).
    #  */
    #=======================================================================
    (nx_matches,),
    (ofp_action[0], 'actions'),
    name = 'nx_flow_stats',
    size = sizefromlen(65536, 'length'),
    prepack = packsize('length')
)
namespace['nx_flow_stats'] = nx_flow_stats

nx_flow_stats_reply = nstruct(
    (nx_flow_stats[0], 'stats'),
    base = nx_stats_reply,
    classifyby = (NXST_FLOW,),
    criteria = lambda x: getattr(x, stats_subtype) == NXST_FLOW,
    init = packvalue(NXST_FLOW, stats_subtype),
    name = 'nx_flow_stats_reply'
)
namespace['nx_flow_stats_reply'] = nx_flow_stats_reply

'''
/* Nicira vendor stats request of type NXST_AGGREGATE (analogous to
 * OFPST_AGGREGATE request).
 *
 * The reply format is identical to the reply format for OFPST_AGGREGATE,
 * except for the header. */
'''
nx_aggregate_stats_request = nstruct(
    (nx_port_no, 'out_port'),   # /* Require matching entries to include this
                                #    as an output port.  A value of OFPP_NONE
                                #    indicates no restriction. */
    (uint16, 'match_len'),      # /* Length of nx_match. */
    (uint8, 'table_id'),        # /* ID of table to read (from ofp_table_stats)
                                #    or 0xff for all tables. */
    (uint8[3],),                # /* Align to 64 bits. */
    #=======================================================================
    # /* Followed by:
    #  *   - Exactly match_len (possibly 0) bytes containing the nx_match, then
    #  *   - Exactly (match_len + 7)/8*8 - match_len (between 0 and 7) bytes of
    #  *     all-zero bytes, which must also exactly fill out the length of the
    #  *     message.
    #  */
    #=======================================================================
    (nx_matches,),
    base = nx_stats_request,
    name = 'nx_aggregate_stats_request',
    criteria = lambda x: getattr(x, stats_subtype) == NXST_AGGREGATE,
    classifyby = (NXST_AGGREGATE,),
    init = packvalue(NXST_AGGREGATE, stats_subtype),
    lastextra = False
)
namespace['nx_aggregate_stats_request'] = nx_aggregate_stats_request

nx_aggregate_stats_reply = nstruct(
    (uint64, 'packet_count'),   # /* Number of packets in flows. */
    (uint64, 'byte_count'),     # /* Number of bytes in flows. */
    (uint32, 'flow_count'),     # /* Number of flows. */
    (uint8[4],),
    base = nx_stats_reply,
    name = 'nx_aggregate_stats_reply',
    criteria = lambda x: getattr(x, stats_subtype) == NXST_AGGREGATE,
    classifyby = (NXST_AGGREGATE,),
    init = packvalue(NXST_AGGREGATE, stats_subtype)
)
namespace['nx_aggregate_stats_reply'] = nx_aggregate_stats_reply

'''
/* NXT_SET_CONTROLLER_ID.
 *
 * Each OpenFlow controller connection has a 16-bit identifier that is
 * initially 0.  This message changes the connection's ID to 'id'.
 *
 * Controller connection IDs need not be unique.
 *
 * The NXAST_CONTROLLER action is the only current user of controller
 * connection IDs. */
'''
nx_controller_id = nstruct(
    (uint8[6],),                # /* Must be zero. */
    (uint16, 'controller_id'),  # /* New controller connection ID. */
    base = nicira_header,
    name = 'nx_controller_id',
    criteria = lambda x: getattr(x, msg_subtype) == NXT_SET_CONTROLLER_ID,
    init = packvalue(NXT_SET_CONTROLLER_ID, msg_subtype),
    classifyby = (NXT_SET_CONTROLLER_ID,)
)
namespace['nx_controller_id'] = nx_controller_id

'''
/* Action structure for NXAST_CONTROLLER.
 *
 * This generalizes using OFPAT_OUTPUT to send a packet to OFPP_CONTROLLER.  In
 * addition to the 'max_len' that OFPAT_OUTPUT supports, it also allows
 * specifying:
 *
 *    - 'reason': The reason code to use in the ofp_packet_in or nx_packet_in.
 *
 *    - 'controller_id': The ID of the controller connection to which the
 *      ofp_packet_in should be sent.  The ofp_packet_in or nx_packet_in is
 *      sent only to controllers that have the specified controller connection
 *      ID.  See "struct nx_controller_id" for more information. */
'''
nx_action_controller = nstruct(
    (uint16, 'max_len'),        # /* Maximum length to send to controller. */
    (uint16, 'controller_id'),  # /* Controller ID to send packet-in. */
    (ofp_packet_in_reason, 'reason'),   # /* enum ofp_packet_in_reason (OFPR_*). */
    (uint8,),
    base = nx_action,
    name = 'nx_action_controller',
    criteria = lambda x: getattr(x, action_subtype) == NXAST_CONTROLLER,
    classifyby = (NXAST_CONTROLLER,),
    init = packvalue(NXAST_CONTROLLER, action_subtype)
)
namespace['nx_action_controller'] = nx_action_controller

'''
/* Flow Table Monitoring
 * =====================
 *
 * NXST_FLOW_MONITOR allows a controller to keep track of changes to OpenFlow
 * flow table(s) or subsets of them, with the following workflow:
 *
 * 1. The controller sends an NXST_FLOW_MONITOR request to begin monitoring
 *    flows.  The 'id' in the request must be unique among all monitors that
 *    the controller has started and not yet canceled on this OpenFlow
 *    connection.
 *
 * 2. The switch responds with an NXST_FLOW_MONITOR reply.  If the request's
 *    'flags' included NXFMF_INITIAL, the reply includes all the flows that
 *    matched the request at the time of the request (with event NXFME_ADDED).
 *    If 'flags' did not include NXFMF_INITIAL, the reply is empty.
 *
 *    The reply uses the xid of the request (as do all replies to OpenFlow
 *    requests).
 *
 * 3. Whenever a change to a flow table entry matches some outstanding monitor
 *    request's criteria and flags, the switch sends a notification to the
 *    controller as an additional NXST_FLOW_MONITOR reply with xid 0.
 *
 *    When multiple outstanding monitors match a single change, only a single
 *    notification is sent.  This merged notification includes the information
 *    requested in any of the individual monitors.  That is, if any of the
 *    matching monitors requests actions (NXFMF_ACTIONS), the notification
 *    includes actions, and if any of the monitors request full changes for the
 *    controller's own changes (NXFMF_OWN), the controller's own changes will
 *    be included in full.
 *
 * 4. The controller may cancel a monitor with NXT_FLOW_MONITOR_CANCEL.  No
 *    further notifications will be sent on the basis of the canceled monitor
 *    afterward.
* * * Buffer Management * ================= * * OpenFlow messages for flow monitor notifications can overflow the buffer * space available to the switch, either temporarily (e.g. due to network * conditions slowing OpenFlow traffic) or more permanently (e.g. the sustained * rate of flow table change exceeds the network bandwidth between switch and * controller). * * When Open vSwitch's notification buffer space reaches a limiting threshold, * OVS reacts as follows: * * 1. OVS sends an NXT_FLOW_MONITOR_PAUSED message to the controller, following * all the already queued notifications. After it receives this message, * the controller knows that its view of the flow table, as represented by * flow monitor notifications, is incomplete. * * 2. As long as the notification buffer is not empty: * * - NXMFE_ADD and NXFME_MODIFIED notifications will not be sent. * * - NXFME_DELETED notifications will still be sent, but only for flows * that existed before OVS sent NXT_FLOW_MONITOR_PAUSED. * * - NXFME_ABBREV notifications will not be sent. They are treated as * the expanded version (and therefore only the NXFME_DELETED * components, if any, are sent). * * 3. When the notification buffer empties, OVS sends NXFME_ADD notifications * for flows added since the buffer reached its limit and NXFME_MODIFIED * notifications for flows that existed before the limit was reached and * changed after the limit was reached. * * 4. OVS sends an NXT_FLOW_MONITOR_RESUMED message to the controller. After * it receives this message, the controller knows that its view of the flow * table, as represented by flow monitor notifications, is again complete. * * This allows the maximum buffer space requirement for notifications to be * bounded by the limit plus the maximum number of supported flows. * * * "Flow Removed" messages * ======================= * * The flow monitor mechanism is independent of OFPT_FLOW_REMOVED and * NXT_FLOW_REMOVED. 
Flow monitor updates for deletion are sent if * NXFMF_DELETE is set on a monitor, regardless of whether the * OFPFF_SEND_FLOW_REM flag was set when the flow was added. */ /* NXST_FLOW_MONITOR request. * * The NXST_FLOW_MONITOR request's body consists of an array of zero or more * instances of this structure. The request arranges to monitor the flows * that match the specified criteria, which are interpreted in the same way as * for NXST_FLOW. * * 'id' identifies a particular monitor for the purpose of allowing it to be * canceled later with NXT_FLOW_MONITOR_CANCEL. 'id' must be unique among * existing monitors that have not already been canceled. * * The reply includes the initial flow matches for monitors that have the * NXFMF_INITIAL flag set. No single flow will be included in the reply more * than once, even if more than one requested monitor matches that flow. The * reply will be empty if none of the monitors has NXFMF_INITIAL set or if none * of the monitors initially matches any flows. * * For NXFMF_ADD, an event will be reported if 'out_port' matches against the * actions of the flow being added or, for a flow that is replacing an existing * flow, if 'out_port' matches against the actions of the flow being replaced. * For NXFMF_DELETE, 'out_port' matches against the actions of a flow being * deleted. For NXFMF_MODIFY, an event will be reported if 'out_port' matches * either the old or the new actions. */ ''' ofp_table = namespace['ofp_table'] nx_flow_monitor_request = nstruct( (uint32, 'id'), # /* Controller-assigned ID for this monitor. */ (nx_flow_monitor_flags, 'flags'), # /* NXFMF_*. */ (nx_port_no, 'out_port'), # /* Required output port, if not OFPP_NONE. */ (uint16, 'match_len'), # /* Length of nx_match. */ (ofp_table, 'table_id'), # /* One table's ID or 0xff for all tables. */ (uint8[5],), # /* Align to 64 bits (must be zero). 
*/ (nx_matches,), name = 'nx_flow_monitor_request', base = nx_stats_request, criteria = lambda x: getattr(x, stats_subtype) == NXST_FLOW_MONITOR, init = packvalue(NXST_FLOW_MONITOR, stats_subtype), classifyby = (NXST_FLOW_MONITOR,) ) namespace['nx_flow_monitor_request'] = nx_flow_monitor_request ''' /* NXST_FLOW_MONITOR reply header. * * The body of an NXST_FLOW_MONITOR reply is an array of variable-length * structures, each of which begins with this header. The 'length' member may * be used to traverse the array, and the 'event' member may be used to * determine the particular structure. * * Every instance is a multiple of 8 bytes long. */ ''' nx_flow_update = nstruct( (uint16, 'length'), #/* Length of this entry. */ (nx_flow_update_event, 'event'), # /* One of NXFME_*. */ name = 'nx_flow_update', size = sizefromlen(65536, 'length'), prepack = packsize('length') ) namespace['nx_flow_update'] = nx_flow_update ''' /* NXST_FLOW_MONITOR reply for NXFME_ADDED, NXFME_DELETED, and * NXFME_MODIFIED. */ ''' nx_flow_update_full = nstruct( (ofp_flow_removed_reason, 'reason'), # /* OFPRR_* for NXFME_DELETED, else zero. */ (uint16, 'priority'), # /* Priority of the entry. */ (uint16, 'idle_timeout'), # /* Number of seconds idle before expiration. */ (uint16, 'hard_timeout'), # /* Number of seconds before expiration. */ (uint16, 'match_len'), # /* Length of nx_match. */ (uint8, 'table_id'), # /* ID of flow's table. */ (uint8,), # /* Reserved, currently zeroed. */ (uint64, 'cookie'), # /* Opaque controller-issued identifier. */ #======================================================================= # /* Followed by: # * - Exactly match_len (possibly 0) bytes containing the nx_match, then # * - Exactly (match_len + 7)/8*8 - match_len (between 0 and 7) bytes of # * all-zero bytes, then # * - Actions to fill out the remainder 'length' bytes (always a multiple # * of 8). If NXFMF_ACTIONS was not specified, or 'event' is # * NXFME_DELETED, no actions are included. 
# */ #======================================================================= (nx_matches,), (ofp_action[0], 'actions'), name = 'nx_flow_update_full', base = nx_flow_update, criteria = lambda x: x.event in (NXFME_ADDED, NXFME_DELETED, NXFME_MODIFIED), init = packvalue(NXFME_ADDED, 'event') ) namespace['nx_flow_update_full'] = nx_flow_update_full ''' /* NXST_FLOW_MONITOR reply for NXFME_ABBREV. * * When the controller does not specify NXFMF_OWN in a monitor request, any * flow tables changes due to the controller's own requests (on the same * OpenFlow channel) will be abbreviated, when possible, to this form, which * simply specifies the 'xid' of the OpenFlow request (e.g. an OFPT_FLOW_MOD or * NXT_FLOW_MOD) that caused the change. * * Some changes cannot be abbreviated and will be sent in full: * * - Changes that only partially succeed. This can happen if, for example, * a flow_mod with type OFPFC_MODIFY affects multiple flows, but only some * of those modifications succeed (e.g. due to hardware limitations). * * This cannot occur with the current implementation of the Open vSwitch * software datapath. It could happen with other datapath implementations. * * - Changes that race with conflicting changes made by other controllers or * other flow_mods (not separated by barriers) by the same controller. * * This cannot occur with the current Open vSwitch implementation * (regardless of datapath) because Open vSwitch internally serializes * potentially conflicting changes. * * A flow_mod that does not change the flow table will not trigger any * notification, even an abbreviated one. For example, a "modify" or "delete" * flow_mod that does not match any flows will not trigger a notification. * Whether an "add" or "modify" that specifies all the same parameters that a * flow already has triggers a notification is unspecified and subject to * change in future versions of Open vSwitch. 
* * OVS will always send the notifications for a given flow table change before * the reply to a OFPT_BARRIER_REQUEST request that follows the flow table * change. Thus, if the controller does not receive an abbreviated (or * unabbreviated) notification for a flow_mod before the next * OFPT_BARRIER_REPLY, it will never receive one. */ ''' nx_flow_update_abbrev = nstruct( (uint32, 'xid'), # /* Controller-specified xid from flow_mod. */ name = 'nx_flow_update_abbrev', base = nx_flow_update, criteria = lambda x: x.event == NXFME_ABBREV, init = packvalue(NXFME_ABBREV, 'event') ) namespace['nx_flow_update_abbrev'] = nx_flow_update_abbrev nx_flow_monitor_reply = nstruct( (nx_flow_update[0], 'stats'), base = nx_stats_reply, classifyby = (NXST_FLOW_MONITOR,), name = 'nx_flow_monitor_reply', criteria = lambda x: getattr(x, stats_subtype) == NXST_FLOW_MONITOR, init = packvalue(NXST_FLOW_MONITOR, stats_subtype) ) namespace['nx_flow_monitor_reply'] = nx_flow_monitor_reply ''' /* NXT_FLOW_MONITOR_CANCEL. * * Used by a controller to cancel an outstanding monitor. */ ''' nx_flow_monitor_cancel = nstruct( (uint32, 'id'), # /* 'id' from nx_flow_monitor_request. */ name = 'nx_flow_monitor_cancel', base = nicira_header, classifyby = (NXT_FLOW_MONITOR_CANCEL,), criteria = lambda x: getattr(x, msg_subtype) == NXT_FLOW_MONITOR_CANCEL, init = packvalue(NXT_FLOW_MONITOR_CANCEL, msg_subtype) ) namespace['nx_flow_monitor_cancel'] = nx_flow_monitor_cancel ''' /* Action structure for NXAST_WRITE_METADATA. * * Modifies the 'mask' bits of the metadata value. */ ''' nx_action_write_metadata = nstruct( (uint8[6],), # /* Must be zero. */ (uint64, 'metadata'), # /* Metadata register. */ (uint64, 'mask'), # /* Metadata mask. 
*/ base = nx_action, classifyby = (NXAST_WRITE_METADATA,), criteria = lambda x: getattr(x, action_subtype) == NXAST_WRITE_METADATA, init = packvalue(NXAST_WRITE_METADATA, action_subtype), name = 'nx_action_write_metadata' ) namespace['nx_action_write_metadata'] = nx_action_write_metadata ''' /* Action structure for NXAST_PUSH_MPLS. */ ''' nx_action_push_mpls = nstruct( (ethertype, 'ethertype'), # /* Ethertype */ (uint8[4],), base = nx_action, classifyby = (NXAST_PUSH_MPLS,), criteria = lambda x: getattr(x, action_subtype) == NXAST_PUSH_MPLS, init = packvalue(NXAST_PUSH_MPLS, action_subtype), name = 'nx_action_push_mpls' ) namespace['nx_action_push_mpls'] = nx_action_push_mpls ''' /* Action structure for NXAST_POP_MPLS. */ ''' nx_action_pop_mpls = nstruct( (ethertype, 'ethertype'), # /* Ethertype */ (uint8[4],), base = nx_action, classifyby = (NXAST_POP_MPLS,), criteria = lambda x: getattr(x, action_subtype) == NXAST_POP_MPLS, init = packvalue(NXAST_POP_MPLS, action_subtype), name = 'nx_action_pop_mpls' ) namespace['nx_action_pop_mpls'] = nx_action_pop_mpls ''' /* Action structure for NXAST_SET_MPLS_LABEL. */ ''' nx_action_mpls_label = nstruct( (uint8[2],), # /* Must be zero. */ (uint32, 'label'), # /* LABEL */ base = nx_action, classifyby = (NXAST_SET_MPLS_LABEL,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SET_MPLS_LABEL, init = packvalue(NXAST_SET_MPLS_LABEL, action_subtype), name = 'nx_action_mpls_label' ) namespace['nx_action_mpls_label'] = nx_action_mpls_label ''' /* Action structure for NXAST_SET_MPLS_TC. */ ''' nx_action_mpls_tc = nstruct( (uint8, 'tc'), # /* TC */ (uint8[5],), base = nx_action, classifyby = (NXAST_SET_MPLS_TC,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SET_MPLS_TC, init = packvalue(NXAST_SET_MPLS_TC, action_subtype), name = 'nx_action_mpls_tc' ) namespace['nx_action_mpls_tc'] = nx_action_mpls_tc ''' /* Action structure for NXAST_SET_MPLS_TTL. 
*/ ''' nx_action_mpls_ttl = nstruct( (uint8, 'ttl'), # /* TTL */ (uint8[5],), base = nx_action, classifyby = (NXAST_SET_MPLS_TTL,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SET_MPLS_TTL, init = packvalue(NXAST_SET_MPLS_TTL, action_subtype), name = 'nx_action_mpls_ttl' ) namespace['nx_action_mpls_ttl'] = nx_action_mpls_ttl ''' /* Action structure for NXAST_SAMPLE. * * Samples matching packets with the given probability and sends them * each to the set of collectors identified with the given ID. The * probability is expressed as a number of packets to be sampled out * of USHRT_MAX packets, and must be >0. * * When sending packet samples to IPFIX collectors, the IPFIX flow * record sent for each sampled packet is associated with the given * observation domain ID and observation point ID. Each IPFIX flow * record contain the sampled packet's headers when executing this * rule. If a sampled packet's headers are modified by previous * actions in the flow, those modified headers are sent. */ ''' nx_action_sample = nstruct( (uint16, 'probability'), # /* Fraction of packets to sample. */ (uint32, 'collector_set_id'), # /* ID of collector set in OVSDB. */ (uint32, 'obs_domain_id'), # /* ID of sampling observation domain. */ (uint32, 'obs_point_id'), # /* ID of sampling observation point. */ base = nx_action, classifyby = (NXAST_SAMPLE,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SAMPLE, init = packvalue(NXAST_SAMPLE, action_subtype), name = 'nx_action_sample' ) namespace['nx_action_sample'] = nx_action_sample
# NOTE(review): stray "python" markdown code-fence tag left over from text
# extraction; commented out so it is no longer a bare-name expression.
def create_extension(namespace, nicira_header, nx_action, nx_stats_request, nx_stats_reply, msg_subtype, action_subtype, stats_subtype): ''' /* This command enables or disables an Open vSwitch extension that allows a * controller to specify the OpenFlow table to which a flow should be added, * instead of having the switch decide which table is most appropriate as * required by OpenFlow 1.0. Because NXM was designed as an extension to * OpenFlow 1.0, the extension applies equally to ofp10_flow_mod and * nx_flow_mod. By default, the extension is disabled. * * When this feature is enabled, Open vSwitch treats struct ofp10_flow_mod's * and struct nx_flow_mod's 16-bit 'command' member as two separate fields. * The upper 8 bits are used as the table ID, the lower 8 bits specify the * command as usual. A table ID of 0xff is treated like a wildcarded table ID. * * The specific treatment of the table ID depends on the type of flow mod: * * - OFPFC_ADD: Given a specific table ID, the flow is always placed in that * table. If an identical flow already exists in that table only, then it * is replaced. If the flow cannot be placed in the specified table, * either because the table is full or because the table cannot support * flows of the given type, the switch replies with an OFPFMFC_TABLE_FULL * error. (A controller can distinguish these cases by comparing the * current and maximum number of entries reported in ofp_table_stats.) * * If the table ID is wildcarded, the switch picks an appropriate table * itself. If an identical flow already exist in the selected flow table, * then it is replaced. The choice of table might depend on the flows * that are already in the switch; for example, if one table fills up then * the switch might fall back to another one. * * - OFPFC_MODIFY, OFPFC_DELETE: Given a specific table ID, only flows * within that table are matched and modified or deleted. 
If the table ID * is wildcarded, flows within any table may be matched and modified or * deleted. * * - OFPFC_MODIFY_STRICT, OFPFC_DELETE_STRICT: Given a specific table ID, * only a flow within that table may be matched and modified or deleted. * If the table ID is wildcarded and exactly one flow within any table * matches, then it is modified or deleted; if flows in more than one * table match, then none is modified or deleted. */ ''' with _warnings.catch_warnings(): _warnings.filterwarnings('ignore', '^padding', StructDefWarning) nx_flow_mod_table_id = nstruct( (uint8, 'set'), # /* Nonzero to enable, zero to disable. */ (uint8[7],), name = 'nx_flow_mod_table_id', base = nicira_header, criteria = lambda x: getattr(x, msg_subtype) == NXT_FLOW_MOD_TABLE_ID, classifyby = (NXT_FLOW_MOD_TABLE_ID,), init = packvalue(NXT_FLOW_MOD_TABLE_ID, msg_subtype) ) namespace['nx_flow_mod_table_id'] = nx_flow_mod_table_id ''' /* NXT_SET_PACKET_IN_FORMAT request. */ ''' nx_set_packet_in_format = nstruct( (uint32, 'format'), # /* One of NXPIF_*. */ name = 'nx_set_packet_in_format', base = nicira_header, criteria = lambda x: getattr(x, msg_subtype) == NXT_SET_PACKET_IN_FORMAT, classifyby = (NXT_SET_PACKET_IN_FORMAT,), init = packvalue(NXT_SET_PACKET_IN_FORMAT, msg_subtype) ) namespace['nx_set_packet_in_format'] = nx_set_packet_in_format ''' /* NXT_PACKET_IN (analogous to OFPT_PACKET_IN). * * NXT_PACKET_IN is similar to the OpenFlow 1.2 OFPT_PACKET_IN. The * differences are: * * - NXT_PACKET_IN includes the cookie of the rule that triggered the * message. (OpenFlow 1.3 OFPT_PACKET_IN also includes the cookie.) * * - The metadata fields use NXM (instead of OXM) field numbers. * * Open vSwitch 1.9.0 and later omits metadata fields that are zero (as allowed * by OpenFlow 1.2). Earlier versions included all implemented metadata * fields. * * Open vSwitch does not include non-metadata in the nx_match, because by * definition that information can be found in the packet itself. 
The format * and the standards allow this, however, so controllers should be prepared to * tolerate future changes. * * The NXM format is convenient for reporting metadata values, but it is * important not to interpret the format as matching against a flow, because it * does not. Nothing is being matched; arbitrary metadata masks would not be * meaningful. * * Whereas in most cases a controller can expect to only get back NXM fields * that it set up itself (e.g. flow dumps will ordinarily report only NXM * fields from flows that the controller added), NXT_PACKET_IN messages might * contain fields that the controller does not understand, because the switch * might support fields (new registers, new protocols, etc.) that the * controller does not. The controller must prepared to tolerate these. * * The 'cookie' field has no meaning when 'reason' is OFPR_NO_MATCH. In this * case it should be UINT64_MAX. */ ''' if 'ofp_oxm' in namespace: nx_match = namespace['ofp_oxm'] namespace['nx_match'] = nx_match nx_match_mask = namespace['ofp_oxm_mask'] namespace['nx_match_mask'] = nx_match_mask nx_match_nomask = namespace['ofp_oxm_nomask'] namespace['nx_match_nomask'] = nx_match_nomask create_nxm = namespace['create_oxm'] namespace['create_nxm'] = create_nxm nx_match_nomask_ext = nstruct( base = nx_match_nomask, criteria = lambda x: NXM_VENDOR(x.header) <= 1, extend = {'header': nxm_header}, name = 'nx_match_nomask_ext' ) namespace['nx_match_nomask_ext'] = nx_match_nomask_ext nx_match_mask_ext = nstruct( base = nx_match_mask, criteria = lambda x: NXM_VENDOR(x.header) <= 1, extend = {'header': nxm_header}, name = 'nx_match_mask_ext' ) namespace['nx_match_mask_ext'] = nx_match_mask_ext else: nx_match = nstruct( (nxm_header, 'header'), name = 'nx_match', padding = 1, size = lambda x: NXM_LENGTH(x.header) + 4 ) namespace['nx_match'] = nx_match nx_match_nomask = nstruct( (raw, 'value'), base = nx_match, criteria = lambda x: not NXM_HASMASK(x.header), init = packvalue(NXM_OF_IN_PORT, 
'header'), name = 'nx_match_nomask' ) namespace['nx_match_nomask'] = nx_match_nomask _nxm_mask_value = nstruct( (raw, 'value'), name = 'nxm_mask_value', size = lambda x: NXM_LENGTH(x.header) // 2, padding = 1 ) nx_match_mask = nstruct( (_nxm_mask_value,), (raw, 'mask'), base = nx_match, criteria = lambda x: NXM_HASMASK(x.header), init = packvalue(NXM_OF_ETH_SRC_W, 'header'), name = 'nx_match_mask', ) namespace['nx_match_mask'] = nx_match_mask def create_nxm(header, value = None, mask = None): if NXM_HASMASK(header): nxm = nx_match_mask.new() size = NXM_LENGTH(header) // 2 else: nxm = nx_match_nomask.new() size = NXM_LENGTH(header) nxm.header = header nxm.value = common.create_binary(value, size) if NXM_HASMASK(header): nxm.mask = common.create_binary(mask, size) nxm._pack() nxm._autosubclass() return nxm namespace['create_nxm'] = create_nxm nx_match_nomask_ext = nx_match_nomask nx_match_mask_ext = nx_match_mask namespace['nx_match_nomask_ext'] = nx_match_nomask_ext namespace['nx_match_mask_ext'] = nx_match_mask_ext from namedstruct.namedstruct import rawtype as _rawtype import socket as _socket if 'ip4_addr_bytes' in namespace: ip4_addr_bytes = namespace['ip4_addr_bytes'] else: ip4_addr_bytes = prim('4s', 'ip4_addr_bytes') ip4_addr_bytes.formatter = lambda x: _socket.inet_ntoa(x) namespace['ip4_addr_bytes'] = ip4_addr_bytes nxm_mask_ipv4 = nstruct(name = 'nxm_mask_ipv4', base = nx_match_mask_ext, criteria = lambda x: x.header in (NXM_OF_IP_SRC_W, NXM_OF_IP_DST_W, NXM_OF_ARP_SPA_W, NXM_OF_ARP_TPA_W, NXM_NX_TUN_IPV4_SRC_W, NXM_NX_TUN_IPV4_DST_W), init = packvalue(NXM_OF_IP_SRC_W, 'header'), extend = {'value' : ip4_addr_bytes, 'mask' : ip4_addr_bytes} ) namespace['nxm_mask_ipv4'] = nxm_mask_ipv4 nxm_nomask_ipv4 = nstruct(name = 'nxm_nomask_ipv4', base = nx_match_nomask_ext, criteria = lambda x: x.header in (NXM_OF_IP_SRC, NXM_OF_IP_DST, NXM_OF_ARP_SPA, NXM_OF_ARP_TPA, NXM_NX_TUN_IPV4_SRC, NXM_NX_TUN_IPV4_DST), init = packvalue(NXM_OF_IP_SRC, 'header'), extend = 
{'value' : ip4_addr_bytes} ) namespace['nxm_nomask_ipv4'] = nxm_nomask_ipv4 if 'mac_addr_bytes' in namespace: mac_addr_bytes = namespace['mac_addr_bytes'] else: mac_addr_bytes = _rawtype() mac_addr_bytes.formatter = lambda x: ':'.join('%02X' % (c,) for c in bytearray(x)) namespace['mac_addr_bytes'] = mac_addr_bytes nxm_mask_eth = nstruct(name = 'nxm_mask_eth', base = nx_match_mask_ext, criteria = lambda x: x.header in (NXM_OF_ETH_SRC_W, NXM_OF_ETH_DST_W), init = packvalue(NXM_OF_ETH_SRC_W, 'header'), extend = {'value' : mac_addr_bytes, 'mask' : mac_addr_bytes}) namespace['nxm_mask_eth'] = nxm_mask_eth nxm_nomask_eth = nstruct(name = 'nxm_nomask_eth', base = nx_match_nomask_ext, criteria = lambda x: x.header in (NXM_OF_ETH_SRC, NXM_OF_ETH_DST, NXM_NX_ND_SLL, NXM_NX_ND_TLL, NXM_NX_ARP_SHA, NXM_NX_ARP_THA), init = packvalue(NXM_OF_ETH_SRC, 'header'), extend = {'value' : mac_addr_bytes}) namespace['nxm_nomask_eth'] = nxm_nomask_eth ofp_port_no = namespace['ofp_port_no'] nx_port_no = enum('nx_port_no', None, uint16, **dict((k, v & 0xffff) for k,v in ofp_port_no.getDict().items()) ) nxm_port_no_raw = _rawtype() nxm_port_no_raw.formatter = lambda x: nx_port_no.formatter(nx_port_no.parse(x)[0]) namespace['nx_port_no'] = nx_port_no namespace['nxm_port_no_raw'] = nxm_port_no_raw nxm_nomask_port = nstruct(name = 'nxm_nomask_port', base = nx_match_nomask_ext, criteria = lambda x: x.header == NXM_OF_IN_PORT, init = packvalue(NXM_OF_IN_PORT, 'header'), extend = {'value': nxm_port_no_raw} ) namespace['nxm_nomask_port'] = nxm_nomask_port if 'ethtype_raw' in namespace: ethtype_raw = namespace['ethtype_raw'] else: ethtype_raw = _rawtype() ethtype_raw.formatter = lambda x: ethertype.formatter(ethertype.parse(x)[0]) namespace['ethtype_raw'] = ethtype_raw nxm_nomask_ethertype = nstruct(name = 'nxm_nomask_ethertype', base = nx_match_nomask_ext, criteria = lambda x: x.header == NXM_OF_ETH_TYPE, init = packvalue(NXM_OF_ETH_TYPE, 'header'), extend = {'value': ethtype_raw}) 
namespace['nxm_nomask_ethertype'] = nxm_nomask_ethertype if 'arpop_raw' in namespace: arpop_raw = namespace['arpop_raw'] else: arpop_raw = _rawtype() arpop_raw.formatter = lambda x: arp_op_code.formatter(arp_op_code.parse(x)[0]) namespace['arpop_raw'] = arpop_raw nxm_nomask_arpopcode = nstruct(name = 'nxm_nomask_arpopcode', base = nx_match_nomask_ext, criteria = lambda x: x.header == NXM_OF_ARP_OP, init = packvalue(NXM_OF_ARP_OP, 'header'), extend = {'value': arpop_raw}) namespace['nxm_nomask_arpopcode'] = nxm_nomask_arpopcode if 'ip_protocol_raw' in namespace: ip_protocol_raw = namespace['ip_protocol_raw'] else: ip_protocol_raw = _rawtype() ip_protocol_raw.formatter = lambda x: ip_protocol.formatter(ip_protocol.parse(x)[0]) namespace['ip_protocol_raw'] = ip_protocol_raw nxm_nomask_ip_protocol = nstruct(name = 'nxm_nomask_ip_protocol', base = nx_match_nomask_ext, criteria = lambda x: x.header == NXM_OF_IP_PROTO, init = packvalue(NXM_OF_IP_PROTO, 'header'), extend = {'value': ip_protocol_raw}) namespace['nxm_nomask_ip_protocol'] = nxm_nomask_ip_protocol if 'ip6_addr_bytes' in namespace: nxm_nomask_ipv6 = nstruct(name = 'nxm_nomask_ipv6', base = nx_match_nomask_ext, criteria = lambda x: x.header in (NXM_NX_IPV6_SRC, NXM_NX_IPV6_DST, NXM_NX_ND_TARGET), init = packvalue(NXM_NX_IPV6_SRC, 'header'), extend = {'value': ip6_addr_bytes}) namespace['nxm_nomask_ipv6'] = nxm_nomask_ipv6 nxm_mask_ipv6 = nstruct(name = 'nxm_mask_ipv6', base = nx_match_mask_ext, criteria = lambda x: x.header in (NXM_NX_IPV6_SRC_W, NXM_NX_IPV6_DST_W), init = packvalue(NXM_NX_IPV6_SRC_W, 'header'), extend = {'value': ip6_addr_bytes, 'mask': ip6_addr_bytes}) namespace['nxm_mask_ipv6'] = nxm_mask_ipv6 nx_ip_frag_raw = _rawtype() nx_ip_frag_raw.formatter = lambda x: nx_ip_frag.formatter(nx_ip_frag.parse(x)[0]) nxm_nomask_ipfrag = nstruct(name = 'nxm_nomask_ipfrag', base = nx_match_nomask_ext, criteria = lambda x: x.header == NXM_NX_IP_FRAG, init = packvalue(NXM_NX_IP_FRAG, 'header'), extend = 
{'value': nx_ip_frag_raw}) namespace['nxm_nomask_ipfrag'] = nxm_nomask_ipfrag nxm_mask_ipfrag = nstruct(name = 'nxm_mask_ipfrag', base = nx_match_mask_ext, criteria = lambda x: x.header == NXM_NX_IP_FRAG_W, init = packvalue(NXM_NX_IP_FRAG_W, 'header'), extend = {'value': nx_ip_frag_raw, 'mask': nx_ip_frag_raw}) namespace['nxm_mask_ipfrag'] = nxm_mask_ipfrag nx_matches = nstruct( (nx_match[0], 'matches'), name = 'nx_matches', size = sizefromlen(65536, 'match_len'), prepack = packrealsize('match_len'), padding = 8 ) namespace['nx_matches'] = nx_matches nx_packet_in = nstruct( (uint32, 'buffer_id'), # /* ID assigned by datapath. */ (uint16, 'total_len'), # /* Full length of frame. */ (uint8, 'reason'), # /* Reason packet is sent (one of OFPR_*). */ (uint8, 'table_id'), # /* ID of the table that was looked up. */ (uint64, 'cookie'), # /* Cookie of the rule that was looked up. */ (uint16, 'match_len'), # /* Size of nx_match. */ (uint8[6],), # /* Align to 64-bits. */ (nx_matches,), (uint8[2],), (raw, 'data'), name = 'nx_packet_in', base = nicira_header, classifyby = (NXT_PACKET_IN,), criteria = lambda x: getattr(x, msg_subtype) == NXT_PACKET_IN, init = packvalue(NXT_PACKET_IN, msg_subtype) ) namespace['nx_packet_in'] = nx_packet_in ''' /* Configures the "role" of the sending controller. The default role is: * * - Other (NX_ROLE_OTHER), which allows the controller access to all * OpenFlow features. * * The other possible roles are a related pair: * * - Master (NX_ROLE_MASTER) is equivalent to Other, except that there may * be at most one Master controller at a time: when a controller * configures itself as Master, any existing Master is demoted to the * Slave role. * * - Slave (NX_ROLE_SLAVE) allows the controller read-only access to * OpenFlow features. In particular attempts to modify the flow table * will be rejected with an OFPBRC_EPERM error. 
* * Slave controllers do not receive OFPT_PACKET_IN or OFPT_FLOW_REMOVED * messages, but they do receive OFPT_PORT_STATUS messages. */ ''' nx_role_request = nstruct( (nx_role, 'role'), # /* One of NX_ROLE_*. */ name = 'nx_role_request', base = nicira_header, classifyby = (NXT_ROLE_REQUEST, NXT_ROLE_REPLY), criteria = lambda x: getattr(x, msg_subtype) == NXT_ROLE_REQUEST or getattr(x, msg_subtype) == NXT_ROLE_REPLY, init = packvalue(NXT_ROLE_REQUEST, msg_subtype) ) namespace['nx_role_request'] = nx_role_request ''' /* NXT_SET_ASYNC_CONFIG. * * Sent by a controller, this message configures the asynchronous messages that * the controller wants to receive. Element 0 in each array specifies messages * of interest when the controller has an "other" or "master" role; element 1, * when the controller has a "slave" role. * * Each array element is a bitmask in which a 0-bit disables receiving a * particular message and a 1-bit enables receiving it. Each bit controls the * message whose 'reason' corresponds to the bit index. For example, the bit * with value 1<<2 == 4 in port_status_mask[1] determines whether the * controller will receive OFPT_PORT_STATUS messages with reason OFPPR_MODIFY * (value 2) when the controller has a "slave" role. * * As a side effect, for service controllers, this message changes the * miss_send_len from default of zero to OFP_DEFAULT_MISS_SEND_LEN (128). 
*/ ''' ofp_packet_in_reason = namespace['ofp_packet_in_reason'] if 'ofp_packet_in_reason_bitwise' in namespace: ofp_packet_in_reason_bitwise = namespace['ofp_packet_in_reason_bitwise'] else: ofp_packet_in_reason_bitwise = enum('ofp_packet_in_reason_bitwise', None, uint32, **dict((k, 1<<v) for k,v in ofp_packet_in_reason.getDict().items())) namespace['ofp_packet_in_reason_bitwise'] = ofp_packet_in_reason_bitwise ofp_port_reason = namespace['ofp_port_reason'] if 'ofp_port_reason_bitwise' in namespace: ofp_port_reason_bitwise = namespace['ofp_port_reason_bitwise'] else: ofp_port_reason_bitwise = enum('ofp_port_reason_bitwise', None, uint32, **dict((k, 1<<v) for k,v in ofp_port_reason.getDict().items())) namespace['ofp_port_reason_bitwise'] = ofp_port_reason_bitwise ofp_flow_removed_reason = namespace['ofp_flow_removed_reason'] if 'ofp_flow_removed_reason_bitwise' in namespace: ofp_flow_removed_reason_bitwise = namespace['ofp_flow_removed_reason_bitwise'] else: ofp_flow_removed_reason_bitwise = enum('ofp_flow_removed_reason_bitwise', None, uint32, **dict((k, 1<<v) for k,v in ofp_flow_removed_reason.getDict().items())) namespace['ofp_flow_removed_reason_bitwise'] = ofp_flow_removed_reason_bitwise nx_async_config = nstruct( (ofp_packet_in_reason_bitwise[2], 'packet_in_mask'), # /* Bitmasks of OFPR_* values. */ (ofp_port_reason_bitwise[2], 'port_status_mask'), # /* Bitmasks of OFPRR_* values. */ (ofp_flow_removed_reason_bitwise[2], 'flow_removed_mask'), #/* Bitmasks of OFPPR_* values. */ name = 'nx_async_config', base = nicira_header, classifyby = (NXT_SET_ASYNC_CONFIG,), criteria = lambda x: getattr(x, msg_subtype) == NXT_SET_ASYNC_CONFIG, init = packvalue(NXT_SET_ASYNC_CONFIG, msg_subtype) ) namespace['nx_async_config'] = nx_async_config ''' /* Nicira vendor flow actions. */ ''' ''' /* Action structures for NXAST_RESUBMIT and NXAST_RESUBMIT_TABLE. 
* * These actions search one of the switch's flow tables: * * - For NXAST_RESUBMIT_TABLE only, if the 'table' member is not 255, then * it specifies the table to search. * * - Otherwise (for NXAST_RESUBMIT_TABLE with a 'table' of 255, or for * NXAST_RESUBMIT regardless of 'table'), it searches the current flow * table, that is, the OpenFlow flow table that contains the flow from * which this action was obtained. If this action did not come from a * flow table (e.g. it came from an OFPT_PACKET_OUT message), then table 0 * is the current table. * * The flow table lookup uses a flow that may be slightly modified from the * original lookup: * * - For NXAST_RESUBMIT, the 'in_port' member of struct nx_action_resubmit * is used as the flow's in_port. * * - For NXAST_RESUBMIT_TABLE, if the 'in_port' member is not OFPP_IN_PORT, * then its value is used as the flow's in_port. Otherwise, the original * in_port is used. * * - If actions that modify the flow (e.g. OFPAT_SET_VLAN_VID) precede the * resubmit action, then the flow is updated with the new values. * * Following the lookup, the original in_port is restored. * * If the modified flow matched in the flow table, then the corresponding * actions are executed. Afterward, actions following the resubmit in the * original set of actions, if any, are executed; any changes made to the * packet (e.g. changes to VLAN) by secondary actions persist when those * actions are executed, although the original in_port is restored. * * Resubmit actions may be used any number of times within a set of actions. * * Resubmit actions may nest to an implementation-defined depth. Beyond this * implementation-defined depth, further resubmit actions are simply ignored. * * NXAST_RESUBMIT ignores 'table' and 'pad'. NXAST_RESUBMIT_TABLE requires * 'pad' to be all-bits-zero. * * Open vSwitch 1.0.1 and earlier did not support recursion. Open vSwitch * before 1.2.90 did not support NXAST_RESUBMIT_TABLE. 
*/ ''' nx_action_resubmit = nstruct( (nx_port_no, 'in_port'), # /* New in_port for checking flow table. */ (uint8, 'table'), # /* NXAST_RESUBMIT_TABLE: table to use. */ (uint8[3],), base = nx_action, criteria = lambda x: getattr(x, action_subtype) == NXAST_RESUBMIT_TABLE or getattr(x, action_subtype) == NXAST_RESUBMIT, classifyby = (NXAST_RESUBMIT_TABLE, NXAST_RESUBMIT), name = 'nx_action_resubmit', init = packvalue(NXAST_RESUBMIT_TABLE, action_subtype) ) namespace['nx_action_resubmit'] = nx_action_resubmit ''' /* Action structure for NXAST_SET_TUNNEL. * * Sets the encapsulating tunnel ID to a 32-bit value. The most-significant 32 * bits of the tunnel ID are set to 0. */ ''' nx_action_set_tunnel = nstruct( (uint8[2],), (uint32, 'tun_id'), # /* Tunnel ID. */ name = 'nx_action_set_tunnel', base = nx_action, classifyby = (NXAST_SET_TUNNEL,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SET_TUNNEL, init = packvalue(NXAST_SET_TUNNEL, action_subtype) ) namespace['nx_action_set_tunnel'] = nx_action_set_tunnel ''' /* Action structure for NXAST_SET_TUNNEL64. * * Sets the encapsulating tunnel ID to a 64-bit value. */ ''' nx_action_set_tunnel64 = nstruct( (uint8[6],), (uint64, 'tun_id'), # /* Tunnel ID. */ name = 'nx_action_set_tunnel64', base = nx_action, classifyby = (NXAST_SET_TUNNEL64,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SET_TUNNEL64, init = packvalue(NXAST_SET_TUNNEL64, action_subtype) ) namespace['nx_action_set_tunnel64'] = nx_action_set_tunnel64 ''' /* Action structure for NXAST_SET_QUEUE. * * Set the queue that should be used when packets are output. This is similar * to the OpenFlow OFPAT_ENQUEUE action, but does not take the output port as * an argument. This allows the queue to be defined before the port is * known. */ ''' nx_action_set_queue = nstruct( (uint8[2],), (uint32, 'queue_id'), # /* Where to enqueue packets. 
*/ name = 'nx_action_set_queue', base = nx_action, classifyby = (NXAST_SET_QUEUE,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SET_QUEUE, init = packvalue(NXAST_SET_QUEUE, action_subtype) ) namespace['nx_action_set_queue'] = nx_action_set_queue ''' /* Action structure for NXAST_POP_QUEUE. * * Restores the queue to the value it was before any NXAST_SET_QUEUE actions * were used. Only the original queue can be restored this way; no stack is * maintained. */ ''' nx_action_pop_queue = nstruct( (uint8[6],), name = 'nx_action_pop_queue', base = nx_action, classifyby = (NXAST_POP_QUEUE,), criteria = lambda x: getattr(x, action_subtype) == NXAST_POP_QUEUE, init = packvalue(NXAST_POP_QUEUE, action_subtype) ) namespace['nx_action_pop_queue'] = nx_action_pop_queue ''' /* Action structure for NXAST_REG_MOVE. * * Copies src[src_ofs:src_ofs+n_bits] to dst[dst_ofs:dst_ofs+n_bits], where * a[b:c] denotes the bits within 'a' numbered 'b' through 'c' (not including * bit 'c'). Bit numbering starts at 0 for the least-significant bit, 1 for * the next most significant bit, and so on. * * 'src' and 'dst' are nxm_header values with nxm_hasmask=0. (It doesn't make * sense to use nxm_hasmask=1 because the action does not do any kind of * matching; it uses the actual value of a field.) * * The following nxm_header values are potentially acceptable as 'src': * * - NXM_OF_IN_PORT * - NXM_OF_ETH_DST * - NXM_OF_ETH_SRC * - NXM_OF_ETH_TYPE * - NXM_OF_VLAN_TCI * - NXM_OF_IP_TOS * - NXM_OF_IP_PROTO * - NXM_OF_IP_SRC * - NXM_OF_IP_DST * - NXM_OF_TCP_SRC * - NXM_OF_TCP_DST * - NXM_OF_UDP_SRC * - NXM_OF_UDP_DST * - NXM_OF_ICMP_TYPE * - NXM_OF_ICMP_CODE * - NXM_OF_ARP_OP * - NXM_OF_ARP_SPA * - NXM_OF_ARP_TPA * - NXM_NX_TUN_ID * - NXM_NX_ARP_SHA * - NXM_NX_ARP_THA * - NXM_NX_ICMPV6_TYPE * - NXM_NX_ICMPV6_CODE * - NXM_NX_ND_SLL * - NXM_NX_ND_TLL * - NXM_NX_REG(idx) for idx in the switch's accepted range. 
* - NXM_NX_PKT_MARK * - NXM_NX_TUN_IPV4_SRC * - NXM_NX_TUN_IPV4_DST * * The following nxm_header values are potentially acceptable as 'dst': * * - NXM_OF_ETH_DST * - NXM_OF_ETH_SRC * - NXM_OF_IP_TOS * - NXM_OF_IP_SRC * - NXM_OF_IP_DST * - NXM_OF_TCP_SRC * - NXM_OF_TCP_DST * - NXM_OF_UDP_SRC * - NXM_OF_UDP_DST * - NXM_NX_ARP_SHA * - NXM_NX_ARP_THA * - NXM_OF_ARP_OP * - NXM_OF_ARP_SPA * - NXM_OF_ARP_TPA * Modifying any of the above fields changes the corresponding packet * header. * * - NXM_OF_IN_PORT * * - NXM_NX_REG(idx) for idx in the switch's accepted range. * * - NXM_NX_PKT_MARK * * - NXM_OF_VLAN_TCI. Modifying this field's value has side effects on the * packet's 802.1Q header. Setting a value with CFI=0 removes the 802.1Q * header (if any), ignoring the other bits. Setting a value with CFI=1 * adds or modifies the 802.1Q header appropriately, setting the TCI field * to the field's new value (with the CFI bit masked out). * * - NXM_NX_TUN_ID, NXM_NX_TUN_IPV4_SRC, NXM_NX_TUN_IPV4_DST. Modifying * any of these values modifies the corresponding tunnel header field used * for the packet's next tunnel encapsulation, if allowed by the * configuration of the output tunnel port. * * A given nxm_header value may be used as 'src' or 'dst' only on a flow whose * nx_match satisfies its prerequisites. For example, NXM_OF_IP_TOS may be * used only if the flow's nx_match includes an nxm_entry that specifies * nxm_type=NXM_OF_ETH_TYPE, nxm_hasmask=0, and nxm_value=0x0800. * * The switch will reject actions for which src_ofs+n_bits is greater than the * width of 'src' or dst_ofs+n_bits is greater than the width of 'dst' with * error type OFPET_BAD_ACTION, code OFPBAC_BAD_ARGUMENT. * * This action behaves properly when 'src' overlaps with 'dst', that is, it * behaves as if 'src' were copied out to a temporary buffer, then the * temporary buffer copied to 'dst'. */ ''' nx_action_reg_move = nstruct( (uint16, 'n_bits'), # /* Number of bits. 
*/ (uint16, 'src_ofs'), # /* Starting bit offset in source. */ (uint16, 'dst_ofs'), # /* Starting bit offset in destination. */ (nxm_header, 'src'), # /* Source register. */ (nxm_header, 'dst'), # /* Destination register. */ name = 'nx_action_reg_move', base = nx_action, classifyby = (NXAST_REG_MOVE,), criteria = lambda x: getattr(x, action_subtype) == NXAST_REG_MOVE, init = packvalue(NXAST_REG_MOVE, action_subtype), formatter = _createdesc(lambda x:'move:%s[%d..%d]->%s[%d..%d]' % (x['src'], x['src_ofs'], x['src_ofs'] + x['n_bits'] - 1, x['dst'], x['dst_ofs'], x['dst_ofs'] + x['n_bits'] - 1)) ) namespace['nx_action_reg_move'] = nx_action_reg_move ''' /* Action structure for NXAST_REG_LOAD. * * Copies value[0:n_bits] to dst[ofs:ofs+n_bits], where a[b:c] denotes the bits * within 'a' numbered 'b' through 'c' (not including bit 'c'). Bit numbering * starts at 0 for the least-significant bit, 1 for the next most significant * bit, and so on. * * 'dst' is an nxm_header with nxm_hasmask=0. See the documentation for * NXAST_REG_MOVE, above, for the permitted fields and for the side effects of * loading them. * * The 'ofs' and 'n_bits' fields are combined into a single 'ofs_nbits' field * to avoid enlarging the structure by another 8 bytes. To allow 'n_bits' to * take a value between 1 and 64 (inclusive) while taking up only 6 bits, it is * also stored as one less than its true value: * * 15 6 5 0 * +------------------------------+------------------+ * | ofs | n_bits - 1 | * +------------------------------+------------------+ * * The switch will reject actions for which ofs+n_bits is greater than the * width of 'dst', or in which any bits in 'value' with value 2**n_bits or * greater are set to 1, with error type OFPET_BAD_ACTION, code * OFPBAC_BAD_ARGUMENT. */ ''' nx_action_reg_load = nstruct( (uint16, 'ofs_nbits'), # /* (ofs << 6) | (n_bits - 1). */ (nxm_header, 'dst'), # /* Destination register. */ (uint64, 'value'), # /* Immediate value. 
*/ name = 'nx_action_reg_load', base = nx_action, classifyby = (NXAST_REG_LOAD,), criteria = lambda x: getattr(x, action_subtype) == NXAST_REG_LOAD, init = packvalue(NXAST_REG_LOAD, action_subtype), formatter = _createdesc(lambda x: 'load:0x%x->%s[%d..%d]' % (x['value'], x['dst'], x['ofs_nbits'] >> 6, (x['ofs_nbits'] >> 6) + (x['ofs_nbits'] & 0x3f))) ) namespace['nx_action_reg_load'] = nx_action_reg_load ''' /* Action structure for NXAST_STACK_PUSH and NXAST_STACK_POP. * * Pushes (or pops) field[offset: offset + n_bits] to (or from) * top of the stack. */ ''' nx_action_stack = nstruct( (uint16, 'offset'), # /* Bit offset into the field. */ (nxm_header, 'field'), # /* The field used for push or pop. */ (uint16, 'n_bits'), # /* (n_bits + 1) bits of the field. */ (uint8[6],), # /* Reserved, must be zero. */ name = 'nx_action_stack', base = nx_action, classifyby = (NXAST_STACK_PUSH, NXAST_STACK_POP), criteria = lambda x: getattr(x, action_subtype) == NXAST_STACK_PUSH or getattr(x, action_subtype) == NXAST_STACK_POP, init = packvalue(NXAST_STACK_PUSH, action_subtype), formatter = _createdesc(lambda x: '%s:%s[%d..%d]' % ('push' if x[action_subtype] == 'NXAST_STACK_PUSH' else 'pop', x['field'], x['offset'], (x['offset'] + x['n_bits'] - 1))) ) namespace['nx_action_stack'] = nx_action_stack ''' /* Action structure for NXAST_NOTE. * * This action has no effect. It is variable length. The switch does not * attempt to interpret the user-defined 'note' data in any way. A controller * can use this action to attach arbitrary metadata to a flow. * * This action might go away in the future. */ ''' nx_action_note = nstruct( (varchr, 'note'), name = 'nx_action_note', base = nx_action, classifyby = (NXAST_NOTE,), criteria = lambda x: getattr(x, action_subtype) == NXAST_NOTE, init = packvalue(NXAST_NOTE, action_subtype) ) namespace['nx_action_note'] = nx_action_note ''' /* Action structure for NXAST_MULTIPATH. * * This action performs the following steps in sequence: * * 1. 
Hashes the fields designated by 'fields', one of NX_HASH_FIELDS_*. * Refer to the definition of "enum nx_mp_fields" for details. * * The 'basis' value is used as a universal hash parameter, that is, * different values of 'basis' yield different hash functions. The * particular universal hash function used is implementation-defined. * * The hashed fields' values are drawn from the current state of the * flow, including all modifications that have been made by actions up to * this point. * * 2. Applies the multipath link choice algorithm specified by 'algorithm', * one of NX_MP_ALG_*. Refer to the definition of "enum nx_mp_algorithm" * for details. * * The output of the algorithm is 'link', an unsigned integer less than * or equal to 'max_link'. * * Some algorithms use 'arg' as an additional argument. * * 3. Stores 'link' in dst[ofs:ofs+n_bits]. The format and semantics of * 'dst' and 'ofs_nbits' are similar to those for the NXAST_REG_LOAD * action. * * The switch will reject actions that have an unknown 'fields', or an unknown * 'algorithm', or in which ofs+n_bits is greater than the width of 'dst', or * in which 'max_link' is greater than or equal to 2**n_bits, with error type * OFPET_BAD_ACTION, code OFPBAC_BAD_ARGUMENT. */ ''' nx_action_multipath = nstruct( #/* What fields to hash and how. */ (nx_hash_fields, 'fields'), # /* One of NX_HASH_FIELDS_*. */ (uint16, 'basis'), # /* Universal hash parameter. */ (uint16,), #/* Multipath link choice algorithm to apply to hash value. */ (nx_mp_algorithm, 'algorithm'), # /* One of NX_MP_ALG_*. */ (uint16, 'max_link'), # /* Number of output links, minus 1. */ (uint32, 'arg'), # /* Algorithm-specific argument. */ (uint16,), # /* Where to store the result. */ (uint16, 'ofs_nbits'), # /* (ofs << 6) | (n_bits - 1). */ (nxm_header, 'dst'), # /* Destination. 
*/ name = 'nx_action_multipath', base = nx_action, classifyby = (NXAST_MULTIPATH,), criteria = lambda x: getattr(x, action_subtype) == NXAST_MULTIPATH, init = packvalue(NXAST_MULTIPATH, action_subtype), formatter = _createdesc(lambda x: 'multipath(%s,%d,%s,%d,%d,%s[%d..%d])' % (x['fields'], x['basis'], x['algorithm'],x['max_link'] + 1, x['arg'], x['dst'], x['ofs_nbits'] >> 6, (x['ofs_nbits'] >> 6) + (x['ofs_nbits'] & 0x3f))) ) namespace['nx_action_multipath'] = nx_action_multipath ''' /* Action structure for NXAST_LEARN. * * This action adds or modifies a flow in an OpenFlow table, similar to * OFPT_FLOW_MOD with OFPFC_MODIFY_STRICT as 'command'. The new flow has the * specified idle timeout, hard timeout, priority, cookie, and flags. The new * flow's match criteria and actions are built by applying each of the series * of flow_mod_spec elements included as part of the action. * * A flow_mod_spec starts with a 16-bit header. A header that is all-bits-0 is * a no-op used for padding the action as a whole to a multiple of 8 bytes in * length. Otherwise, the flow_mod_spec can be thought of as copying 'n_bits' * bits from a source to a destination. In this case, the header contains * multiple fields: * * 15 14 13 12 11 10 0 * +------+---+------+---------------------------------+ * | 0 |src| dst | n_bits | * +------+---+------+---------------------------------+ * * The meaning and format of a flow_mod_spec depends on 'src' and 'dst'. The * following table summarizes the meaning of each possible combination. * Details follow the table: * * src dst meaning * --- --- ---------------------------------------------------------- * 0 0 Add match criteria based on value in a field. * 1 0 Add match criteria based on an immediate value. * 0 1 Add NXAST_REG_LOAD action to copy field into a different field. * 1 1 Add NXAST_REG_LOAD action to load immediate value into a field. * 0 2 Add OFPAT_OUTPUT action to output to port from specified field. 
* All other combinations are undefined and not allowed. * * The flow_mod_spec header is followed by a source specification and a * destination specification. The format and meaning of the source * specification depends on 'src': * * - If 'src' is 0, the source bits are taken from a field in the flow to * which this action is attached. (This should be a wildcarded field. If * its value is fully specified then the source bits being copied have * constant values.) * * The source specification is an ovs_be32 'field' and an ovs_be16 'ofs'. * 'field' is an nxm_header with nxm_hasmask=0, and 'ofs' the starting bit * offset within that field. The source bits are field[ofs:ofs+n_bits-1]. * 'field' and 'ofs' are subject to the same restrictions as the source * field in NXAST_REG_MOVE. * * - If 'src' is 1, the source bits are a constant value. The source * specification is (n_bits+15)/16*2 bytes long. Taking those bytes as a * number in network order, the source bits are the 'n_bits' * least-significant bits. The switch will report an error if other bits * in the constant are nonzero. * * The flow_mod_spec destination specification, for 'dst' of 0 or 1, is an * ovs_be32 'field' and an ovs_be16 'ofs'. 'field' is an nxm_header with * nxm_hasmask=0 and 'ofs' is a starting bit offset within that field. The * meaning of the flow_mod_spec depends on 'dst': * * - If 'dst' is 0, the flow_mod_spec specifies match criteria for the new * flow. The new flow matches only if bits field[ofs:ofs+n_bits-1] in a * packet equal the source bits. 'field' may be any nxm_header with * nxm_hasmask=0 that is allowed in NXT_FLOW_MOD. * * Order is significant. Earlier flow_mod_specs must satisfy any * prerequisites for matching fields specified later, by copying constant * values into prerequisite fields. * * The switch will reject flow_mod_specs that do not satisfy NXM masking * restrictions. * * - If 'dst' is 1, the flow_mod_spec specifies an NXAST_REG_LOAD action for * the new flow. 
The new flow copies the source bits into * field[ofs:ofs+n_bits-1]. Actions are executed in the same order as the * flow_mod_specs. * * A single NXAST_REG_LOAD action writes no more than 64 bits, so n_bits * greater than 64 yields multiple NXAST_REG_LOAD actions. * * The flow_mod_spec destination spec for 'dst' of 2 (when 'src' is 0) is * empty. It has the following meaning: * * - The flow_mod_spec specifies an OFPAT_OUTPUT action for the new flow. * The new flow outputs to the OpenFlow port specified by the source field. * Of the special output ports with value OFPP_MAX or larger, OFPP_IN_PORT, * OFPP_FLOOD, OFPP_LOCAL, and OFPP_ALL are supported. Other special ports * may not be used. * * Resource Management * ------------------- * * A switch has a finite amount of flow table space available for learning. * When this space is exhausted, no new learning table entries will be learned * until some existing flow table entries expire. The controller should be * prepared to handle this by flooding (which can be implemented as a * low-priority flow). * * If a learned flow matches a single TCP stream with a relatively long * timeout, one may make the best of resource constraints by setting * 'fin_idle_timeout' or 'fin_hard_timeout' (both measured in seconds), or * both, to shorter timeouts. When either of these is specified as a nonzero * value, OVS adds a NXAST_FIN_TIMEOUT action, with the specified timeouts, to * the learned flow. * * Examples * -------- * * The following examples give a prose description of the flow_mod_specs along * with informal notation for how those would be represented and a hex dump of * the bytes that would be required. * * These examples could work with various nx_action_learn parameters. Typical * values would be idle_timeout=OFP_FLOW_PERMANENT, hard_timeout=60, * priority=OFP_DEFAULT_PRIORITY, flags=0, table_id=10. * * 1. 
Learn input port based on the source MAC, with lookup into * NXM_NX_REG1[16:31] by resubmit to in_port=99: * * Match on in_port=99: * ovs_be16(src=1, dst=0, n_bits=16), 20 10 * ovs_be16(99), 00 63 * ovs_be32(NXM_OF_IN_PORT), ovs_be16(0) 00 00 00 02 00 00 * * Match Ethernet destination on Ethernet source from packet: * ovs_be16(src=0, dst=0, n_bits=48), 00 30 * ovs_be32(NXM_OF_ETH_SRC), ovs_be16(0) 00 00 04 06 00 00 * ovs_be32(NXM_OF_ETH_DST), ovs_be16(0) 00 00 02 06 00 00 * * Set NXM_NX_REG1[16:31] to the packet's input port: * ovs_be16(src=0, dst=1, n_bits=16), 08 10 * ovs_be32(NXM_OF_IN_PORT), ovs_be16(0) 00 00 00 02 00 00 * ovs_be32(NXM_NX_REG1), ovs_be16(16) 00 01 02 04 00 10 * * Given a packet that arrived on port A with Ethernet source address B, * this would set up the flow "in_port=99, dl_dst=B, * actions=load:A->NXM_NX_REG1[16..31]". * * In syntax accepted by ovs-ofctl, this action is: learn(in_port=99, * NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[], * load:NXM_OF_IN_PORT[]->NXM_NX_REG1[16..31]) * * 2. Output to input port based on the source MAC and VLAN VID, with lookup * into NXM_NX_REG1[16:31]: * * Match on same VLAN ID as packet: * ovs_be16(src=0, dst=0, n_bits=12), 00 0c * ovs_be32(NXM_OF_VLAN_TCI), ovs_be16(0) 00 00 08 02 00 00 * ovs_be32(NXM_OF_VLAN_TCI), ovs_be16(0) 00 00 08 02 00 00 * * Match Ethernet destination on Ethernet source from packet: * ovs_be16(src=0, dst=0, n_bits=48), 00 30 * ovs_be32(NXM_OF_ETH_SRC), ovs_be16(0) 00 00 04 06 00 00 * ovs_be32(NXM_OF_ETH_DST), ovs_be16(0) 00 00 02 06 00 00 * * Output to the packet's input port: * ovs_be16(src=0, dst=2, n_bits=16), 10 10 * ovs_be32(NXM_OF_IN_PORT), ovs_be16(0) 00 00 00 02 00 00 * * Given a packet that arrived on port A with Ethernet source address B in * VLAN C, this would set up the flow "dl_dst=B, vlan_vid=C, * actions=output:A". * * In syntax accepted by ovs-ofctl, this action is: * learn(NXM_OF_VLAN_TCI[0..11], NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[], * output:NXM_OF_IN_PORT[]) * * 3. 
Here's a recipe for a very simple-minded MAC learning switch. It uses a * 10-second MAC expiration time to make it easier to see what's going on * * ovs-vsctl del-controller br0 * ovs-ofctl del-flows br0 * ovs-ofctl add-flow br0 "table=0 actions=learn(table=1, \ hard_timeout=10, NXM_OF_VLAN_TCI[0..11], \ NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[], \ output:NXM_OF_IN_PORT[]), resubmit(,1)" * ovs-ofctl add-flow br0 "table=1 priority=0 actions=flood" * * You can then dump the MAC learning table with: * * ovs-ofctl dump-flows br0 table=1 * * Usage Advice * ------------ * * For best performance, segregate learned flows into a table that is not used * for any other flows except possibly for a lowest-priority "catch-all" flow * (a flow with no match criteria). If different learning actions specify * different match criteria, use different tables for the learned flows. * * The meaning of 'hard_timeout' and 'idle_timeout' can be counterintuitive. * These timeouts apply to the flow that is added, which means that a flow with * an idle timeout will expire when no traffic has been sent *to* the learned * address. This is not usually the intent in MAC learning; instead, we want * the MAC learn entry to expire when no traffic has been sent *from* the * learned address. Use a hard timeout for that. 
*/ ''' def _nx_flow_mod_spec_formatter(x): if NX_FLOWMODSPEC_SRC(x['header']): srcdesc = '0x' + ''.join('%02x' % (c,) for c in bytearray(x['value'])) else: srcdesc = '%s[%d..%d]' % (x['src'], x['src_ofs'], x['src_ofs'] + NX_FLOWMODSPEC_NBITS(x['header']) - 1) dstv = NX_FLOWMODSPEC_DST(x['header']) if dstv != NX_LEARN_DST_OUTPUT: dstdesc = '%s[%d..%d]' % (x['dst'], x['dst_ofs'], x['dst_ofs'] + NX_FLOWMODSPEC_NBITS(x['header']) - 1) if dstv == NX_LEARN_DST_MATCH: x['_desc'] = '%s=%s' % (dstdesc, srcdesc) elif dstv == NX_LEARN_DST_LOAD: x['_desc'] = 'load:%s->%s' % (srcdesc, dstdesc) elif NX_FLOWMODSPEC_SRC(x['header']): x['_desc'] = 'output:%s' % nxm_port_no_raw.formatter(common.create_binary(x['value'], 2)) else: x['_desc'] = 'output:%s' % (srcdesc,) x['header'] = nx_flow_mod_spec_header.formatter(x['header']) return x nx_flow_mod_spec = nstruct( (uint16, 'header'), (_nx_flow_mod_spec_src,), (_nx_flow_mod_spec_dst,), name = 'nx_flow_mod_spec', padding = 1, formatter = _nx_flow_mod_spec_formatter, lastextra = False # if x.header == 0, size is 14, the padding should not be so large so it will not be successfully parsed ) namespace['nx_flow_mod_spec'] = nx_flow_mod_spec def create_nxfms_matchfield(src, dst, src_ofs = 0, dst_ofs = 0, n_bits = None): if n_bits is None: n_bits = min(NXM_LENGTH(dst) * 8 - dst_ofs, NXM_LENGTH(src) * 8 - src_ofs) if n_bits <= 0: raise ValueError('Cannot create flow mod spec with 0 bits') return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_FIELD, NX_LEARN_DST_MATCH, n_bits) + _create_field(src, src_ofs) + _create_field(dst, dst_ofs))[0] namespace['create_nxfms_matchfield'] = create_nxfms_matchfield def create_nxfms_matchvalue(dst, value, dst_ofs, n_bits = None): if n_bits is None: n_bits = NXM_LENGTH(dst) * 8 - dst_ofs if n_bits <= 0: raise ValueError('Cannot create flow mod spec with 0 bits') return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_IMMEDIATE, NX_LEARN_DST_MATCH, n_bits) + common.create_binary(value, (n_bits + 15) // 
16 * 2) + _create_field(dst, dst_ofs))[0] namespace['create_nxfms_matchvalue'] = create_nxfms_matchvalue def create_nxfms_loadfield(src, dst, src_ofs = 0, dst_ofs = 0, n_bits = None): if n_bits is None: n_bits = min(NXM_LENGTH(dst) * 8 - dst_ofs, NXM_LENGTH(src) * 8 - src_ofs) if n_bits <= 0: raise ValueError('Cannot create flow mod spec with 0 bits') return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_FIELD, NX_LEARN_DST_LOAD, n_bits) + _create_field(src, src_ofs) + _create_field(dst, dst_ofs))[0] namespace['create_nxfms_loadfield'] = create_nxfms_loadfield def create_nxfms_loadvalue(dst, value, dst_ofs, n_bits = None): if n_bits is None: n_bits = NXM_LENGTH(dst) * 8 - dst_ofs if n_bits <= 0: raise ValueError('Cannot create flow mod spec with 0 bits') return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_IMMEDIATE, NX_LEARN_DST_LOAD, n_bits) + common.create_binary(value, (n_bits + 15) // 16 * 2) + _create_field(dst, dst_ofs))[0] namespace['create_nxfms_loadvalue'] = create_nxfms_loadvalue def create_nxfms_outputfield(src, src_ofs = 0, n_bits = None): if n_bits is None: n_bits = NXM_LENGTH(src) * 8 - src_ofs if n_bits <= 0: raise ValueError('Cannot create flow mod spec with 0 bits') return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_FIELD, NX_LEARN_DST_OUTPUT, n_bits) + _create_field(src, src_ofs))[0] namespace['create_nxfms_outputfield'] = create_nxfms_outputfield def create_nxfms_outputvalue(dst, value): return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_IMMEDIATE, NX_LEARN_DST_OUTPUT, 16) + common.create_binary(value, 2))[0] namespace['create_nxfms_outputvalue'] = create_nxfms_outputvalue ofp_flow_mod_flags = namespace['ofp_flow_mod_flags'] nx_action_learn = nstruct( (uint16, 'idle_timeout'), # /* Idle time before discarding (seconds). */ (uint16, 'hard_timeout'), # /* Max time before discarding (seconds). */ (uint16, 'priority'), # /* Priority level of flow entry. */ (uint64, 'cookie'), # /* Cookie for new flow. 
*/ (ofp_flow_mod_flags, 'flags'), # /* Either 0 or OFPFF_SEND_FLOW_REM. */ (uint8, 'table_id'), # /* Table to insert flow entry. */ (uint8,), # /* Must be zero. */ (uint16, 'fin_idle_timeout'),# /* Idle timeout after FIN, if nonzero. */ (uint16, 'fin_hard_timeout'),# /* Hard timeout after FIN, if nonzero. */ (nx_flow_mod_spec[0], 'specs'), base = nx_action, name = 'nx_action_learn', classifyby = (NXAST_LEARN,), criteria = lambda x: getattr(x, action_subtype) == NXAST_LEARN, init = packvalue(NXAST_LEARN, action_subtype), ) namespace['nx_action_learn'] = nx_action_learn ''' /* Action structure for NXAST_FIN_TIMEOUT. * * This action changes the idle timeout or hard timeout, or both, of this * OpenFlow rule when the rule matches a TCP packet with the FIN or RST flag. * When such a packet is observed, the action reduces the rule's idle timeout * to 'fin_idle_timeout' and its hard timeout to 'fin_hard_timeout'. This * action has no effect on an existing timeout that is already shorter than the * one that the action specifies. A 'fin_idle_timeout' or 'fin_hard_timeout' * of zero has no effect on the respective timeout. * * 'fin_idle_timeout' and 'fin_hard_timeout' are measured in seconds. * 'fin_hard_timeout' specifies time since the flow's creation, not since the * receipt of the FIN or RST. * * This is useful for quickly discarding learned TCP flows that otherwise will * take a long time to expire. * * This action is intended for use with an OpenFlow rule that matches only a * single TCP flow. If the rule matches multiple TCP flows (e.g. it wildcards * all TCP traffic, or all TCP traffic to a particular port), then any FIN or * RST in any of those flows will cause the entire OpenFlow rule to expire * early, which is not normally desirable. */ ''' nx_action_fin_timeout = nstruct( (uint16, 'fin_idle_timeout'), # /* New idle timeout, if nonzero. */ (uint16, 'fin_hard_timeout'), # /* New hard timeout, if nonzero. 
*/ (uint16,), base = nx_action, name = 'nx_action_fin_timeout', criteria = lambda x: getattr(x, action_subtype) == NXAST_FIN_TIMEOUT, classifyby = (NXAST_FIN_TIMEOUT,), init = packvalue(NXAST_FIN_TIMEOUT, action_subtype) ) namespace['nx_action_fin_timeout'] = nx_action_fin_timeout ''' /* Action structure for NXAST_BUNDLE and NXAST_BUNDLE_LOAD. * * The bundle actions choose a slave from a supplied list of options. * NXAST_BUNDLE outputs to its selection. NXAST_BUNDLE_LOAD writes its * selection to a register. * * The list of possible slaves follows the nx_action_bundle structure. The size * of each slave is governed by its type as indicated by the 'slave_type' * parameter. The list of slaves should be padded at its end with zeros to make * the total length of the action a multiple of 8. * * Switches infer from the 'slave_type' parameter the size of each slave. All * implementations must support the NXM_OF_IN_PORT 'slave_type' which indicates * that the slaves are OpenFlow port numbers with NXM_LENGTH(NXM_OF_IN_PORT) == * 2 byte width. Switches should reject actions which indicate unknown or * unsupported slave types. * * Switches use a strategy dictated by the 'algorithm' parameter to choose a * slave. If the switch does not support the specified 'algorithm' parameter, * it should reject the action. * * Several algorithms take into account liveness when selecting slaves. The * liveness of a slave is implementation defined (with one exception), but will * generally take into account things like its carrier status and the results * of any link monitoring protocols which happen to be running on it. In order * to give controllers a place-holder value, the OFPP_NONE port is always * considered live. * * Some slave selection strategies require the use of a hash function, in which * case the 'fields' and 'basis' parameters should be populated. The 'fields' * parameter (one of NX_HASH_FIELDS_*) designates which parts of the flow to * hash. 
Refer to the definition of "enum nx_hash_fields" for details. The * 'basis' parameter is used as a universal hash parameter. Different values * of 'basis' yield different hash results. * * The 'zero' parameter at the end of the action structure is reserved for * future use. Switches are required to reject actions which have nonzero * bytes in the 'zero' field. * * NXAST_BUNDLE actions should have 'ofs_nbits' and 'dst' zeroed. Switches * should reject actions which have nonzero bytes in either of these fields. * * NXAST_BUNDLE_LOAD stores the OpenFlow port number of the selected slave in * dst[ofs:ofs+n_bits]. The format and semantics of 'dst' and 'ofs_nbits' are * similar to those for the NXAST_REG_LOAD action. */ ''' nx_action_bundle = nstruct( # /* Slave choice algorithm to apply to hash value. */ (nx_bd_algorithm, 'algorithm'), # /* One of NX_BD_ALG_*. */ # /* What fields to hash and how. */ (nx_hash_fields, 'fields'), # /* One of NX_HASH_FIELDS_*. */ (uint16, 'basis'), # /* Universal hash parameter. */ (nxm_header, 'slave_type'), # /* NXM_OF_IN_PORT. */ (uint16, 'n_slaves'), # /* Number of slaves. */ (uint16, 'ofs_nbits'), # /* (ofs << 6) | (n_bits - 1). */ (nxm_header, 'dst'), # /* Destination. */ (uint8[4],), # /* Reserved. Must be zero. 
*/ name = 'nx_action_bundle', base = nx_action, criteria = lambda x: getattr(x, action_subtype) == NXAST_BUNDLE or getattr(x, action_subtype) == NXAST_BUNDLE_LOAD, classifyby = (NXAST_BUNDLE, NXAST_BUNDLE_LOAD), init = packvalue(NXAST_BUNDLE, action_subtype) ) namespace['nx_action_bundle'] = nx_action_bundle def _nx_slave_ports_prepack(x): x.n_slaves = len(x.bundles) _nx_slave_ports = nstruct( (nx_port_no[0], 'bundles'), name = '_nx_slave_ports', size = lambda x: x.n_slaves * 2, prepack = _nx_slave_ports_prepack, padding = 1 ) nx_action_bundle_port = nstruct( (_nx_slave_ports,), base = nx_action_bundle, name = 'nx_action_bundle_port', criteria = lambda x: x.slave_type == NXM_OF_IN_PORT, init = packvalue(NXM_OF_IN_PORT, 'slave_type'), lastextra = False, formatter = _createdesc(lambda x: 'bundle_load(%s,%d,%s,%s,%s[%d..%d],slaves:%r)' % \ (x['fields'], x['basis'], x['algorithm'], x['slave_type'], x['dst'], x['ofs_nbits'] >> 6, (x['ofs_nbits'] >> 6) + (x['ofs_nbits'] & 0x3f), x['bundles']) \ if x[action_subtype] == 'NXAST_BUNDLE_LOAD' else 'bundle(%s,%d,%s,%s,slaves:%r)' % (x['fields'], x['basis'], x['algorithm'], x['slave_type'], x['bundles'])) ) namespace['nx_action_bundle_port'] = nx_action_bundle_port def _nx_slave_others_prepack(x): x.n_slaves = len(x.bundlesraw) // NXM_LENGTH(x.slave_type) _nx_slave_others = nstruct( (raw, 'bundlesraw'), name = '_nx_slave_others', size = lambda x: x.n_slaves * NXM_LENGTH(x.slave_type), prepack = _nx_slave_others_prepack, padding = 1 ) nx_action_bundle_others = nstruct( (_nx_slave_others,), base = nx_action_bundle, name = 'nx_action_bundle_others', criteria = lambda x: x.slave_type != NXM_OF_IN_PORT, lastextra = False, init = packvalue(NXM_OF_ETH_DST, 'slave_type'), formatter = _createdesc(lambda x: 'bundle_load(%s,%d,%s,%s,%s[%d..%d],slaves:%r)' % \ (x['fields'], x['basis'], x['algorithm'], x['slave_type'], x['dst'], x['ofs_nbits'] >> 6, (x['ofs_nbits'] >> 6) + (x['ofs_nbits'] & 0x3f), x['bundleraw']) \ if x[action_subtype] == 
'NXAST_BUNDLE_LOAD' else 'bundle(%s,%d,%s,%s,slaves:%r)' % (x['fields'], x['basis'], x['algorithm'], x['slave_type'], x['bundleraw'])) ) namespace['nx_action_bundle_others'] = nx_action_bundle_others ''' /* Action structure for NXAST_DEC_TTL_CNT_IDS. * * If the packet is not IPv4 or IPv6, does nothing. For IPv4 or IPv6, if the * TTL or hop limit is at least 2, decrements it by 1. Otherwise, if TTL or * hop limit is 0 or 1, sends a packet-in to the controllers with each of the * 'n_controllers' controller IDs specified in 'cnt_ids'. * * (This differs from NXAST_DEC_TTL in that for NXAST_DEC_TTL the packet-in is * sent only to controllers with id 0.) */ ''' def _nx_action_cnt_ids_ids_prepack(x): x.n_controllers = len(x.cnt_ids) _nx_action_cnt_ids_ids = nstruct( (uint16[0], 'cnt_ids'), name = '_nx_action_cnt_ids_ids', size = lambda x: 2 * x.n_controllers, prepack = _nx_action_cnt_ids_ids_prepack ) nx_action_cnt_ids = nstruct( (uint16, 'n_controllers'), # /* Number of controllers. */ (uint8[4],), # /* Must be zero. */ (_nx_action_cnt_ids_ids,), base = nx_action, classifyby = (NXAST_DEC_TTL_CNT_IDS,), criteria = lambda x: getattr(x, action_subtype) == NXAST_DEC_TTL_CNT_IDS, init = packvalue(NXAST_DEC_TTL_CNT_IDS, action_subtype), lastextra = False, name = 'nx_action_cnt_ids' ) namespace['nx_action_cnt_ids'] = nx_action_cnt_ids ''' /* Action structure for NXAST_OUTPUT_REG. * * Outputs to the OpenFlow port number written to src[ofs:ofs+nbits]. * * The format and semantics of 'src' and 'ofs_nbits' are similar to those for * the NXAST_REG_LOAD action. * * The acceptable nxm_header values for 'src' are the same as the acceptable * nxm_header values for the 'src' field of NXAST_REG_MOVE. * * The 'max_len' field indicates the number of bytes to send when the chosen * port is OFPP_CONTROLLER. Its semantics are equivalent to the 'max_len' * field of OFPAT_OUTPUT. * * The 'zero' field is required to be zeroed for forward compatibility. 
*/ ''' nx_action_output_reg = nstruct( (uint16, 'ofs_nbits'), # /* (ofs << 6) | (n_bits - 1). */ (nxm_header, 'src'), # /* Source. */ (uint16, 'max_len'), # /* Max length to send to controller. */ (uint8[6],), # /* Reserved, must be zero. */ base = nx_action, classifyby = (NXAST_OUTPUT_REG,), criteria = lambda x: getattr(x, action_subtype) == NXAST_OUTPUT_REG, init = packvalue(NXAST_OUTPUT_REG, action_subtype), name = 'nx_action_output_reg', formatter = _createdesc(lambda x: 'output:%s[%d..%d]' % (x['src'], x['ofs_nbits'] >> 6, (x['ofs_nbits'] >> 6) + (x['ofs_nbits'] & 0x3f))) ) namespace['nx_action_output_reg'] = nx_action_output_reg ''' /* NXAST_EXIT * * Discontinues action processing. * * The NXAST_EXIT action causes the switch to immediately halt processing * actions for the flow. Any actions which have already been processed are * executed by the switch. However, any further actions, including those which * may be in different tables, or different levels of the NXAST_RESUBMIT * hierarchy, will be ignored. * * Uses the nx_action_header structure. */ /* ## --------------------- ## */ /* ## Requests and replies. ## */ /* ## --------------------- ## */ ''' ''' /* NXT_SET_FLOW_FORMAT request. */ ''' nx_set_flow_format = nstruct( (nx_flow_format, 'format'), # /* One of NXFF_*. */ name = 'nx_set_flow_format', base = nicira_header, criteria = lambda x: getattr(x, msg_subtype) == NXT_SET_FLOW_FORMAT, classifyby = (NXT_SET_FLOW_FORMAT,), init = packvalue(NXT_SET_FLOW_FORMAT, msg_subtype) ) namespace['nx_set_flow_format'] = nx_set_flow_format ''' /* NXT_FLOW_MOD (analogous to OFPT_FLOW_MOD). * * It is possible to limit flow deletions and modifications to certain * cookies by using the NXM_NX_COOKIE(_W) matches. The "cookie" field * is used only to add or modify flow cookies. */ ''' ofp_flow_mod_command = namespace['ofp_flow_mod_command'] nx_flow_mod = nstruct( (uint64, 'cookie'), # /* Opaque controller-issued identifier. 
*/ (ofp_flow_mod_command, 'command'), # /* OFPFC_* + possibly a table ID (see comment # * on struct nx_flow_mod_table_id). */ (uint16, 'idle_timeout'), # /* Idle time before discarding (seconds). */ (uint16, 'hard_timeout'), # /* Max time before discarding (seconds). */ (uint16, 'priority'), # /* Priority level of flow entry. */ (uint32, 'buffer_id'), # /* Buffered packet to apply to (or -1). # Not meaningful for OFPFC_DELETE*. */ (nx_port_no, 'out_port'), # /* For OFPFC_DELETE* commands, require # matching entries to include this as an # output port. A value of OFPP_NONE # indicates no restriction. */ (ofp_flow_mod_flags, 'flags'), # /* One of OFPFF_*. */ (uint16, 'match_len'), # /* Size of nx_match. */ (uint8[6],), # /* Align to 64-bits. */ (nx_matches,), base = nicira_header, criteria = lambda x: getattr(x, msg_subtype) == NXT_FLOW_MOD, classifyby = (NXT_FLOW_MOD,), init = packvalue(NXT_FLOW_MOD, msg_subtype), name = 'nx_flow_mod' ) namespace['nx_flow_mod'] = nx_flow_mod ''' /* NXT_FLOW_REMOVED (analogous to OFPT_FLOW_REMOVED). * * 'table_id' is present only in Open vSwitch 1.11 and later. In earlier * versions of Open vSwitch, this is a padding byte that is always zeroed. * Therefore, a 'table_id' value of 0 indicates that the table ID is not known, * and other values may be interpreted as one more than the flow's former table * ID. */ ''' nx_flow_removed = nstruct( (uint64, 'cookie'), # /* Opaque controller-issued identifier. */ (uint16, 'priority'), # /* Priority level of flow entry. */ (ofp_flow_removed_reason, 'reason'), # /* One of OFPRR_*. */ (uint8, 'table_id'), # /* Flow's former table ID, plus one. */ (uint32, 'duration_sec'), # /* Time flow was alive in seconds. */ (uint32, 'duration_nsec'), # /* Time flow was alive in nanoseconds beyond # duration_sec. */ (uint16, 'idle_timeout'), # /* Idle timeout from original flow mod. */ (uint16, 'match_len'), # /* Size of nx_match. 
*/ (uint64, 'packet_count'), (uint64, 'byte_count'), (nx_matches,), base = nicira_header, criteria = lambda x: getattr(x, msg_subtype) == NXT_FLOW_REMOVED, classifyby = (NXT_FLOW_REMOVED,), init = packvalue(NXT_FLOW_REMOVED, msg_subtype), name = 'nx_flow_removed' ) namespace['nx_flow_removed'] = nx_flow_removed ''' /* Nicira vendor stats request of type NXST_FLOW (analogous to OFPST_FLOW * request). * * It is possible to limit matches to certain cookies by using the * NXM_NX_COOKIE and NXM_NX_COOKIE_W matches. */ ''' nx_flow_stats_request = nstruct( (nx_port_no, 'out_port'), #/* Require matching entries to include this # as an output port. A value of OFPP_NONE # indicates no restriction. */ (uint16, 'match_len'), # /* Length of nx_match. */ (uint8, 'table_id'), # /* ID of table to read (from ofp_table_stats) # or 0xff for all tables. */ (uint8[3],), # /* Align to 64 bits. */ (nx_matches,), base = nx_stats_request, criteria = lambda x: getattr(x, stats_subtype) == NXST_FLOW, classifyby = (NXST_FLOW,), init = packvalue(NXST_FLOW, stats_subtype), name = 'nx_flow_stats_request' ) namespace['nx_flow_stats_request'] = nx_flow_stats_request ''' /* Body for Nicira vendor stats reply of type NXST_FLOW (analogous to * OFPST_FLOW reply). * * The values of 'idle_age' and 'hard_age' are only meaningful when talking to * a switch that implements the NXT_FLOW_AGE extension. Zero means that the * true value is unknown, perhaps because hardware does not track the value. * (Zero is also the value that one should ordinarily expect to see talking to * a switch that does not implement NXT_FLOW_AGE, since those switches zero the * padding bytes that these fields replaced.) A nonzero value X represents X-1 * seconds. A value of 65535 represents 65534 or more seconds. * * 'idle_age' is the number of seconds that the flow has been idle, that is, * the number of seconds since a packet passed through the flow. 'hard_age' is * the number of seconds since the flow was last modified (e.g. 
OFPFC_MODIFY or * OFPFC_MODIFY_STRICT). (The 'duration_*' fields are the elapsed time since * the flow was added, regardless of subsequent modifications.) * * For a flow with an idle or hard timeout, 'idle_age' or 'hard_age', * respectively, will ordinarily be smaller than the timeout, but flow * expiration times are only approximate and so one must be prepared to * tolerate expirations that occur somewhat early or late. */ ''' ofp_action = namespace['ofp_action'] nx_flow_stats = nstruct( (uint16, 'length'), # /* Length of this entry. */ (uint8, 'table_id'), # /* ID of table flow came from. */ (uint8,), (uint32, 'duration_sec'), # /* Time flow has been alive in seconds. */ (uint32, 'duration_nsec'), # /* Time flow has been alive in nanoseconds # beyond duration_sec. */ (uint16, 'priority'), # /* Priority of the entry. */ (uint16, 'idle_timeout'), # /* Number of seconds idle before expiration. */ (uint16, 'hard_timeout'), # /* Number of seconds before expiration. */ (uint16, 'match_len'), # /* Length of nx_match. */ (uint16, 'idle_age'), # /* Seconds since last packet, plus one. */ (uint16, 'hard_age'), # /* Seconds since last modification, plus one. */ (uint64, 'cookie'), # /* Opaque controller-issued identifier. */ (uint64, 'packet_count'), # /* Number of packets, UINT64_MAX if unknown. */ (uint64, 'byte_count'), # /* Number of bytes, UINT64_MAX if unknown. */ #======================================================================= # /* Followed by: # * - Exactly match_len (possibly 0) bytes containing the nx_match, then # * - Exactly (match_len + 7)/8*8 - match_len (between 0 and 7) bytes of # * all-zero bytes, then # * - Actions to fill out the remainder 'length' bytes (always a multiple # * of 8). 
# */ #======================================================================= (nx_matches,), (ofp_action[0], 'actions'), name = 'nx_flow_stats', size = sizefromlen(65536, 'length'), prepack = packsize('length') ) namespace['nx_flow_stats'] = nx_flow_stats nx_flow_stats_reply = nstruct( (nx_flow_stats[0], 'stats'), base = nx_stats_reply, classifyby = (NXST_FLOW,), criteria = lambda x: getattr(x, stats_subtype) == NXST_FLOW, init = packvalue(NXST_FLOW, stats_subtype), name = 'nx_flow_stats_reply' ) namespace['nx_flow_stats_reply'] = nx_flow_stats_reply ''' /* Nicira vendor stats request of type NXST_AGGREGATE (analogous to * OFPST_AGGREGATE request). * * The reply format is identical to the reply format for OFPST_AGGREGATE, * except for the header. */ ''' nx_aggregate_stats_request = nstruct( (nx_port_no, 'out_port'), # /* Require matching entries to include this # as an output port. A value of OFPP_NONE # indicates no restriction. */ (uint16, 'match_len'), # /* Length of nx_match. */ (uint8, 'table_id'), # /* ID of table to read (from ofp_table_stats) # or 0xff for all tables. */ (uint8[3],), # /* Align to 64 bits. */ #======================================================================= # /* Followed by: # * - Exactly match_len (possibly 0) bytes containing the nx_match, then # * - Exactly (match_len + 7)/8*8 - match_len (between 0 and 7) bytes of # * all-zero bytes, which must also exactly fill out the length of the # * message. # */ #======================================================================= (nx_matches,), base = nx_stats_request, name = 'nx_aggregate_stats_request', criteria = lambda x: getattr(x, stats_subtype) == NXST_AGGREGATE, classifyby = (NXST_AGGREGATE,), init = packvalue(NXST_AGGREGATE, stats_subtype), lastextra = False ) namespace['nx_aggregate_stats_request'] = nx_aggregate_stats_request nx_aggregate_stats_reply = nstruct( (uint64, 'packet_count'), # /* Number of packets in flows. */ (uint64, 'byte_count'), # /* Number of bytes in flows. 
*/ (uint32, 'flow_count'), # /* Number of flows. */ (uint8[4],), base = nx_stats_reply, name = 'nx_aggregate_stats_reply', criteria = lambda x: getattr(x, stats_subtype) == NXST_AGGREGATE, classifyby = (NXST_AGGREGATE,), init = packvalue(NXST_AGGREGATE, stats_subtype) ) namespace['nx_aggregate_stats_reply'] = nx_aggregate_stats_reply ''' /* NXT_SET_CONTROLLER_ID. * * Each OpenFlow controller connection has a 16-bit identifier that is * initially 0. This message changes the connection's ID to 'id'. * * Controller connection IDs need not be unique. * * The NXAST_CONTROLLER action is the only current user of controller * connection IDs. */ ''' nx_controller_id = nstruct( (uint8[6],), # /* Must be zero. */ (uint16, 'controller_id'), # /* New controller connection ID. */ base = nicira_header, name = 'nx_controller_id', criteria = lambda x: getattr(x, msg_subtype) == NXT_SET_CONTROLLER_ID, init = packvalue(NXT_SET_CONTROLLER_ID, msg_subtype), classifyby = (NXT_SET_CONTROLLER_ID,) ) namespace['nx_controller_id'] = nx_controller_id ''' /* Action structure for NXAST_CONTROLLER. * * This generalizes using OFPAT_OUTPUT to send a packet to OFPP_CONTROLLER. In * addition to the 'max_len' that OFPAT_OUTPUT supports, it also allows * specifying: * * - 'reason': The reason code to use in the ofp_packet_in or nx_packet_in. * * - 'controller_id': The ID of the controller connection to which the * ofp_packet_in should be sent. The ofp_packet_in or nx_packet_in is * sent only to controllers that have the specified controller connection * ID. See "struct nx_controller_id" for more information. */ ''' nx_action_controller = nstruct( (uint16, 'max_len'), # /* Maximum length to send to controller. */ (uint16, 'controller_id'), # /* Controller ID to send packet-in. */ (ofp_packet_in_reason, 'reason'), # /* enum ofp_packet_in_reason (OFPR_*). 
*/ (uint8,), base = nx_action, name = 'nx_action_controller', criteria = lambda x: getattr(x, action_subtype) == NXAST_CONTROLLER, classifyby = (NXAST_CONTROLLER,), init = packvalue(NXAST_CONTROLLER, action_subtype) ) namespace['nx_action_controller'] = nx_action_controller ''' /* Flow Table Monitoring * ===================== * * NXST_FLOW_MONITOR allows a controller to keep track of changes to OpenFlow * flow table(s) or subsets of them, with the following workflow: * * 1. The controller sends an NXST_FLOW_MONITOR request to begin monitoring * flows. The 'id' in the request must be unique among all monitors that * the controller has started and not yet canceled on this OpenFlow * connection. * * 2. The switch responds with an NXST_FLOW_MONITOR reply. If the request's * 'flags' included NXFMF_INITIAL, the reply includes all the flows that * matched the request at the time of the request (with event NXFME_ADDED). * If 'flags' did not include NXFMF_INITIAL, the reply is empty. * * The reply uses the xid of the request (as do all replies to OpenFlow * requests). * * 3. Whenever a change to a flow table entry matches some outstanding monitor * request's criteria and flags, the switch sends a notification to the * controller as an additional NXST_FLOW_MONITOR reply with xid 0. * * When multiple outstanding monitors match a single change, only a single * notification is sent. This merged notification includes the information * requested in any of the individual monitors. That is, if any of the * matching monitors requests actions (NXFMF_ACTIONS), the notification * includes actions, and if any of the monitors request full changes for the * controller's own changes (NXFMF_OWN), the controller's own changes will * be included in full. * * 4. The controller may cancel a monitor with NXT_FLOW_MONITOR_CANCEL. No * further notifications will be sent on the basis of the canceled monitor * afterward. 
* * * Buffer Management * ================= * * OpenFlow messages for flow monitor notifications can overflow the buffer * space available to the switch, either temporarily (e.g. due to network * conditions slowing OpenFlow traffic) or more permanently (e.g. the sustained * rate of flow table change exceeds the network bandwidth between switch and * controller). * * When Open vSwitch's notification buffer space reaches a limiting threshold, * OVS reacts as follows: * * 1. OVS sends an NXT_FLOW_MONITOR_PAUSED message to the controller, following * all the already queued notifications. After it receives this message, * the controller knows that its view of the flow table, as represented by * flow monitor notifications, is incomplete. * * 2. As long as the notification buffer is not empty: * * - NXMFE_ADD and NXFME_MODIFIED notifications will not be sent. * * - NXFME_DELETED notifications will still be sent, but only for flows * that existed before OVS sent NXT_FLOW_MONITOR_PAUSED. * * - NXFME_ABBREV notifications will not be sent. They are treated as * the expanded version (and therefore only the NXFME_DELETED * components, if any, are sent). * * 3. When the notification buffer empties, OVS sends NXFME_ADD notifications * for flows added since the buffer reached its limit and NXFME_MODIFIED * notifications for flows that existed before the limit was reached and * changed after the limit was reached. * * 4. OVS sends an NXT_FLOW_MONITOR_RESUMED message to the controller. After * it receives this message, the controller knows that its view of the flow * table, as represented by flow monitor notifications, is again complete. * * This allows the maximum buffer space requirement for notifications to be * bounded by the limit plus the maximum number of supported flows. * * * "Flow Removed" messages * ======================= * * The flow monitor mechanism is independent of OFPT_FLOW_REMOVED and * NXT_FLOW_REMOVED. 
Flow monitor updates for deletion are sent if * NXFMF_DELETE is set on a monitor, regardless of whether the * OFPFF_SEND_FLOW_REM flag was set when the flow was added. */ /* NXST_FLOW_MONITOR request. * * The NXST_FLOW_MONITOR request's body consists of an array of zero or more * instances of this structure. The request arranges to monitor the flows * that match the specified criteria, which are interpreted in the same way as * for NXST_FLOW. * * 'id' identifies a particular monitor for the purpose of allowing it to be * canceled later with NXT_FLOW_MONITOR_CANCEL. 'id' must be unique among * existing monitors that have not already been canceled. * * The reply includes the initial flow matches for monitors that have the * NXFMF_INITIAL flag set. No single flow will be included in the reply more * than once, even if more than one requested monitor matches that flow. The * reply will be empty if none of the monitors has NXFMF_INITIAL set or if none * of the monitors initially matches any flows. * * For NXFMF_ADD, an event will be reported if 'out_port' matches against the * actions of the flow being added or, for a flow that is replacing an existing * flow, if 'out_port' matches against the actions of the flow being replaced. * For NXFMF_DELETE, 'out_port' matches against the actions of a flow being * deleted. For NXFMF_MODIFY, an event will be reported if 'out_port' matches * either the old or the new actions. */ ''' ofp_table = namespace['ofp_table'] nx_flow_monitor_request = nstruct( (uint32, 'id'), # /* Controller-assigned ID for this monitor. */ (nx_flow_monitor_flags, 'flags'), # /* NXFMF_*. */ (nx_port_no, 'out_port'), # /* Required output port, if not OFPP_NONE. */ (uint16, 'match_len'), # /* Length of nx_match. */ (ofp_table, 'table_id'), # /* One table's ID or 0xff for all tables. */ (uint8[5],), # /* Align to 64 bits (must be zero). 
*/ (nx_matches,), name = 'nx_flow_monitor_request', base = nx_stats_request, criteria = lambda x: getattr(x, stats_subtype) == NXST_FLOW_MONITOR, init = packvalue(NXST_FLOW_MONITOR, stats_subtype), classifyby = (NXST_FLOW_MONITOR,) ) namespace['nx_flow_monitor_request'] = nx_flow_monitor_request ''' /* NXST_FLOW_MONITOR reply header. * * The body of an NXST_FLOW_MONITOR reply is an array of variable-length * structures, each of which begins with this header. The 'length' member may * be used to traverse the array, and the 'event' member may be used to * determine the particular structure. * * Every instance is a multiple of 8 bytes long. */ ''' nx_flow_update = nstruct( (uint16, 'length'), #/* Length of this entry. */ (nx_flow_update_event, 'event'), # /* One of NXFME_*. */ name = 'nx_flow_update', size = sizefromlen(65536, 'length'), prepack = packsize('length') ) namespace['nx_flow_update'] = nx_flow_update ''' /* NXST_FLOW_MONITOR reply for NXFME_ADDED, NXFME_DELETED, and * NXFME_MODIFIED. */ ''' nx_flow_update_full = nstruct( (ofp_flow_removed_reason, 'reason'), # /* OFPRR_* for NXFME_DELETED, else zero. */ (uint16, 'priority'), # /* Priority of the entry. */ (uint16, 'idle_timeout'), # /* Number of seconds idle before expiration. */ (uint16, 'hard_timeout'), # /* Number of seconds before expiration. */ (uint16, 'match_len'), # /* Length of nx_match. */ (uint8, 'table_id'), # /* ID of flow's table. */ (uint8,), # /* Reserved, currently zeroed. */ (uint64, 'cookie'), # /* Opaque controller-issued identifier. */ #======================================================================= # /* Followed by: # * - Exactly match_len (possibly 0) bytes containing the nx_match, then # * - Exactly (match_len + 7)/8*8 - match_len (between 0 and 7) bytes of # * all-zero bytes, then # * - Actions to fill out the remainder 'length' bytes (always a multiple # * of 8). If NXFMF_ACTIONS was not specified, or 'event' is # * NXFME_DELETED, no actions are included. 
# */ #======================================================================= (nx_matches,), (ofp_action[0], 'actions'), name = 'nx_flow_update_full', base = nx_flow_update, criteria = lambda x: x.event in (NXFME_ADDED, NXFME_DELETED, NXFME_MODIFIED), init = packvalue(NXFME_ADDED, 'event') ) namespace['nx_flow_update_full'] = nx_flow_update_full ''' /* NXST_FLOW_MONITOR reply for NXFME_ABBREV. * * When the controller does not specify NXFMF_OWN in a monitor request, any * flow tables changes due to the controller's own requests (on the same * OpenFlow channel) will be abbreviated, when possible, to this form, which * simply specifies the 'xid' of the OpenFlow request (e.g. an OFPT_FLOW_MOD or * NXT_FLOW_MOD) that caused the change. * * Some changes cannot be abbreviated and will be sent in full: * * - Changes that only partially succeed. This can happen if, for example, * a flow_mod with type OFPFC_MODIFY affects multiple flows, but only some * of those modifications succeed (e.g. due to hardware limitations). * * This cannot occur with the current implementation of the Open vSwitch * software datapath. It could happen with other datapath implementations. * * - Changes that race with conflicting changes made by other controllers or * other flow_mods (not separated by barriers) by the same controller. * * This cannot occur with the current Open vSwitch implementation * (regardless of datapath) because Open vSwitch internally serializes * potentially conflicting changes. * * A flow_mod that does not change the flow table will not trigger any * notification, even an abbreviated one. For example, a "modify" or "delete" * flow_mod that does not match any flows will not trigger a notification. * Whether an "add" or "modify" that specifies all the same parameters that a * flow already has triggers a notification is unspecified and subject to * change in future versions of Open vSwitch. 
* * OVS will always send the notifications for a given flow table change before * the reply to a OFPT_BARRIER_REQUEST request that follows the flow table * change. Thus, if the controller does not receive an abbreviated (or * unabbreviated) notification for a flow_mod before the next * OFPT_BARRIER_REPLY, it will never receive one. */ ''' nx_flow_update_abbrev = nstruct( (uint32, 'xid'), # /* Controller-specified xid from flow_mod. */ name = 'nx_flow_update_abbrev', base = nx_flow_update, criteria = lambda x: x.event == NXFME_ABBREV, init = packvalue(NXFME_ABBREV, 'event') ) namespace['nx_flow_update_abbrev'] = nx_flow_update_abbrev nx_flow_monitor_reply = nstruct( (nx_flow_update[0], 'stats'), base = nx_stats_reply, classifyby = (NXST_FLOW_MONITOR,), name = 'nx_flow_monitor_reply', criteria = lambda x: getattr(x, stats_subtype) == NXST_FLOW_MONITOR, init = packvalue(NXST_FLOW_MONITOR, stats_subtype) ) namespace['nx_flow_monitor_reply'] = nx_flow_monitor_reply ''' /* NXT_FLOW_MONITOR_CANCEL. * * Used by a controller to cancel an outstanding monitor. */ ''' nx_flow_monitor_cancel = nstruct( (uint32, 'id'), # /* 'id' from nx_flow_monitor_request. */ name = 'nx_flow_monitor_cancel', base = nicira_header, classifyby = (NXT_FLOW_MONITOR_CANCEL,), criteria = lambda x: getattr(x, msg_subtype) == NXT_FLOW_MONITOR_CANCEL, init = packvalue(NXT_FLOW_MONITOR_CANCEL, msg_subtype) ) namespace['nx_flow_monitor_cancel'] = nx_flow_monitor_cancel ''' /* Action structure for NXAST_WRITE_METADATA. * * Modifies the 'mask' bits of the metadata value. */ ''' nx_action_write_metadata = nstruct( (uint8[6],), # /* Must be zero. */ (uint64, 'metadata'), # /* Metadata register. */ (uint64, 'mask'), # /* Metadata mask. 
*/ base = nx_action, classifyby = (NXAST_WRITE_METADATA,), criteria = lambda x: getattr(x, action_subtype) == NXAST_WRITE_METADATA, init = packvalue(NXAST_WRITE_METADATA, action_subtype), name = 'nx_action_write_metadata' ) namespace['nx_action_write_metadata'] = nx_action_write_metadata ''' /* Action structure for NXAST_PUSH_MPLS. */ ''' nx_action_push_mpls = nstruct( (ethertype, 'ethertype'), # /* Ethertype */ (uint8[4],), base = nx_action, classifyby = (NXAST_PUSH_MPLS,), criteria = lambda x: getattr(x, action_subtype) == NXAST_PUSH_MPLS, init = packvalue(NXAST_PUSH_MPLS, action_subtype), name = 'nx_action_push_mpls' ) namespace['nx_action_push_mpls'] = nx_action_push_mpls ''' /* Action structure for NXAST_POP_MPLS. */ ''' nx_action_pop_mpls = nstruct( (ethertype, 'ethertype'), # /* Ethertype */ (uint8[4],), base = nx_action, classifyby = (NXAST_POP_MPLS,), criteria = lambda x: getattr(x, action_subtype) == NXAST_POP_MPLS, init = packvalue(NXAST_POP_MPLS, action_subtype), name = 'nx_action_pop_mpls' ) namespace['nx_action_pop_mpls'] = nx_action_pop_mpls ''' /* Action structure for NXAST_SET_MPLS_LABEL. */ ''' nx_action_mpls_label = nstruct( (uint8[2],), # /* Must be zero. */ (uint32, 'label'), # /* LABEL */ base = nx_action, classifyby = (NXAST_SET_MPLS_LABEL,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SET_MPLS_LABEL, init = packvalue(NXAST_SET_MPLS_LABEL, action_subtype), name = 'nx_action_mpls_label' ) namespace['nx_action_mpls_label'] = nx_action_mpls_label ''' /* Action structure for NXAST_SET_MPLS_TC. */ ''' nx_action_mpls_tc = nstruct( (uint8, 'tc'), # /* TC */ (uint8[5],), base = nx_action, classifyby = (NXAST_SET_MPLS_TC,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SET_MPLS_TC, init = packvalue(NXAST_SET_MPLS_TC, action_subtype), name = 'nx_action_mpls_tc' ) namespace['nx_action_mpls_tc'] = nx_action_mpls_tc ''' /* Action structure for NXAST_SET_MPLS_TTL. 
*/ ''' nx_action_mpls_ttl = nstruct( (uint8, 'ttl'), # /* TTL */ (uint8[5],), base = nx_action, classifyby = (NXAST_SET_MPLS_TTL,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SET_MPLS_TTL, init = packvalue(NXAST_SET_MPLS_TTL, action_subtype), name = 'nx_action_mpls_ttl' ) namespace['nx_action_mpls_ttl'] = nx_action_mpls_ttl ''' /* Action structure for NXAST_SAMPLE. * * Samples matching packets with the given probability and sends them * each to the set of collectors identified with the given ID. The * probability is expressed as a number of packets to be sampled out * of USHRT_MAX packets, and must be >0. * * When sending packet samples to IPFIX collectors, the IPFIX flow * record sent for each sampled packet is associated with the given * observation domain ID and observation point ID. Each IPFIX flow * record contain the sampled packet's headers when executing this * rule. If a sampled packet's headers are modified by previous * actions in the flow, those modified headers are sent. */ ''' nx_action_sample = nstruct( (uint16, 'probability'), # /* Fraction of packets to sample. */ (uint32, 'collector_set_id'), # /* ID of collector set in OVSDB. */ (uint32, 'obs_domain_id'), # /* ID of sampling observation domain. */ (uint32, 'obs_point_id'), # /* ID of sampling observation point. */ base = nx_action, classifyby = (NXAST_SAMPLE,), criteria = lambda x: getattr(x, action_subtype) == NXAST_SAMPLE, init = packvalue(NXAST_SAMPLE, action_subtype), name = 'nx_action_sample' ) namespace['nx_action_sample'] = nx_action_sample
/* This command enables or disables an Open vSwitch extension that allows a * controller to specify the OpenFlow table to which a flow should be added, * instead of having the switch decide which table is most appropriate as * required by OpenFlow 1.0. Because NXM was designed as an extension to * OpenFlow 1.0, the extension applies equally to ofp10_flow_mod and * nx_flow_mod. By default, the extension is disabled. * * When this feature is enabled, Open vSwitch treats struct ofp10_flow_mod's * and struct nx_flow_mod's 16-bit 'command' member as two separate fields. * The upper 8 bits are used as the table ID, the lower 8 bits specify the * command as usual. A table ID of 0xff is treated like a wildcarded table ID. * * The specific treatment of the table ID depends on the type of flow mod: * * - OFPFC_ADD: Given a specific table ID, the flow is always placed in that * table. If an identical flow already exists in that table only, then it * is replaced. If the flow cannot be placed in the specified table, * either because the table is full or because the table cannot support * flows of the given type, the switch replies with an OFPFMFC_TABLE_FULL * error. (A controller can distinguish these cases by comparing the * current and maximum number of entries reported in ofp_table_stats.) * * If the table ID is wildcarded, the switch picks an appropriate table * itself. If an identical flow already exist in the selected flow table, * then it is replaced. The choice of table might depend on the flows * that are already in the switch; for example, if one table fills up then * the switch might fall back to another one. * * - OFPFC_MODIFY, OFPFC_DELETE: Given a specific table ID, only flows * within that table are matched and modified or deleted. If the table ID * is wildcarded, flows within any table may be matched and modified or * deleted. 
* * - OFPFC_MODIFY_STRICT, OFPFC_DELETE_STRICT: Given a specific table ID, * only a flow within that table may be matched and modified or deleted. * If the table ID is wildcarded and exactly one flow within any table * matches, then it is modified or deleted; if flows in more than one * table match, then none is modified or deleted. */
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/misc/openflow/nicira_ext.py#L1130-L3077
mduggan/cdifflib
cdifflib.py
CSequenceMatcher.find_longest_match
def find_longest_match(self, alo, ahi, blo, bhi):
    """Locate the longest matching block in a[alo:ahi] and b[blo:bhi].

    Thin wrapper that delegates the search to the C implementation and
    repackages its (i, j, size) result as a named _Match triple.
    """
    result = _cdifflib.find_longest_match(self, alo, ahi, blo, bhi)
    return _Match(*result)
python
def find_longest_match(self, alo, ahi, blo, bhi):
    """Locate the longest matching block in a[alo:ahi] and b[blo:bhi].

    Thin wrapper that delegates the search to the C implementation and
    repackages its (i, j, size) result as a named _Match triple.
    """
    result = _cdifflib.find_longest_match(self, alo, ahi, blo, bhi)
    return _Match(*result)
Find longest matching block in a[alo:ahi] and b[blo:bhi]. Wrapper for the C implementation of this function.
https://github.com/mduggan/cdifflib/blob/95b1e28fbd14d2c2ff38e108745f7b38b89748a4/cdifflib.py#L32-L38
mduggan/cdifflib
cdifflib.py
CSequenceMatcher.set_seq1
def set_seq1(self, a):
    """Same as SequenceMatcher.set_seq1, but coerces non-list inputs to a
    list so the C layer can index them.

    :param a: the new "left" sequence to compare; non-list iterables
        (e.g. strings) are converted to a list of elements.
    :raises TypeError: if any element of ``a`` is unhashable — the C
        layer requires hashable items, so the error is surfaced here.
    """
    if a is self.a:
        return
    self.a = a
    if not isinstance(self.a, list):
        self.a = list(self.a)
    # Probe hashability with a plain loop (the original built a throwaway
    # list just for this side effect); hash() raises TypeError for
    # unhashable items before they reach the C layer.
    for item in self.a:
        hash(item)
python
def set_seq1(self, a):
    """Same as SequenceMatcher.set_seq1, but coerces non-list inputs to a
    list so the C layer can index them.

    :param a: the new "left" sequence to compare; non-list iterables
        (e.g. strings) are converted to a list of elements.
    :raises TypeError: if any element of ``a`` is unhashable — the C
        layer requires hashable items, so the error is surfaced here.
    """
    if a is self.a:
        return
    self.a = a
    if not isinstance(self.a, list):
        self.a = list(self.a)
    # Probe hashability with a plain loop (the original built a throwaway
    # list just for this side effect); hash() raises TypeError for
    # unhashable items before they reach the C layer.
    for item in self.a:
        hash(item)
Same as SequenceMatcher.set_seq1, but checks for non-list inputs and converts them for the C implementation.
https://github.com/mduggan/cdifflib/blob/95b1e28fbd14d2c2ff38e108745f7b38b89748a4/cdifflib.py#L40-L50
mduggan/cdifflib
cdifflib.py
CSequenceMatcher.set_seq2
def set_seq2(self, b):
    """Same as SequenceMatcher.set_seq2, but uses the C chain_b
    implementation.

    Both sequences are coerced to lists so the C layer can index them,
    and every element is probed with hash() so unhashable items raise
    TypeError here instead of deep inside the C code.
    """
    if b is self.b and hasattr(self, 'isbjunk'):
        return
    self.b = b
    if not isinstance(self.a, list):
        self.a = list(self.a)
    if not isinstance(self.b, list):
        self.b = list(self.b)
    # Probe hashability with plain loops (the original built throwaway
    # lists just for this side effect).
    for item in self.a:
        hash(item)
    for item in self.b:
        hash(item)
    # Invalidate caches that depend on b.
    self.matching_blocks = self.opcodes = None
    self.fullbcount = None
    junk, popular = _cdifflib.chain_b(self)
    # Sanity-check the containers returned by the C layer.
    assert hasattr(junk, '__contains__')
    assert hasattr(popular, '__contains__')
    self.isbjunk = junk.__contains__
    self.isbpopular = popular.__contains__
python
def set_seq2(self, b):
    """Same as SequenceMatcher.set_seq2, but uses the C chain_b
    implementation.

    Both sequences are coerced to lists so the C layer can index them,
    and every element is probed with hash() so unhashable items raise
    TypeError here instead of deep inside the C code.
    """
    if b is self.b and hasattr(self, 'isbjunk'):
        return
    self.b = b
    if not isinstance(self.a, list):
        self.a = list(self.a)
    if not isinstance(self.b, list):
        self.b = list(self.b)
    # Probe hashability with plain loops (the original built throwaway
    # lists just for this side effect).
    for item in self.a:
        hash(item)
    for item in self.b:
        hash(item)
    # Invalidate caches that depend on b.
    self.matching_blocks = self.opcodes = None
    self.fullbcount = None
    junk, popular = _cdifflib.chain_b(self)
    # Sanity-check the containers returned by the C layer.
    assert hasattr(junk, '__contains__')
    assert hasattr(popular, '__contains__')
    self.isbjunk = junk.__contains__
    self.isbpopular = popular.__contains__
Same as SequenceMatcher.set_seq2, but uses the c chainb implementation.
https://github.com/mduggan/cdifflib/blob/95b1e28fbd14d2c2ff38e108745f7b38b89748a4/cdifflib.py#L52-L75
mduggan/cdifflib
cdifflib.py
CSequenceMatcher.get_matching_blocks
def get_matching_blocks(self):
    """Same as SequenceMatcher.get_matching_blocks, but calls through to
    a faster C loop for find_longest_match.

    Fix: the result is materialized as a list of _Match triples before
    caching, so the first call and later (cached) calls return the same
    kind of object.  The original cached raw tuples and, on Python 3,
    returned a one-shot ``map`` object from the first call only.
    """
    if self.matching_blocks is not None:
        return self.matching_blocks
    matching_blocks = _cdifflib.matching_blocks(self)
    # Terminating sentinel, as in difflib: (len(a), len(b), 0).
    matching_blocks.append((len(self.a), len(self.b), 0))
    # Cache the named-tuple form so every call is consistent.
    self.matching_blocks = list(map(_Match._make, matching_blocks))
    return self.matching_blocks
python
def get_matching_blocks(self):
    """Same as SequenceMatcher.get_matching_blocks, but calls through to
    a faster C loop for find_longest_match.

    Fix: the result is materialized as a list of _Match triples before
    caching, so the first call and later (cached) calls return the same
    kind of object.  The original cached raw tuples and, on Python 3,
    returned a one-shot ``map`` object from the first call only.
    """
    if self.matching_blocks is not None:
        return self.matching_blocks
    matching_blocks = _cdifflib.matching_blocks(self)
    # Terminating sentinel, as in difflib: (len(a), len(b), 0).
    matching_blocks.append((len(self.a), len(self.b), 0))
    # Cache the named-tuple form so every call is consistent.
    self.matching_blocks = list(map(_Match._make, matching_blocks))
    return self.matching_blocks
Same as SequenceMatcher.get_matching_blocks, but calls through to a faster loop for find_longest_match. The rest is the same.
https://github.com/mduggan/cdifflib/blob/95b1e28fbd14d2c2ff38e108745f7b38b89748a4/cdifflib.py#L78-L89
hubo1016/namedstruct
namedstruct/namedstruct.py
_tostream
def _tostream(parser, obj, stream, skipprepack = False): """ Compatible to old parsers """ if hasattr(parser, 'tostream'): return parser.tostream(obj, stream, skipprepack) else: data = parser.tobytes(obj, skipprepack) cls = type(parser) if cls not in _deprecated_parsers: _deprecated_parsers.add(cls) warnings.warn("Parser %r does not have 'tostream' interfaces" % (cls,), UserWarning) return stream.write(data)
python
def _tostream(parser, obj, stream, skipprepack = False): """ Compatible to old parsers """ if hasattr(parser, 'tostream'): return parser.tostream(obj, stream, skipprepack) else: data = parser.tobytes(obj, skipprepack) cls = type(parser) if cls not in _deprecated_parsers: _deprecated_parsers.add(cls) warnings.warn("Parser %r does not have 'tostream' interfaces" % (cls,), UserWarning) return stream.write(data)
Compatible to old parsers
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L44-L56
hubo1016/namedstruct
namedstruct/namedstruct.py
_to_str
def _to_str(dumped_val, encoding='utf-8', ordered=True): """ Convert bytes in a dump value to str, allowing json encode """ _dict = OrderedDict if ordered else dict if isinstance(dumped_val, dict): return OrderedDict((k, _to_str(v, encoding)) for k,v in dumped_val.items()) elif isinstance(dumped_val, (list, tuple)): return [_to_str(v, encoding) for v in dumped_val] elif isinstance(dumped_val, bytes): try: d = dumped_val.decode('utf-8') except Exception: d = repr(dumped_val) return d else: return dumped_val
python
def _to_str(dumped_val, encoding='utf-8', ordered=True): """ Convert bytes in a dump value to str, allowing json encode """ _dict = OrderedDict if ordered else dict if isinstance(dumped_val, dict): return OrderedDict((k, _to_str(v, encoding)) for k,v in dumped_val.items()) elif isinstance(dumped_val, (list, tuple)): return [_to_str(v, encoding) for v in dumped_val] elif isinstance(dumped_val, bytes): try: d = dumped_val.decode('utf-8') except Exception: d = repr(dumped_val) return d else: return dumped_val
Convert bytes in a dump value to str, allowing json encode
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L407-L423
hubo1016/namedstruct
namedstruct/namedstruct.py
dump
def dump(val, humanread = True, dumpextra = False, typeinfo = DUMPTYPE_FLAT, ordered=True, tostr=False, encoding='utf-8'):
    '''
    Convert a parsed NamedStruct (possibly containing nested NamedStructs)
    into a JSON-friendly structure built only from Python primitives
    (dicts, lists, bytes, integers, ...), suitable for json.dumps or pprint.

    :param val: parsed result, may contain NamedStruct

    :param humanread: if True (default), raw data is rendered with the
        type-defined formatters — enumerators become names, IP addresses
        become dotted strings, etc.

    :param dumpextra: if True, "extra" data is dumped into an '_extra'
        field; False (default) ignores it.

    :param typeinfo: how struct type information is attached:

        DUMPTYPE_FLAT ('flat')
            add a '_type' field with the type information (default)
        DUMPTYPE_KEY ('key')
            wrap the value as {'<struct_type>': value}
        DUMPTYPE_NONE ('none')
            no type information

    :param tostr: if True, convert all bytes to str

    :param encoding: when tostr=`True`, bytes are first decoded with
        `encoding`; on failure `repr()` is used instead.

    :returns: "dump" format of val, suitable for JSON-encode or print.
    '''
    result = _dump(val, humanread, dumpextra, typeinfo, ordered)
    if not tostr:
        return result
    return _to_str(result, encoding, ordered)
python
def dump(val, humanread = True, dumpextra = False, typeinfo = DUMPTYPE_FLAT, ordered=True, tostr=False, encoding='utf-8'):
    '''
    Convert a parsed NamedStruct (possibly containing nested NamedStructs)
    into a JSON-friendly structure built only from Python primitives
    (dicts, lists, bytes, integers, ...), suitable for json.dumps or pprint.

    :param val: parsed result, may contain NamedStruct

    :param humanread: if True (default), raw data is rendered with the
        type-defined formatters — enumerators become names, IP addresses
        become dotted strings, etc.

    :param dumpextra: if True, "extra" data is dumped into an '_extra'
        field; False (default) ignores it.

    :param typeinfo: how struct type information is attached:

        DUMPTYPE_FLAT ('flat')
            add a '_type' field with the type information (default)
        DUMPTYPE_KEY ('key')
            wrap the value as {'<struct_type>': value}
        DUMPTYPE_NONE ('none')
            no type information

    :param tostr: if True, convert all bytes to str

    :param encoding: when tostr=`True`, bytes are first decoded with
        `encoding`; on failure `repr()` is used instead.

    :returns: "dump" format of val, suitable for JSON-encode or print.
    '''
    result = _dump(val, humanread, dumpextra, typeinfo, ordered)
    if not tostr:
        return result
    return _to_str(result, encoding, ordered)
Convert a parsed NamedStruct (probably with additional NamedStruct as fields) into a JSON-friendly format, with only Python primitives (dictionaries, lists, bytes, integers etc.) Then you may use json.dumps, or pprint to further process the result. :param val: parsed result, may contain NamedStruct :param humanread: if True (default), convert raw data into readable format with type-defined formatters. For example, enumerators are converted into names, IP addresses are converted into dotted formats, etc. :param dumpextra: if True, dump "extra" data in '_extra' field. False (default) to ignore them. :param typeinfo: Add struct type information in the dump result. May be the following values: DUMPTYPE_FLAT ('flat') add a field '_type' for the type information (default) DUMPTYPE_KEY ('key') convert the value to dictionary like: {'<struc_type>': value} DUMPTYPE_NONE ('none') do not add type information :param tostr: if True, convert all bytes to str :param encoding: if tostr=`True`, first try to decode bytes in `encoding`. If failed, use `repr()` instead. :returns: "dump" format of val, suitable for JSON-encode or print.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L426-L460
hubo1016/namedstruct
namedstruct/namedstruct.py
sizefromlen
def sizefromlen(limit, *properties):
    '''
    Factory for a "size" function used in nstruct: reads the struct size
    from the field found by following *properties* and guards it against
    *limit*.

    To retrieve the size without any limit, simply use a lambda
    expression instead: lambda x: x.header.length

    :param limit: maximum allowed size; if the retrieved value is larger,
        BadLenError is raised to protect against serious results such as
        memory overflow or a dead loop.

    :param properties: attribute names forming a path to the length
        field, e.g. sizefromlen(256, 'header', 'length') -> s.header.length

    :returns: a function taking a NamedStruct and returning the length
        value found at the property path.
    '''
    def _getsize(namedstruct):
        value = namedstruct._target
        for name in properties:
            value = getattr(value, name)
        if value > limit:
            raise BadLenError('Struct length exceeds limit ' + str(limit))
        return value
    return _getsize
python
def sizefromlen(limit, *properties):
    '''
    Factory for a "size" function used in nstruct: reads the struct size
    from the field found by following *properties* and guards it against
    *limit*.

    To retrieve the size without any limit, simply use a lambda
    expression instead: lambda x: x.header.length

    :param limit: maximum allowed size; if the retrieved value is larger,
        BadLenError is raised to protect against serious results such as
        memory overflow or a dead loop.

    :param properties: attribute names forming a path to the length
        field, e.g. sizefromlen(256, 'header', 'length') -> s.header.length

    :returns: a function taking a NamedStruct and returning the length
        value found at the property path.
    '''
    def _getsize(namedstruct):
        value = namedstruct._target
        for name in properties:
            value = getattr(value, name)
        if value > limit:
            raise BadLenError('Struct length exceeds limit ' + str(limit))
        return value
    return _getsize
Factory to generate a function which gets the size from a specified field, with a limit. Often used in the nstruct "size" parameter. To retrieve the size without a limit, simply use a lambda expression: lambda x: x.header.length :param limit: the maximum size limit; if the acquired value is larger than the limit, BadLenError is raised to protect against serious results like memory overflow or a dead loop. :param properties: the names of the specified fields. Specify more than one string to form a property path, like: sizefromlen(256, 'header', 'length') -> s.header.length :returns: a function which takes a NamedStruct as parameter, and returns the length value from the specified property path.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L519-L542
hubo1016/namedstruct
namedstruct/namedstruct.py
packsize
def packsize(*properties):
    '''
    Inverse of sizefromlen: a "prepack" function that stores the padded
    struct size (len(struct)) into the field found by following
    *properties*.  Use packrealsize() to store the size without padding.

    :param properties: attribute names forming a path to the length
        field, same as sizefromlen.

    :returns: a function taking a NamedStruct and writing its size into
        the specified field.
    '''
    def _putsize(namedstruct):
        target = namedstruct._target
        # Walk to the parent of the final attribute, then assign it.
        for name in properties[:-1]:
            target = getattr(target, name)
        setattr(target, properties[-1], len(namedstruct))
    return _putsize
python
def packsize(*properties):
    '''
    Inverse of sizefromlen: a "prepack" function that stores the padded
    struct size (len(struct)) into the field found by following
    *properties*.  Use packrealsize() to store the size without padding.

    :param properties: attribute names forming a path to the length
        field, same as sizefromlen.

    :returns: a function taking a NamedStruct and writing its size into
        the specified field.
    '''
    def _putsize(namedstruct):
        target = namedstruct._target
        # Walk to the parent of the final attribute, then assign it.
        for name in properties[:-1]:
            target = getattr(target, name)
        setattr(target, properties[-1], len(namedstruct))
    return _putsize
Reverse of sizefromlen: stores the struct size (len(struct)) to the specified property path. The size includes padding. To store the size without padding, use packrealsize() instead. Often used in the nstruct "prepack" parameter. :param properties: specified field name, same as sizefromlen. :returns: a function which takes a NamedStruct as parameter, and packs the size to the specified field.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L544-L560
hubo1016/namedstruct
namedstruct/namedstruct.py
packrealsize
def packrealsize(*properties):
    '''
    Inverse of sizefromlen: a "prepack" function that stores the struct's
    real size (struct._realsize()) into the field found by following
    *properties*.  Unlike packsize, the size WITHOUT padding is stored.

    :param properties: attribute names forming a path to the length
        field, same as sizefromlen.

    :returns: a function taking a NamedStruct and writing its unpadded
        size into the specified field.
    '''
    def _putrealsize(namedstruct):
        target = namedstruct._target
        # Walk to the parent of the final attribute, then assign it.
        for name in properties[:-1]:
            target = getattr(target, name)
        setattr(target, properties[-1], namedstruct._realsize())
    return _putrealsize
python
def packrealsize(*properties): ''' Revert to sizefromlen, pack the struct real size (struct._realsize()) to specified property path. Unlike packsize, the size without padding is stored. Often used in nstruct "prepack" parameter. :param properties: specified field name, same as sizefromlen. :returns: a function which takes a NamedStruct as parameter, and pack the size to specified field. ''' def func(namedstruct): v = namedstruct._target for p in properties[:-1]: v = getattr(v, p) setattr(v, properties[-1], namedstruct._realsize()) return func
Revert to sizefromlen, pack the struct real size (struct._realsize()) to specified property path. Unlike packsize, the size without padding is stored. Often used in nstruct "prepack" parameter. :param properties: specified field name, same as sizefromlen. :returns: a function which takes a NamedStruct as parameter, and pack the size to specified field.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L562-L577
hubo1016/namedstruct
namedstruct/namedstruct.py
packvalue
def packvalue(value, *properties): ''' Store a specified value to specified property path. Often used in nstruct "init" parameter. :param value: a fixed value :param properties: specified field name, same as sizefromlen. :returns: a function which takes a NamedStruct as parameter, and store the value to property path. ''' def func(namedstruct): v = namedstruct._target for p in properties[:-1]: v = getattr(v, p) setattr(v, properties[-1], value) return func
python
def packvalue(value, *properties): ''' Store a specified value to specified property path. Often used in nstruct "init" parameter. :param value: a fixed value :param properties: specified field name, same as sizefromlen. :returns: a function which takes a NamedStruct as parameter, and store the value to property path. ''' def func(namedstruct): v = namedstruct._target for p in properties[:-1]: v = getattr(v, p) setattr(v, properties[-1], value) return func
Store a specified value to specified property path. Often used in nstruct "init" parameter. :param value: a fixed value :param properties: specified field name, same as sizefromlen. :returns: a function which takes a NamedStruct as parameter, and store the value to property path.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L579-L595
hubo1016/namedstruct
namedstruct/namedstruct.py
packexpr
def packexpr(func, *properties): ''' Store a evaluated value to specified property path. Often used in nstruct "prepack" parameter. :param func: a function which takes a NamedStruct as parameter and returns a value, often a lambda expression :param properties: specified field name, same as sizefromlen. :returns: a function which takes a NamedStruct as parameter, and store the return value of func to property path. ''' def func2(namedstruct): v = namedstruct._target for p in properties[:-1]: v = getattr(v, p) setattr(v, properties[-1], func(namedstruct)) return func2
python
def packexpr(func, *properties): ''' Store a evaluated value to specified property path. Often used in nstruct "prepack" parameter. :param func: a function which takes a NamedStruct as parameter and returns a value, often a lambda expression :param properties: specified field name, same as sizefromlen. :returns: a function which takes a NamedStruct as parameter, and store the return value of func to property path. ''' def func2(namedstruct): v = namedstruct._target for p in properties[:-1]: v = getattr(v, p) setattr(v, properties[-1], func(namedstruct)) return func2
Store a evaluated value to specified property path. Often used in nstruct "prepack" parameter. :param func: a function which takes a NamedStruct as parameter and returns a value, often a lambda expression :param properties: specified field name, same as sizefromlen. :returns: a function which takes a NamedStruct as parameter, and store the return value of func to property path.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L597-L612
hubo1016/namedstruct
namedstruct/namedstruct.py
NamedStruct._create_embedded_indices
def _create_embedded_indices(self): ''' Create indices for all the embedded structs. For parser internal use. ''' try: _set(self, '_embedded_indices', dict((k,(self,v)) for k,v in getattr(self._parser.typedef, 'inline_names', {}).items())) except AttributeError: _set(self, '_embedded_indices', {})
python
def _create_embedded_indices(self): ''' Create indices for all the embedded structs. For parser internal use. ''' try: _set(self, '_embedded_indices', dict((k,(self,v)) for k,v in getattr(self._parser.typedef, 'inline_names', {}).items())) except AttributeError: _set(self, '_embedded_indices', {})
Create indices for all the embedded structs. For parser internal use.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L84-L91
hubo1016/namedstruct
namedstruct/namedstruct.py
NamedStruct._unpack
def _unpack(self, data): ''' Unpack a struct from bytes. For parser internal use. ''' #self._logger.log(logging.DEBUG, 'unpacking %r', self) current = self while current is not None: data = current._parser.unpack(data, current) last = current current = getattr(current, '_sub', None) _set(last, '_extra', data)
python
def _unpack(self, data): ''' Unpack a struct from bytes. For parser internal use. ''' #self._logger.log(logging.DEBUG, 'unpacking %r', self) current = self while current is not None: data = current._parser.unpack(data, current) last = current current = getattr(current, '_sub', None) _set(last, '_extra', data)
Unpack a struct from bytes. For parser internal use.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L92-L102
hubo1016/namedstruct
namedstruct/namedstruct.py
NamedStruct._packto
def _packto(self, stream): ''' Pack current struct into stream. For parser internal use. :param stream: a buffered stream (File or BytesIO) :return: packed bytes length ''' #self._logger.log(logging.DEBUG, 'packing %r', self) total_size = 0 current = self while current is not None: total_size += current._parser.packto(current, stream) last = current current = getattr(current, '_sub', None) if hasattr(last, '_extra'): _extra = last._extra total_size += stream.write(_extra) return total_size
python
def _packto(self, stream): ''' Pack current struct into stream. For parser internal use. :param stream: a buffered stream (File or BytesIO) :return: packed bytes length ''' #self._logger.log(logging.DEBUG, 'packing %r', self) total_size = 0 current = self while current is not None: total_size += current._parser.packto(current, stream) last = current current = getattr(current, '_sub', None) if hasattr(last, '_extra'): _extra = last._extra total_size += stream.write(_extra) return total_size
Pack current struct into stream. For parser internal use. :param stream: a buffered stream (File or BytesIO) :return: packed bytes length
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L113-L131
hubo1016/namedstruct
namedstruct/namedstruct.py
NamedStruct._prepack
def _prepack(self): ''' Prepack stage. For parser internal use. ''' current = self while current is not None: current._parser.prepack(current, skip_self = True) current = getattr(current, '_sub', None) current = self while current is not None: current._parser.prepack(current, skip_sub = True) current = getattr(current, '_sub', None)
python
def _prepack(self): ''' Prepack stage. For parser internal use. ''' current = self while current is not None: current._parser.prepack(current, skip_self = True) current = getattr(current, '_sub', None) current = self while current is not None: current._parser.prepack(current, skip_sub = True) current = getattr(current, '_sub', None)
Prepack stage. For parser internal use.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L133-L144
hubo1016/namedstruct
namedstruct/namedstruct.py
NamedStruct._tobytes
def _tobytes(self, skipprepack = False): ''' Convert the struct to bytes. This is the standard way to convert a NamedStruct to bytes. :param skipprepack: if True, the prepack stage is skipped. For parser internal use. :returns: converted bytes ''' stream = BytesIO() self._tostream(stream, skipprepack) return stream.getvalue()
python
def _tobytes(self, skipprepack = False): ''' Convert the struct to bytes. This is the standard way to convert a NamedStruct to bytes. :param skipprepack: if True, the prepack stage is skipped. For parser internal use. :returns: converted bytes ''' stream = BytesIO() self._tostream(stream, skipprepack) return stream.getvalue()
Convert the struct to bytes. This is the standard way to convert a NamedStruct to bytes. :param skipprepack: if True, the prepack stage is skipped. For parser internal use. :returns: converted bytes
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L146-L156
hubo1016/namedstruct
namedstruct/namedstruct.py
NamedStruct._tostream
def _tostream(self, stream, skipprepack= False): ''' Convert the struct into a bytes stream. This is the standard way to convert a NamedStruct to bytes. :param stream: a list of bytes to get the result :param skipprepack: if True, the prepack stage is skipped. For parser internal use. :returns: total appended size ''' if not skipprepack: self._prepack() datasize = self._packto(stream) paddingSize = self._parser.paddingsize2(datasize) if paddingSize > datasize: stream.write(b'\x00' * (paddingSize - datasize)) return paddingSize
python
def _tostream(self, stream, skipprepack= False): ''' Convert the struct into a bytes stream. This is the standard way to convert a NamedStruct to bytes. :param stream: a list of bytes to get the result :param skipprepack: if True, the prepack stage is skipped. For parser internal use. :returns: total appended size ''' if not skipprepack: self._prepack() datasize = self._packto(stream) paddingSize = self._parser.paddingsize2(datasize) if paddingSize > datasize: stream.write(b'\x00' * (paddingSize - datasize)) return paddingSize
Convert the struct into a bytes stream. This is the standard way to convert a NamedStruct to bytes. :param stream: a list of bytes to get the result :param skipprepack: if True, the prepack stage is skipped. For parser internal use. :returns: total appended size
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L157-L173
hubo1016/namedstruct
namedstruct/namedstruct.py
NamedStruct._realsize
def _realsize(self): ''' Get the struct size without padding (or the "real size") :returns: the "real size" in bytes ''' current = self size= 0 while current is not None: size += current._parser.sizeof(current) last = current current = getattr(current, '_sub', None) size += len(getattr(last, '_extra', b'')) return size
python
def _realsize(self): ''' Get the struct size without padding (or the "real size") :returns: the "real size" in bytes ''' current = self size= 0 while current is not None: size += current._parser.sizeof(current) last = current current = getattr(current, '_sub', None) size += len(getattr(last, '_extra', b'')) return size
Get the struct size without padding (or the "real size") :returns: the "real size" in bytes
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L174-L188
hubo1016/namedstruct
namedstruct/namedstruct.py
NamedStruct._subclass
def _subclass(self, parser): ''' Create sub-classed struct from extra data, with specified parser. For parser internal use. :param parser: parser of subclass ''' _set(self, '_sub', parser._create(memoryview(getattr(self, '_extra', b'')), self._target)) try: object.__delattr__(self, '_extra') except: pass
python
def _subclass(self, parser): ''' Create sub-classed struct from extra data, with specified parser. For parser internal use. :param parser: parser of subclass ''' _set(self, '_sub', parser._create(memoryview(getattr(self, '_extra', b'')), self._target)) try: object.__delattr__(self, '_extra') except: pass
Create sub-classed struct from extra data, with specified parser. For parser internal use. :param parser: parser of subclass
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L197-L207
hubo1016/namedstruct
namedstruct/namedstruct.py
NamedStruct._extend
def _extend(self, newsub): ''' Append a subclass (extension) after the base class. For parser internal use. ''' current = self while hasattr(current, '_sub'): current = current._sub _set(current, '_sub', newsub) try: object.__delattr__(self, '_extra') except: pass
python
def _extend(self, newsub): ''' Append a subclass (extension) after the base class. For parser internal use. ''' current = self while hasattr(current, '_sub'): current = current._sub _set(current, '_sub', newsub) try: object.__delattr__(self, '_extra') except: pass
Append a subclass (extension) after the base class. For parser internal use.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L214-L225
hubo1016/namedstruct
namedstruct/namedstruct.py
NamedStruct._gettype
def _gettype(self): ''' Return current type of this struct :returns: a typedef object (e.g. nstruct) ''' current = self lastname = getattr(current._parser, 'typedef', None) while hasattr(current, '_sub'): current = current._sub tn = getattr(current._parser, 'typedef', None) if tn is not None: lastname = tn return lastname
python
def _gettype(self): ''' Return current type of this struct :returns: a typedef object (e.g. nstruct) ''' current = self lastname = getattr(current._parser, 'typedef', None) while hasattr(current, '_sub'): current = current._sub tn = getattr(current._parser, 'typedef', None) if tn is not None: lastname = tn return lastname
Return current type of this struct :returns: a typedef object (e.g. nstruct)
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L226-L240
hubo1016/namedstruct
namedstruct/namedstruct.py
NamedStruct._setextra
def _setextra(self, extradata): ''' Set the _extra field in the struct, which stands for the additional ("extra") data after the defined fields. ''' current = self while hasattr(current, '_sub'): current = current._sub _set(current, '_extra', extradata)
python
def _setextra(self, extradata): ''' Set the _extra field in the struct, which stands for the additional ("extra") data after the defined fields. ''' current = self while hasattr(current, '_sub'): current = current._sub _set(current, '_extra', extradata)
Set the _extra field in the struct, which stands for the additional ("extra") data after the defined fields.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L249-L257
hubo1016/namedstruct
namedstruct/namedstruct.py
NamedStruct._getextra
def _getextra(self): ''' Get the extra data of this struct. ''' current = self while hasattr(current, '_sub'): current = current._sub return getattr(current, '_extra', None)
python
def _getextra(self): ''' Get the extra data of this struct. ''' current = self while hasattr(current, '_sub'): current = current._sub return getattr(current, '_extra', None)
Get the extra data of this struct.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L258-L265
hubo1016/namedstruct
namedstruct/namedstruct.py
NamedStruct._replace_embedded_type
def _replace_embedded_type(self, name, newtype): ''' Replace the embedded struct to a newly-created struct of another type (usually based on the original type). The attributes of the old struct is NOT preserved. :param name: either the original type, or the name of the original type. It is always the type used in type definitions, even if it is already replaced once or more. :param newtype: the new type to replace ''' if hasattr(name, 'readablename'): name = name.readablename t,i = self._target._embedded_indices[name] t._seqs[i] = newtype.parser().new(self._target)
python
def _replace_embedded_type(self, name, newtype): ''' Replace the embedded struct to a newly-created struct of another type (usually based on the original type). The attributes of the old struct is NOT preserved. :param name: either the original type, or the name of the original type. It is always the type used in type definitions, even if it is already replaced once or more. :param newtype: the new type to replace ''' if hasattr(name, 'readablename'): name = name.readablename t,i = self._target._embedded_indices[name] t._seqs[i] = newtype.parser().new(self._target)
Replace the embedded struct to a newly-created struct of another type (usually based on the original type). The attributes of the old struct is NOT preserved. :param name: either the original type, or the name of the original type. It is always the type used in type definitions, even if it is already replaced once or more. :param newtype: the new type to replace
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L325-L338
hubo1016/namedstruct
namedstruct/namedstruct.py
NamedStruct._get_embedded
def _get_embedded(self, name): ''' Return an embedded struct object to calculate the size or use _tobytes(True) to convert just the embedded parts. :param name: either the original type, or the name of the original type. It is always the type used in type definitions, even if it is already replaced once or more. :returns: an embedded struct ''' if hasattr(name, 'readablename'): name = name.readablename t,i = self._target._embedded_indices[name] return t._seqs[i]
python
def _get_embedded(self, name): ''' Return an embedded struct object to calculate the size or use _tobytes(True) to convert just the embedded parts. :param name: either the original type, or the name of the original type. It is always the type used in type definitions, even if it is already replaced once or more. :returns: an embedded struct ''' if hasattr(name, 'readablename'): name = name.readablename t,i = self._target._embedded_indices[name] return t._seqs[i]
Return an embedded struct object to calculate the size or use _tobytes(True) to convert just the embedded parts. :param name: either the original type, or the name of the original type. It is always the type used in type definitions, even if it is already replaced once or more. :returns: an embedded struct
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L339-L352
hubo1016/namedstruct
namedstruct/namedstruct.py
NamedStruct._registerPickleType
def _registerPickleType(name, typedef): ''' Register a type with the specified name. After registration, NamedStruct with this type (and any sub-types) can be successfully pickled and transfered. ''' NamedStruct._pickleNames[typedef] = name NamedStruct._pickleTypes[name] = typedef
python
def _registerPickleType(name, typedef): ''' Register a type with the specified name. After registration, NamedStruct with this type (and any sub-types) can be successfully pickled and transfered. ''' NamedStruct._pickleNames[typedef] = name NamedStruct._pickleTypes[name] = typedef
Register a type with the specified name. After registration, NamedStruct with this type (and any sub-types) can be successfully pickled and transfered.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L354-L360
hubo1016/namedstruct
namedstruct/namedstruct.py
EmbeddedStruct._create_embedded_indices
def _create_embedded_indices(self): ''' Create indices for all the embedded structs. For parser internal use. ''' try: self._target._embedded_indices.update(((k,(self,v)) for k,v in getattr(self._parser.typedef, 'inline_names', {}).items())) except AttributeError: pass
python
def _create_embedded_indices(self): ''' Create indices for all the embedded structs. For parser internal use. ''' try: self._target._embedded_indices.update(((k,(self,v)) for k,v in getattr(self._parser.typedef, 'inline_names', {}).items())) except AttributeError: pass
Create indices for all the embedded structs. For parser internal use.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L366-L373
hubo1016/namedstruct
namedstruct/namedstruct.py
Parser.parse
def parse(self, buffer, inlineparent = None): ''' Try to parse the struct from bytes sequence. The bytes sequence is taken from a streaming source. :param buffer: bytes sequence to be parsed from. :param inlineparent: if specified, this struct is embedded in another struct. :returns: None if the buffer does not have enough data for this struct (e.g. incomplete read from socket); (struct, size) else, where struct is the parsed result (usually a NamedStruct object) and size is the used bytes length, so you can start another parse from buffer[size:]. ''' if self.base is not None: return self.base.parse(buffer, inlineparent) r = self._parse(buffer, inlineparent) if r is None: return None (s, size) = r self.subclass(s) return (s, (size + self.padding - 1) // self.padding * self.padding)
python
def parse(self, buffer, inlineparent = None): ''' Try to parse the struct from bytes sequence. The bytes sequence is taken from a streaming source. :param buffer: bytes sequence to be parsed from. :param inlineparent: if specified, this struct is embedded in another struct. :returns: None if the buffer does not have enough data for this struct (e.g. incomplete read from socket); (struct, size) else, where struct is the parsed result (usually a NamedStruct object) and size is the used bytes length, so you can start another parse from buffer[size:]. ''' if self.base is not None: return self.base.parse(buffer, inlineparent) r = self._parse(buffer, inlineparent) if r is None: return None (s, size) = r self.subclass(s) return (s, (size + self.padding - 1) // self.padding * self.padding)
Try to parse the struct from bytes sequence. The bytes sequence is taken from a streaming source. :param buffer: bytes sequence to be parsed from. :param inlineparent: if specified, this struct is embedded in another struct. :returns: None if the buffer does not have enough data for this struct (e.g. incomplete read from socket); (struct, size) else, where struct is the parsed result (usually a NamedStruct object) and size is the used bytes length, so you can start another parse from buffer[size:].
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L671-L690
hubo1016/namedstruct
namedstruct/namedstruct.py
Parser.subclass
def subclass(self, namedstruct): ''' Sub-class a NamedStruct into correct sub types. :param namedstruct: a NamedStruct of this type. ''' cp = self cs = namedstruct while True: if hasattr(cs, '_sub'): cs = cs._sub cp = cs._parser continue subp = None clsfr = getattr(cp, 'classifier', None) if clsfr is not None: clsvalue = clsfr(namedstruct) subp = cp.subindices.get(clsvalue) if subp is None: for sc in cp.subclasses: if sc.isinstance(namedstruct): subp = sc break if subp is None: break cs._subclass(subp) cs = cs._sub cp = subp
python
def subclass(self, namedstruct): ''' Sub-class a NamedStruct into correct sub types. :param namedstruct: a NamedStruct of this type. ''' cp = self cs = namedstruct while True: if hasattr(cs, '_sub'): cs = cs._sub cp = cs._parser continue subp = None clsfr = getattr(cp, 'classifier', None) if clsfr is not None: clsvalue = clsfr(namedstruct) subp = cp.subindices.get(clsvalue) if subp is None: for sc in cp.subclasses: if sc.isinstance(namedstruct): subp = sc break if subp is None: break cs._subclass(subp) cs = cs._sub cp = subp
Sub-class a NamedStruct into correct sub types. :param namedstruct: a NamedStruct of this type.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L691-L718
hubo1016/namedstruct
namedstruct/namedstruct.py
Parser.new
def new(self, inlineparent = None): ''' Create an empty struct of this type. "initfunc" is called on the created struct to initialize it. :param inlineparent: if specified, this struct is embedded into another struct "inlineparent" :returns: a created struct (usually a NamedStruct object) ''' if self.base is not None: s = self.base.new(inlineparent) s._extend(self._new(s._target)) else: s = self._new(inlineparent) if self.initfunc is not None: self.initfunc(s) return s
python
def new(self, inlineparent = None): ''' Create an empty struct of this type. "initfunc" is called on the created struct to initialize it. :param inlineparent: if specified, this struct is embedded into another struct "inlineparent" :returns: a created struct (usually a NamedStruct object) ''' if self.base is not None: s = self.base.new(inlineparent) s._extend(self._new(s._target)) else: s = self._new(inlineparent) if self.initfunc is not None: self.initfunc(s) return s
Create an empty struct of this type. "initfunc" is called on the created struct to initialize it. :param inlineparent: if specified, this struct is embedded into another struct "inlineparent" :returns: a created struct (usually a NamedStruct object)
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L733-L748
hubo1016/namedstruct
namedstruct/namedstruct.py
Parser.create
def create(self, data, inlineparent = None): ''' Create a struct and use all bytes of data. Different from parse(), this takes all data, store unused bytes in "extra" data of the struct. Some types like variable-length array may have different parse result with create() and parse(). :param data: bytes of a packed struct. :param inlineparent: if specified, this struct is embedded in another struct "inlineparent" :returns: a created NamedStruct object. ''' if self.base is not None: return self.base.create(data, inlineparent) c = self._create(data, inlineparent) self.subclass(c) return c
python
def create(self, data, inlineparent = None): ''' Create a struct and use all bytes of data. Different from parse(), this takes all data, store unused bytes in "extra" data of the struct. Some types like variable-length array may have different parse result with create() and parse(). :param data: bytes of a packed struct. :param inlineparent: if specified, this struct is embedded in another struct "inlineparent" :returns: a created NamedStruct object. ''' if self.base is not None: return self.base.create(data, inlineparent) c = self._create(data, inlineparent) self.subclass(c) return c
Create a struct and use all bytes of data. Different from parse(), this takes all data, store unused bytes in "extra" data of the struct. Some types like variable-length array may have different parse result with create() and parse(). :param data: bytes of a packed struct. :param inlineparent: if specified, this struct is embedded in another struct "inlineparent" :returns: a created NamedStruct object.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L758-L774
hubo1016/namedstruct
namedstruct/namedstruct.py
Parser.paddingsize
def paddingsize(self, namedstruct): ''' Return the size of the padded struct (including the "real" size and the padding bytes) :param namedstruct: a NamedStruct object of this type. :returns: size including both data and padding. ''' if self.base is not None: return self.base.paddingsize(namedstruct) realsize = namedstruct._realsize() return (realsize + self.padding - 1) // self.padding * self.padding
python
def paddingsize(self, namedstruct): ''' Return the size of the padded struct (including the "real" size and the padding bytes) :param namedstruct: a NamedStruct object of this type. :returns: size including both data and padding. ''' if self.base is not None: return self.base.paddingsize(namedstruct) realsize = namedstruct._realsize() return (realsize + self.padding - 1) // self.padding * self.padding
Return the size of the padded struct (including the "real" size and the padding bytes) :param namedstruct: a NamedStruct object of this type. :returns: size including both data and padding.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L775-L786
hubo1016/namedstruct
namedstruct/namedstruct.py
Parser.paddingsize2
def paddingsize2(self, realsize): ''' Return a padded size from realsize, for NamedStruct internal use. ''' if self.base is not None: return self.base.paddingsize2(realsize) return (realsize + self.padding - 1) // self.padding * self.padding
python
def paddingsize2(self, realsize): ''' Return a padded size from realsize, for NamedStruct internal use. ''' if self.base is not None: return self.base.paddingsize2(realsize) return (realsize + self.padding - 1) // self.padding * self.padding
Return a padded size from realsize, for NamedStruct internal use.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L787-L793
hubo1016/namedstruct
namedstruct/namedstruct.py
Parser.tostream
def tostream(self, namedstruct, stream, skipprepack = False): ''' Convert a NamedStruct to packed bytes, append the bytes to the stream :param namedstruct: a NamedStruct object of this type to pack. :param skipprepack: if True, the prepack stage is skipped. :param stream: a buffered stream :return: appended bytes size ''' return namedstruct._tostream(stream, skipprepack)
python
def tostream(self, namedstruct, stream, skipprepack = False): ''' Convert a NamedStruct to packed bytes, append the bytes to the stream :param namedstruct: a NamedStruct object of this type to pack. :param skipprepack: if True, the prepack stage is skipped. :param stream: a buffered stream :return: appended bytes size ''' return namedstruct._tostream(stream, skipprepack)
Convert a NamedStruct to packed bytes, append the bytes to the stream :param namedstruct: a NamedStruct object of this type to pack. :param skipprepack: if True, the prepack stage is skipped. :param stream: a buffered stream :return: appended bytes size
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L805-L817
hubo1016/namedstruct
namedstruct/namedstruct.py
Parser.prepack
def prepack(self, namedstruct, skip_self=False, skip_sub=False): ''' Run prepack ''' if not skip_self and self.prepackfunc is not None: self.prepackfunc(namedstruct)
python
def prepack(self, namedstruct, skip_self=False, skip_sub=False): ''' Run prepack ''' if not skip_self and self.prepackfunc is not None: self.prepackfunc(namedstruct)
Run prepack
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L818-L823
hubo1016/namedstruct
namedstruct/namedstruct.py
Parser.packto
def packto(self, namedstruct, stream): """ Pack a struct to a stream :param namedstruct: struct to pack :param stream: a buffered stream :return: appended bytes size """ # Default implementation data = self.pack(namedstruct) return stream.write(data)
python
def packto(self, namedstruct, stream): """ Pack a struct to a stream :param namedstruct: struct to pack :param stream: a buffered stream :return: appended bytes size """ # Default implementation data = self.pack(namedstruct) return stream.write(data)
Pack a struct to a stream :param namedstruct: struct to pack :param stream: a buffered stream :return: appended bytes size
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L824-L836
hubo1016/namedstruct
namedstruct/namedstruct.py
FormatParser.unpack
def unpack(self, data, namedstruct): ''' Unpack the struct from specified bytes. If the struct is sub-classed, definitions from the sub type is not unpacked. :param data: bytes of the struct, including fields of sub type and "extra" data. :param namedstruct: a NamedStruct object of this type :returns: unused bytes from data, which forms data of the sub type and "extra" data. ''' try: result = self.struct.unpack(data[0:self.struct.size]) except struct.error as exc: raise BadFormatError(exc) start = 0 t = namedstruct._target for p in self.properties: if len(p) > 1: if isinstance(result[start], bytes): v = [r.rstrip(b'\x00') for r in result[start:start + p[1]]] else: v = list(result[start:start + p[1]]) start += p[1] else: v = result[start] if isinstance(v, bytes): v = v.rstrip(b'\x00') start += 1 setin = t for sp in p[0][0:-1]: if not hasattr(setin, sp): setin2 = InlineStruct(namedstruct._target) setattr(setin, sp, setin2) setin = setin2 else: setin = getattr(setin, sp) setattr(setin, p[0][-1], v) return data[self.struct.size:]
python
def unpack(self, data, namedstruct): ''' Unpack the struct from specified bytes. If the struct is sub-classed, definitions from the sub type is not unpacked. :param data: bytes of the struct, including fields of sub type and "extra" data. :param namedstruct: a NamedStruct object of this type :returns: unused bytes from data, which forms data of the sub type and "extra" data. ''' try: result = self.struct.unpack(data[0:self.struct.size]) except struct.error as exc: raise BadFormatError(exc) start = 0 t = namedstruct._target for p in self.properties: if len(p) > 1: if isinstance(result[start], bytes): v = [r.rstrip(b'\x00') for r in result[start:start + p[1]]] else: v = list(result[start:start + p[1]]) start += p[1] else: v = result[start] if isinstance(v, bytes): v = v.rstrip(b'\x00') start += 1 setin = t for sp in p[0][0:-1]: if not hasattr(setin, sp): setin2 = InlineStruct(namedstruct._target) setattr(setin, sp, setin2) setin = setin2 else: setin = getattr(setin, sp) setattr(setin, p[0][-1], v) return data[self.struct.size:]
Unpack the struct from specified bytes. If the struct is sub-classed, definitions from the sub type is not unpacked. :param data: bytes of the struct, including fields of sub type and "extra" data. :param namedstruct: a NamedStruct object of this type :returns: unused bytes from data, which forms data of the sub type and "extra" data.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L907-L945
hubo1016/namedstruct
namedstruct/namedstruct.py
FormatParser.pack
def pack(self, namedstruct): ''' Pack the struct and return the packed bytes. :param namedstruct: a NamedStruct of this type. :returns: packed bytes, only contains fields of definitions in this type, not the sub type and "extra" data. ''' elements = [] t = namedstruct._target for p in self.properties: v = t for sp in p[0]: v = getattr(v, sp) if len(p) > 1: elements.extend(v[0:p[1]]) else: elements.append(v) return self.struct.pack(*elements)
python
def pack(self, namedstruct): ''' Pack the struct and return the packed bytes. :param namedstruct: a NamedStruct of this type. :returns: packed bytes, only contains fields of definitions in this type, not the sub type and "extra" data. ''' elements = [] t = namedstruct._target for p in self.properties: v = t for sp in p[0]: v = getattr(v, sp) if len(p) > 1: elements.extend(v[0:p[1]]) else: elements.append(v) return self.struct.pack(*elements)
Pack the struct and return the packed bytes. :param namedstruct: a NamedStruct of this type. :returns: packed bytes, only contains fields of definitions in this type, not the sub type and "extra" data.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L946-L964
hubo1016/namedstruct
namedstruct/namedstruct.py
PrimitiveParser.parse
def parse(self, buffer, inlineparent = None): ''' Compatible to Parser.parse() ''' if len(buffer) < self.struct.size: return None try: return (self.struct.unpack(buffer[:self.struct.size])[0], self.struct.size) except struct.error as exc: raise BadFormatError(exc)
python
def parse(self, buffer, inlineparent = None): ''' Compatible to Parser.parse() ''' if len(buffer) < self.struct.size: return None try: return (self.struct.unpack(buffer[:self.struct.size])[0], self.struct.size) except struct.error as exc: raise BadFormatError(exc)
Compatible to Parser.parse()
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L1246-L1255
hubo1016/namedstruct
namedstruct/namedstruct.py
PrimitiveParser.create
def create(self, data, inlineparent = None): ''' Compatible to Parser.create() ''' try: return self.struct.unpack(data)[0] except struct.error as exc: raise BadFormatError(exc)
python
def create(self, data, inlineparent = None): ''' Compatible to Parser.create() ''' try: return self.struct.unpack(data)[0] except struct.error as exc: raise BadFormatError(exc)
Compatible to Parser.create()
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L1261-L1268
hubo1016/namedstruct
namedstruct/namedstruct.py
ArrayParser.parse
def parse(self, buffer, inlineparent = None): ''' Compatible to Parser.parse() ''' size = 0 v = [] for i in range(0, self.size): # @UnusedVariable r = self.innerparser.parse(buffer[size:], None) if r is None: return None v.append(r[0]) size += r[1] return (v, size)
python
def parse(self, buffer, inlineparent = None): ''' Compatible to Parser.parse() ''' size = 0 v = [] for i in range(0, self.size): # @UnusedVariable r = self.innerparser.parse(buffer[size:], None) if r is None: return None v.append(r[0]) size += r[1] return (v, size)
Compatible to Parser.parse()
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L1303-L1315
hubo1016/namedstruct
namedstruct/namedstruct.py
ArrayParser.new
def new(self, inlineparent = None): ''' Compatible to Parser.new() ''' v = list(range(0, self.size)) for i in range(0, self.size): v[i] = self.innerparser.new() return v
python
def new(self, inlineparent = None): ''' Compatible to Parser.new() ''' v = list(range(0, self.size)) for i in range(0, self.size): v[i] = self.innerparser.new() return v
Compatible to Parser.new()
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L1316-L1323
hubo1016/namedstruct
namedstruct/namedstruct.py
ArrayParser.create
def create(self, data, inlineparent = None): ''' Compatible to Parser.create() ''' if self.size > 0: r = self.parse(data) if r is None: raise ParseError('data is not enough to create an array of size ' + self.size) else: return r[0] else: v = [] start = 0 while start < len(data): r = self.innerparser.parse(data[start:], None) if r is None: break v.append(r[0]) start += r[1] return v
python
def create(self, data, inlineparent = None): ''' Compatible to Parser.create() ''' if self.size > 0: r = self.parse(data) if r is None: raise ParseError('data is not enough to create an array of size ' + self.size) else: return r[0] else: v = [] start = 0 while start < len(data): r = self.innerparser.parse(data[start:], None) if r is None: break v.append(r[0]) start += r[1] return v
Compatible to Parser.create()
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L1324-L1343
hubo1016/namedstruct
namedstruct/namedstruct.py
ArrayParser.sizeof
def sizeof(self, prim): ''' Compatible to Parser.sizeof() ''' size = 0 arraysize = self.size if arraysize == 0: arraysize = len(prim) for i in range(0, arraysize): if i >= len(prim): tp = self.innerparser.new() if hasattr(self.innerparser, 'fullparse'): self.innerparser.fullparse(tp) size += self.innerparser.paddingsize(tp) else: size += self.innerparser.paddingsize(prim[i]) return size
python
def sizeof(self, prim): ''' Compatible to Parser.sizeof() ''' size = 0 arraysize = self.size if arraysize == 0: arraysize = len(prim) for i in range(0, arraysize): if i >= len(prim): tp = self.innerparser.new() if hasattr(self.innerparser, 'fullparse'): self.innerparser.fullparse(tp) size += self.innerparser.paddingsize(tp) else: size += self.innerparser.paddingsize(prim[i]) return size
Compatible to Parser.sizeof()
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L1344-L1360
hubo1016/namedstruct
namedstruct/namedstruct.py
ArrayParser.tobytes
def tobytes(self, prim, skipprepack = False): ''' Compatible to Parser.tobytes() ''' stream = BytesIO() self.tostream(prim, stream, skipprepack=skipprepack) return stream.getvalue()
python
def tobytes(self, prim, skipprepack = False): ''' Compatible to Parser.tobytes() ''' stream = BytesIO() self.tostream(prim, stream, skipprepack=skipprepack) return stream.getvalue()
Compatible to Parser.tobytes()
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L1366-L1372
hubo1016/namedstruct
namedstruct/namedstruct.py
RawParser.create
def create(self, data, inlineparent = None): ''' Compatible to Parser.create() ''' if self.cstr: return _copy(data).rstrip(b'\x00') else: return _copy(data)
python
def create(self, data, inlineparent = None): ''' Compatible to Parser.create() ''' if self.cstr: return _copy(data).rstrip(b'\x00') else: return _copy(data)
Compatible to Parser.create()
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L1412-L1419
hubo1016/namedstruct
namedstruct/namedstruct.py
typedef.parser
def parser(self): ''' Get parser for this type. Create the parser on first call. ''' if not hasattr(self, '_parser'): self._parser = self._compile() return self._parser
python
def parser(self): ''' Get parser for this type. Create the parser on first call. ''' if not hasattr(self, '_parser'): self._parser = self._compile() return self._parser
Get parser for this type. Create the parser on first call.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L1477-L1483
hubo1016/namedstruct
namedstruct/namedstruct.py
typedef.new
def new(self, *args, **kwargs): ''' Create a new object of this type. It is also available as __call__, so you can create a new object just like creating a class instance: a = mytype(a=1,b=2) :param args: Replace the embedded struct type. Each argument is a tuple (name, newtype). It is equivalent to call _replace_embedded_type with *name* and *newtype* one by one. Both the "directly" embedded struct and the embedded struct inside another embedded struct can be set. If you want to replace an embedded struct in a replaced struct type, make sure the outer struct is replaced first. The embeded struct type must have a *name* to be replaced by specify *name* option. :param kwargs: extra key-value arguments, each one will be set on the new object, to set value to the fields conveniently. :returns: a new object, with the specified "kwargs" set. ''' obj = self.parser().new() for k,v in args: obj._replace_embedded_type(k,v) for k,v in kwargs.items(): setattr(obj, k, v) return obj
python
def new(self, *args, **kwargs): ''' Create a new object of this type. It is also available as __call__, so you can create a new object just like creating a class instance: a = mytype(a=1,b=2) :param args: Replace the embedded struct type. Each argument is a tuple (name, newtype). It is equivalent to call _replace_embedded_type with *name* and *newtype* one by one. Both the "directly" embedded struct and the embedded struct inside another embedded struct can be set. If you want to replace an embedded struct in a replaced struct type, make sure the outer struct is replaced first. The embeded struct type must have a *name* to be replaced by specify *name* option. :param kwargs: extra key-value arguments, each one will be set on the new object, to set value to the fields conveniently. :returns: a new object, with the specified "kwargs" set. ''' obj = self.parser().new() for k,v in args: obj._replace_embedded_type(k,v) for k,v in kwargs.items(): setattr(obj, k, v) return obj
Create a new object of this type. It is also available as __call__, so you can create a new object just like creating a class instance: a = mytype(a=1,b=2) :param args: Replace the embedded struct type. Each argument is a tuple (name, newtype). It is equivalent to call _replace_embedded_type with *name* and *newtype* one by one. Both the "directly" embedded struct and the embedded struct inside another embedded struct can be set. If you want to replace an embedded struct in a replaced struct type, make sure the outer struct is replaced first. The embeded struct type must have a *name* to be replaced by specify *name* option. :param kwargs: extra key-value arguments, each one will be set on the new object, to set value to the fields conveniently. :returns: a new object, with the specified "kwargs" set.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L1508-L1530
hubo1016/namedstruct
namedstruct/namedstruct.py
enum.getName
def getName(self, value, defaultName = None): ''' Get the enumerate name of a specified value. :param value: the enumerate value :param defaultName: returns if the enumerate value is not defined :returns: the corresponding enumerate value or *defaultName* if not found ''' for k,v in self._values.items(): if v == value: return k return defaultName
python
def getName(self, value, defaultName = None): ''' Get the enumerate name of a specified value. :param value: the enumerate value :param defaultName: returns if the enumerate value is not defined :returns: the corresponding enumerate value or *defaultName* if not found ''' for k,v in self._values.items(): if v == value: return k return defaultName
Get the enumerate name of a specified value. :param value: the enumerate value :param defaultName: returns if the enumerate value is not defined :returns: the corresponding enumerate value or *defaultName* if not found
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L2559-L2569
hubo1016/namedstruct
namedstruct/namedstruct.py
enum.importAll
def importAll(self, gs): ''' Import all the enumerate values from this enumerate to *gs* :param gs: usually globals(), a dictionary. At lease __setitem__ should be implemented if not a dictionary. ''' for k,v in self._values.items(): gs[k] = v
python
def importAll(self, gs): ''' Import all the enumerate values from this enumerate to *gs* :param gs: usually globals(), a dictionary. At lease __setitem__ should be implemented if not a dictionary. ''' for k,v in self._values.items(): gs[k] = v
Import all the enumerate values from this enumerate to *gs* :param gs: usually globals(), a dictionary. At lease __setitem__ should be implemented if not a dictionary.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L2578-L2584
hubo1016/namedstruct
namedstruct/namedstruct.py
enum.extend
def extend(self, namespace = None, name = None, **kwargs): ''' Create a new enumerate with current values merged with new enumerate values :param namespace: same as __init__ :param name: same as __init__ :param kwargs: same as __init__ :returns: a new enumerate type ''' if name is None: name = self._readablename d = dict(self._values) d.update(kwargs) return enum(name, namespace, self, self._bitwise, **d)
python
def extend(self, namespace = None, name = None, **kwargs): ''' Create a new enumerate with current values merged with new enumerate values :param namespace: same as __init__ :param name: same as __init__ :param kwargs: same as __init__ :returns: a new enumerate type ''' if name is None: name = self._readablename d = dict(self._values) d.update(kwargs) return enum(name, namespace, self, self._bitwise, **d)
Create a new enumerate with current values merged with new enumerate values :param namespace: same as __init__ :param name: same as __init__ :param kwargs: same as __init__ :returns: a new enumerate type
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L2585-L2597
hubo1016/namedstruct
namedstruct/namedstruct.py
enum.formatter
def formatter(self, value): ''' Format a enumerate value to enumerate names if possible. Used to generate human readable dump result. ''' if not self._bitwise: n = self.getName(value) if n is None: return value else: return n else: names = [] for k,v in sorted(self._values.items(), key=lambda x: x[1], reverse=True): if (v & value) == v: names.append(k) value = value ^ v names.reverse() if value != 0: names.append(hex(value)) if not names: return 0 return ' '.join(names)
python
def formatter(self, value): ''' Format a enumerate value to enumerate names if possible. Used to generate human readable dump result. ''' if not self._bitwise: n = self.getName(value) if n is None: return value else: return n else: names = [] for k,v in sorted(self._values.items(), key=lambda x: x[1], reverse=True): if (v & value) == v: names.append(k) value = value ^ v names.reverse() if value != 0: names.append(hex(value)) if not names: return 0 return ' '.join(names)
Format a enumerate value to enumerate names if possible. Used to generate human readable dump result.
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L2626-L2648
hubo1016/namedstruct
namedstruct/namedstruct.py
OptionalParser.packto
def packto(self, namedstruct, stream): """ Pack a struct to a stream """ if hasattr(namedstruct, self.name): return _tostream(self.basetypeparser, getattr(namedstruct, self.name), stream, True) else: return 0
python
def packto(self, namedstruct, stream): """ Pack a struct to a stream """ if hasattr(namedstruct, self.name): return _tostream(self.basetypeparser, getattr(namedstruct, self.name), stream, True) else: return 0
Pack a struct to a stream
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L2764-L2771
hubo1016/namedstruct
namedstruct/namedstruct.py
OptionalParser.prepack
def prepack(self, namedstruct, skip_self=False, skip_sub=False): ''' Run prepack ''' if not skip_sub and hasattr(namedstruct, self.name) and hasattr(self.basetypeparser, 'fullprepack'): self.basetypeparser.fullprepack(getattr(namedstruct, self.name)) Parser.prepack(self, namedstruct, skip_self, skip_sub)
python
def prepack(self, namedstruct, skip_self=False, skip_sub=False): ''' Run prepack ''' if not skip_sub and hasattr(namedstruct, self.name) and hasattr(self.basetypeparser, 'fullprepack'): self.basetypeparser.fullprepack(getattr(namedstruct, self.name)) Parser.prepack(self, namedstruct, skip_self, skip_sub)
Run prepack
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L2779-L2785
hubo1016/namedstruct
namedstruct/namedstruct.py
DArrayParser.prepack
def prepack(self, namedstruct, skip_self=False, skip_sub=False): ''' Run prepack ''' if not skip_sub and hasattr(self.innertypeparser, 'fullprepack'): for v in getattr(namedstruct, self.name): self.innertypeparser.fullprepack(v) Parser.prepack(self, namedstruct, skip_self, skip_sub)
python
def prepack(self, namedstruct, skip_self=False, skip_sub=False): ''' Run prepack ''' if not skip_sub and hasattr(self.innertypeparser, 'fullprepack'): for v in getattr(namedstruct, self.name): self.innertypeparser.fullprepack(v) Parser.prepack(self, namedstruct, skip_self, skip_sub)
Run prepack
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L2928-L2935
hubo1016/namedstruct
namedstruct/namedstruct.py
VariantParser.prepack
def prepack(self, namedstruct, skip_self=False, skip_sub=False): ''' Run prepack ''' if not skip_sub and self.header is not None and hasattr(self.header, 'fullprepack'): self.header.fullprepack(namedstruct._seqs[0]) Parser.prepack(self, namedstruct, skip_self, skip_sub)
python
def prepack(self, namedstruct, skip_self=False, skip_sub=False): ''' Run prepack ''' if not skip_sub and self.header is not None and hasattr(self.header, 'fullprepack'): self.header.fullprepack(namedstruct._seqs[0]) Parser.prepack(self, namedstruct, skip_self, skip_sub)
Run prepack
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L3300-L3306
mesowx/MesoPy
MesoPy.py
Meso._checkresponse
def _checkresponse(response): r""" Returns the data requested by the other methods assuming the response from the API is ok. If not, provides error handling for all possible API errors. HTTP errors are handled in the get_response() function. Arguments: ---------- None. Returns: -------- The response from the API as a dictionary if the API code is 2. Raises: ------- MesoPyError: Gives different response messages depending on returned code from API. If the response is 2, resultsError is displayed. For a response of 200, an authError message is shown. A ruleError is displayed if the code is 400, a formatError for -1, and catchError for any other invalid response. """ results_error = 'No results were found matching your query' auth_error = 'The token or API key is not valid, please contact Josh Clark at [email protected] to ' \ 'resolve this' rule_error = 'This request violates a rule of the API. Please check the guidelines for formatting a data ' \ 'request and try again' catch_error = 'Something went wrong. Check all your calls and try again' if response['SUMMARY']['RESPONSE_CODE'] == 1: return response elif response['SUMMARY']['RESPONSE_CODE'] == 2: if response['SUMMARY']['NUMBER_OF_OBJECTS'] == 0: return None raise MesoPyError(results_error) elif response['SUMMARY']['RESPONSE_CODE'] == 200: raise MesoPyError(auth_error) elif response['SUMMARY']['RESPONSE_CODE'] == 400: raise MesoPyError(rule_error) elif response['SUMMARY']['RESPONSE_CODE'] == -1: format_error = response['SUMMARY']['RESPONSE_MESSAGE'] raise MesoPyError(format_error) else: raise MesoPyError(catch_error)
python
def _checkresponse(response): r""" Returns the data requested by the other methods assuming the response from the API is ok. If not, provides error handling for all possible API errors. HTTP errors are handled in the get_response() function. Arguments: ---------- None. Returns: -------- The response from the API as a dictionary if the API code is 2. Raises: ------- MesoPyError: Gives different response messages depending on returned code from API. If the response is 2, resultsError is displayed. For a response of 200, an authError message is shown. A ruleError is displayed if the code is 400, a formatError for -1, and catchError for any other invalid response. """ results_error = 'No results were found matching your query' auth_error = 'The token or API key is not valid, please contact Josh Clark at [email protected] to ' \ 'resolve this' rule_error = 'This request violates a rule of the API. Please check the guidelines for formatting a data ' \ 'request and try again' catch_error = 'Something went wrong. Check all your calls and try again' if response['SUMMARY']['RESPONSE_CODE'] == 1: return response elif response['SUMMARY']['RESPONSE_CODE'] == 2: if response['SUMMARY']['NUMBER_OF_OBJECTS'] == 0: return None raise MesoPyError(results_error) elif response['SUMMARY']['RESPONSE_CODE'] == 200: raise MesoPyError(auth_error) elif response['SUMMARY']['RESPONSE_CODE'] == 400: raise MesoPyError(rule_error) elif response['SUMMARY']['RESPONSE_CODE'] == -1: format_error = response['SUMMARY']['RESPONSE_MESSAGE'] raise MesoPyError(format_error) else: raise MesoPyError(catch_error)
r""" Returns the data requested by the other methods assuming the response from the API is ok. If not, provides error handling for all possible API errors. HTTP errors are handled in the get_response() function. Arguments: ---------- None. Returns: -------- The response from the API as a dictionary if the API code is 2. Raises: ------- MesoPyError: Gives different response messages depending on returned code from API. If the response is 2, resultsError is displayed. For a response of 200, an authError message is shown. A ruleError is displayed if the code is 400, a formatError for -1, and catchError for any other invalid response.
https://github.com/mesowx/MesoPy/blob/cd1e837e108ed7a110d81cf789f19afcdd52145b/MesoPy.py#L84-L125
mesowx/MesoPy
MesoPy.py
Meso._get_response
def _get_response(self, endpoint, request_dict): """ Returns a dictionary of data requested by each function. Arguments: ---------- endpoint: string, mandatory Set in all other methods, this is the API endpoint specific to each function. request_dict: string, mandatory A dictionary of parameters that are formatted into the API call. Returns: -------- response: A dictionary that has been dumped from JSON. Raises: ------- MesoPyError: Overrides the exceptions given in the requests library to give more custom error messages. Connection_error occurs if no internet connection exists. Timeout_error occurs if the request takes too long and redirect_error is shown if the url is formatted incorrectly. """ http_error = 'Could not connect to the API. This could be because you have no internet connection, a parameter' \ ' was input incorrectly, or the API is currently down. Please try again.' json_error = 'Could not retrieve JSON values. Try again with a shorter date range.' # For python 3.4 try: qsp = urllib.parse.urlencode(request_dict, doseq=True) resp = urllib.request.urlopen(self.base_url + endpoint + '?' + qsp).read() # For python 2.7 except AttributeError or NameError: try: qsp = urllib.urlencode(request_dict, doseq=True) resp = urllib2.urlopen(self.base_url + endpoint + '?' + qsp).read() except urllib2.URLError: raise MesoPyError(http_error) except urllib.error.URLError: raise MesoPyError(http_error) try: json_data = json.loads(resp.decode('utf-8')) except ValueError: raise MesoPyError(json_error) return self._checkresponse(json_data)
python
def _get_response(self, endpoint, request_dict): """ Returns a dictionary of data requested by each function. Arguments: ---------- endpoint: string, mandatory Set in all other methods, this is the API endpoint specific to each function. request_dict: string, mandatory A dictionary of parameters that are formatted into the API call. Returns: -------- response: A dictionary that has been dumped from JSON. Raises: ------- MesoPyError: Overrides the exceptions given in the requests library to give more custom error messages. Connection_error occurs if no internet connection exists. Timeout_error occurs if the request takes too long and redirect_error is shown if the url is formatted incorrectly. """ http_error = 'Could not connect to the API. This could be because you have no internet connection, a parameter' \ ' was input incorrectly, or the API is currently down. Please try again.' json_error = 'Could not retrieve JSON values. Try again with a shorter date range.' # For python 3.4 try: qsp = urllib.parse.urlencode(request_dict, doseq=True) resp = urllib.request.urlopen(self.base_url + endpoint + '?' + qsp).read() # For python 2.7 except AttributeError or NameError: try: qsp = urllib.urlencode(request_dict, doseq=True) resp = urllib2.urlopen(self.base_url + endpoint + '?' + qsp).read() except urllib2.URLError: raise MesoPyError(http_error) except urllib.error.URLError: raise MesoPyError(http_error) try: json_data = json.loads(resp.decode('utf-8')) except ValueError: raise MesoPyError(json_error) return self._checkresponse(json_data)
Returns a dictionary of data requested by each function. Arguments: ---------- endpoint: string, mandatory Set in all other methods, this is the API endpoint specific to each function. request_dict: string, mandatory A dictionary of parameters that are formatted into the API call. Returns: -------- response: A dictionary that has been dumped from JSON. Raises: ------- MesoPyError: Overrides the exceptions given in the requests library to give more custom error messages. Connection_error occurs if no internet connection exists. Timeout_error occurs if the request takes too long and redirect_error is shown if the url is formatted incorrectly.
https://github.com/mesowx/MesoPy/blob/cd1e837e108ed7a110d81cf789f19afcdd52145b/MesoPy.py#L127-L173
mesowx/MesoPy
MesoPy.py
Meso._check_geo_param
def _check_geo_param(self, arg_list): r""" Checks each function call to make sure that the user has provided at least one of the following geographic parameters: 'stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc'. Arguments: ---------- arg_list: list, mandatory A list of kwargs from other functions. Returns: -------- None. Raises: ------- MesoPyError if no geographic search criteria is provided. """ geo_func = lambda a, b: any(i in b for i in a) check = geo_func(self.geo_criteria, arg_list) if check is False: raise MesoPyError('No stations or geographic search criteria specified. Please provide one of the ' 'following: stid, state, county, country, radius, bbox, cwa, nwsfirezone, gacc, subgacc')
python
def _check_geo_param(self, arg_list): r""" Checks each function call to make sure that the user has provided at least one of the following geographic parameters: 'stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc'. Arguments: ---------- arg_list: list, mandatory A list of kwargs from other functions. Returns: -------- None. Raises: ------- MesoPyError if no geographic search criteria is provided. """ geo_func = lambda a, b: any(i in b for i in a) check = geo_func(self.geo_criteria, arg_list) if check is False: raise MesoPyError('No stations or geographic search criteria specified. Please provide one of the ' 'following: stid, state, county, country, radius, bbox, cwa, nwsfirezone, gacc, subgacc')
r""" Checks each function call to make sure that the user has provided at least one of the following geographic parameters: 'stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc'. Arguments: ---------- arg_list: list, mandatory A list of kwargs from other functions. Returns: -------- None. Raises: ------- MesoPyError if no geographic search criteria is provided.
https://github.com/mesowx/MesoPy/blob/cd1e837e108ed7a110d81cf789f19afcdd52145b/MesoPy.py#L175-L198
mesowx/MesoPy
MesoPy.py
Meso.attime
def attime(self, **kwargs): r""" Returns a dictionary of latest observations at a user specified location for a specified time. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below for optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- attime: string, required Date and time in form of YYYYMMDDhhmm for which returned obs are closest. All times are UTC. e.g. attime='201504261800' within: string, required Can be a single number representing a time period before attime or two comma separated numbers representing a period before and after the attime e.g. attime='201306011800', within='30' would return the ob closest to attime within a 30 min period before or after attime. obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' showemptystations: string, optional Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are omitted by default. stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: string, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. 
See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. status: string, optional A value of either active or inactive returns stations currently set as active or inactive in the archive. Omitting this param returns all stations. e.g. status='active' units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of observations around a specific time. Raises: ------- None. """ self._check_geo_param(kwargs) kwargs['token'] = self.token return self._get_response('stations/nearesttime', kwargs)
python
def attime(self, **kwargs): r""" Returns a dictionary of latest observations at a user specified location for a specified time. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below for optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- attime: string, required Date and time in form of YYYYMMDDhhmm for which returned obs are closest. All times are UTC. e.g. attime='201504261800' within: string, required Can be a single number representing a time period before attime or two comma separated numbers representing a period before and after the attime e.g. attime='201306011800', within='30' would return the ob closest to attime within a 30 min period before or after attime. obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' showemptystations: string, optional Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are omitted by default. stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: string, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. 
See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. status: string, optional A value of either active or inactive returns stations currently set as active or inactive in the archive. Omitting this param returns all stations. e.g. status='active' units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of observations around a specific time. Raises: ------- None. """ self._check_geo_param(kwargs) kwargs['token'] = self.token return self._get_response('stations/nearesttime', kwargs)
r""" Returns a dictionary of latest observations at a user specified location for a specified time. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below for optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- attime: string, required Date and time in form of YYYYMMDDhhmm for which returned obs are closest. All times are UTC. e.g. attime='201504261800' within: string, required Can be a single number representing a time period before attime or two comma separated numbers representing a period before and after the attime e.g. attime='201306011800', within='30' would return the ob closest to attime within a 30 min period before or after attime. obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' showemptystations: string, optional Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are omitted by default. stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: string, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. 
See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. status: string, optional A value of either active or inactive returns stations currently set as active or inactive in the archive. Omitting this param returns all stations. e.g. status='active' units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of observations around a specific time. Raises: ------- None.
https://github.com/mesowx/MesoPy/blob/cd1e837e108ed7a110d81cf789f19afcdd52145b/MesoPy.py#L200-L273
mesowx/MesoPy
MesoPy.py
Meso.precip
def precip(self, start, end, **kwargs): r""" Returns precipitation observations at a user specified location for a specified time. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- start: string, mandatory Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC e.g., start='201306011800' end: string, mandatory End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC e.g., end='201306011800' obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' showemptystations: string, optional Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are omitted by default. stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: list, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: list, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. 
nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. status: string, optional A value of either active or inactive returns stations currently set as active or inactive in the archive. Omitting this param returns all stations. e.g. status='active' units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of precipitation observations. Raises: ------- None. """ self._check_geo_param(kwargs) kwargs['start'] = start kwargs['end'] = end kwargs['token'] = self.token return self._get_response('stations/precipitation', kwargs)
python
def precip(self, start, end, **kwargs): r""" Returns precipitation observations at a user specified location for a specified time. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- start: string, mandatory Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC e.g., start='201306011800' end: string, mandatory End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC e.g., end='201306011800' obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' showemptystations: string, optional Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are omitted by default. stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: list, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: list, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. 
nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. status: string, optional A value of either active or inactive returns stations currently set as active or inactive in the archive. Omitting this param returns all stations. e.g. status='active' units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of precipitation observations. Raises: ------- None. """ self._check_geo_param(kwargs) kwargs['start'] = start kwargs['end'] = end kwargs['token'] = self.token return self._get_response('stations/precipitation', kwargs)
r""" Returns precipitation observations at a user specified location for a specified time. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- start: string, mandatory Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC e.g., start='201306011800' end: string, mandatory End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC e.g., end='201306011800' obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' showemptystations: string, optional Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are omitted by default. stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: list, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: list, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. 
nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. status: string, optional A value of either active or inactive returns stations currently set as active or inactive in the archive. Omitting this param returns all stations. e.g. status='active' units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of precipitation observations. Raises: ------- None.
https://github.com/mesowx/MesoPy/blob/cd1e837e108ed7a110d81cf789f19afcdd52145b/MesoPy.py#L346-L420
mesowx/MesoPy
MesoPy.py
Meso.timeseries
def timeseries(self, start, end, **kwargs): r""" Returns a time series of observations at a user specified location for a specified time. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- start: string, mandatory Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC e.g., start='201306011800' end: string, mandatory End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC e.g., end='201306011800' obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' showemptystations: string, optional Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are omitted by default. stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: string, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. 
nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. status: string, optional A value of either active or inactive returns stations currently set as active or inactive in the archive. Omitting this param returns all stations. e.g. status='active' units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of time series observations through the get_response() function. Raises: ------- None. """ self._check_geo_param(kwargs) kwargs['start'] = start kwargs['end'] = end kwargs['token'] = self.token return self._get_response('stations/timeseries', kwargs)
python
def timeseries(self, start, end, **kwargs): r""" Returns a time series of observations at a user specified location for a specified time. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- start: string, mandatory Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC e.g., start='201306011800' end: string, mandatory End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC e.g., end='201306011800' obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' showemptystations: string, optional Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are omitted by default. stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: string, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. 
nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. status: string, optional A value of either active or inactive returns stations currently set as active or inactive in the archive. Omitting this param returns all stations. e.g. status='active' units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of time series observations through the get_response() function. Raises: ------- None. """ self._check_geo_param(kwargs) kwargs['start'] = start kwargs['end'] = end kwargs['token'] = self.token return self._get_response('stations/timeseries', kwargs)
r""" Returns a time series of observations at a user specified location for a specified time. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- start: string, mandatory Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC e.g., start='201306011800' end: string, mandatory End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC e.g., end='201306011800' obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' showemptystations: string, optional Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are omitted by default. stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: string, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. 
nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. status: string, optional A value of either active or inactive returns stations currently set as active or inactive in the archive. Omitting this param returns all stations. e.g. status='active' units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of time series observations through the get_response() function. Raises: ------- None.
https://github.com/mesowx/MesoPy/blob/cd1e837e108ed7a110d81cf789f19afcdd52145b/MesoPy.py#L422-L495
mesowx/MesoPy
MesoPy.py
Meso.climatology
def climatology(self, startclim, endclim, **kwargs): r""" Returns a climatology of observations at a user specified location for a specified time. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- startclim: string, mandatory Start date in form of MMDDhhmm. MUST BE USED WITH THE ENDCLIM PARAMETER. Default time is UTC e.g. startclim='06011800' Do not specify a year endclim: string, mandatory End date in form of MMDDhhmm. MUST BE USED WITH THE STARTCLIM PARAMETER. Default time is UTC e.g. endclim='06011800' Do not specify a year obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' showemptystations: string, optional Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are omitted by default. stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: string, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. 
See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. status: string, optional A value of either active or inactive returns stations currently set as active or inactive in the archive. Omitting this param returns all stations. e.g. status='active' units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of climatology observations through the get_response() function. Raises: ------- None. """ self._check_geo_param(kwargs) kwargs['startclim'] = startclim kwargs['endclim'] = endclim kwargs['token'] = self.token return self._get_response('stations/climatology', kwargs)
python
def climatology(self, startclim, endclim, **kwargs): r""" Returns a climatology of observations at a user specified location for a specified time. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- startclim: string, mandatory Start date in form of MMDDhhmm. MUST BE USED WITH THE ENDCLIM PARAMETER. Default time is UTC e.g. startclim='06011800' Do not specify a year endclim: string, mandatory End date in form of MMDDhhmm. MUST BE USED WITH THE STARTCLIM PARAMETER. Default time is UTC e.g. endclim='06011800' Do not specify a year obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' showemptystations: string, optional Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are omitted by default. stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: string, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. 
See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. status: string, optional A value of either active or inactive returns stations currently set as active or inactive in the archive. Omitting this param returns all stations. e.g. status='active' units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of climatology observations through the get_response() function. Raises: ------- None. """ self._check_geo_param(kwargs) kwargs['startclim'] = startclim kwargs['endclim'] = endclim kwargs['token'] = self.token return self._get_response('stations/climatology', kwargs)
r""" Returns a climatology of observations at a user specified location for a specified time. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- startclim: string, mandatory Start date in form of MMDDhhmm. MUST BE USED WITH THE ENDCLIM PARAMETER. Default time is UTC e.g. startclim='06011800' Do not specify a year endclim: string, mandatory End date in form of MMDDhhmm. MUST BE USED WITH THE STARTCLIM PARAMETER. Default time is UTC e.g. endclim='06011800' Do not specify a year obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' showemptystations: string, optional Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are omitted by default. stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: string, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. 
nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. status: string, optional A value of either active or inactive returns stations currently set as active or inactive in the archive. Omitting this param returns all stations. e.g. status='active' units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of climatology observations through the get_response() function. Raises: ------- None.
https://github.com/mesowx/MesoPy/blob/cd1e837e108ed7a110d81cf789f19afcdd52145b/MesoPy.py#L497-L571
mesowx/MesoPy
MesoPy.py
Meso.climate_stats
def climate_stats(self, startclim, endclim, type, **kwargs): r""" Returns a dictionary of aggregated yearly climate statistics (count, standard deviation, average, median, maximum, minimum, min time, and max time depending on user specified type) of a time series for a specified range of time at user specified location. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- type: string, mandatory Describes what statistical values will be returned. Can be one of the following values: "avg"/"average"/"mean", "max"/"maximum", "min"/"minimum", "stdev"/"standarddeviation"/"std", "median"/"med", "count", or "all". "All" will return all of the statistics. startclim: string, mandatory Start date in form of MMDDhhmm. MUST BE USED WITH THE ENDCLIM PARAMETER. Default time is UTC e.g. startclim=06011800 Do not specify a year. endclim: string, mandatory End date in form of MMDDhhmm. MUST BE USED WITH THE STARTCLIM PARAMETER. Default time is UTC e.g. endclim=06011800 Do not specify a year. obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local'. showemptystations: string, optional Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are omitted by default. stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. 
country='us,ca,mx' radius: string, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of aggregated climatology statistics. Raises: ------- None. 
""" self._check_geo_param(kwargs) kwargs['type'] = type kwargs['startclim'] = startclim kwargs['endclim'] = endclim kwargs['token'] = self.token return self._get_response('stations/climatology', kwargs)
python
def climate_stats(self, startclim, endclim, type, **kwargs): r""" Returns a dictionary of aggregated yearly climate statistics (count, standard deviation, average, median, maximum, minimum, min time, and max time depending on user specified type) of a time series for a specified range of time at user specified location. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- type: string, mandatory Describes what statistical values will be returned. Can be one of the following values: "avg"/"average"/"mean", "max"/"maximum", "min"/"minimum", "stdev"/"standarddeviation"/"std", "median"/"med", "count", or "all". "All" will return all of the statistics. startclim: string, mandatory Start date in form of MMDDhhmm. MUST BE USED WITH THE ENDCLIM PARAMETER. Default time is UTC e.g. startclim=06011800 Do not specify a year. endclim: string, mandatory End date in form of MMDDhhmm. MUST BE USED WITH THE STARTCLIM PARAMETER. Default time is UTC e.g. endclim=06011800 Do not specify a year. obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local'. showemptystations: string, optional Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are omitted by default. stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. 
country='us,ca,mx' radius: string, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of aggregated climatology statistics. Raises: ------- None. 
""" self._check_geo_param(kwargs) kwargs['type'] = type kwargs['startclim'] = startclim kwargs['endclim'] = endclim kwargs['token'] = self.token return self._get_response('stations/climatology', kwargs)
r""" Returns a dictionary of aggregated yearly climate statistics (count, standard deviation, average, median, maximum, minimum, min time, and max time depending on user specified type) of a time series for a specified range of time at user specified location. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- type: string, mandatory Describes what statistical values will be returned. Can be one of the following values: "avg"/"average"/"mean", "max"/"maximum", "min"/"minimum", "stdev"/"standarddeviation"/"std", "median"/"med", "count", or "all". "All" will return all of the statistics. startclim: string, mandatory Start date in form of MMDDhhmm. MUST BE USED WITH THE ENDCLIM PARAMETER. Default time is UTC e.g. startclim=06011800 Do not specify a year. endclim: string, mandatory End date in form of MMDDhhmm. MUST BE USED WITH THE STARTCLIM PARAMETER. Default time is UTC e.g. endclim=06011800 Do not specify a year. obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local'. showemptystations: string, optional Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are omitted by default. stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: string, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. 
radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of aggregated climatology statistics. Raises: ------- None.
https://github.com/mesowx/MesoPy/blob/cd1e837e108ed7a110d81cf789f19afcdd52145b/MesoPy.py#L594-L672
mesowx/MesoPy
MesoPy.py
Meso.time_stats
def time_stats(self, start, end, type, **kwargs): r""" Returns a dictionary of discrete time statistics (count, standard deviation, average, median, maximum, minimum, min time, and max time depending on user specified type) of a time series for a specified range of time at user specified location. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- type: string, mandatory Describes what statistical values will be returned. Can be one of the following values: "avg"/"average"/"mean", "max"/"maximum", "min"/"minimum", "stdev"/"standarddeviation"/"std", "median"/"med", "count", or "all". "All" will return all of the statistics. start: string, optional Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC e.g. start=201506011800. end: string, optional End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC e.g. end=201506011800. obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' showemptystations: string, optional Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are omitted by default. stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: list, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. 
radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of discrete time statistics. Raises: ------- None. """ self._check_geo_param(kwargs) kwargs['type'] = type kwargs['start'] = start kwargs['end'] = end kwargs['token'] = self.token return self._get_response('stations/statistics', kwargs)
python
def time_stats(self, start, end, type, **kwargs): r""" Returns a dictionary of discrete time statistics (count, standard deviation, average, median, maximum, minimum, min time, and max time depending on user specified type) of a time series for a specified range of time at user specified location. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- type: string, mandatory Describes what statistical values will be returned. Can be one of the following values: "avg"/"average"/"mean", "max"/"maximum", "min"/"minimum", "stdev"/"standarddeviation"/"std", "median"/"med", "count", or "all". "All" will return all of the statistics. start: string, optional Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC e.g. start=201506011800. end: string, optional End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC e.g. end=201506011800. obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' showemptystations: string, optional Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are omitted by default. stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: list, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. 
radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of discrete time statistics. Raises: ------- None. """ self._check_geo_param(kwargs) kwargs['type'] = type kwargs['start'] = start kwargs['end'] = end kwargs['token'] = self.token return self._get_response('stations/statistics', kwargs)
r""" Returns a dictionary of discrete time statistics (count, standard deviation, average, median, maximum, minimum, min time, and max time depending on user specified type) of a time series for a specified range of time at user specified location. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- type: string, mandatory Describes what statistical values will be returned. Can be one of the following values: "avg"/"average"/"mean", "max"/"maximum", "min"/"minimum", "stdev"/"standarddeviation"/"std", "median"/"med", "count", or "all". "All" will return all of the statistics. start: string, optional Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC e.g. start=201506011800. end: string, optional End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC e.g. end=201506011800. obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' showemptystations: string, optional Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are omitted by default. stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: list, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. 
radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of discrete time statistics. Raises: ------- None.
https://github.com/mesowx/MesoPy/blob/cd1e837e108ed7a110d81cf789f19afcdd52145b/MesoPy.py#L674-L752
mesowx/MesoPy
MesoPy.py
Meso.metadata
def metadata(self, **kwargs): r""" Returns the metadata for a station or stations. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below for optional parameters. Arguments: ---------- complete: string, optional A value of 1 or 0. When set to 1, an extended list of metadata attributes for each returned station is provided. This result is useful for exploring the zones and regions in which a station resides. e.g. complete='1' sensorvars: string, optional A value of 1 or 0. When set to 1, a complete history of sensor variables and period of record is given for each station. e.g. sensorvars='1' obrange: string, optional Filters metadata for stations which were in operation for a specified time period. Users can specify one date or a date range. Dates are in the format of YYYYmmdd. e.g. obrange='20150101', obrange='20040101,20060101' obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: string, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. 
See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. status: string, optional A value of either active or inactive returns stations currently set as active or inactive in the archive. Omitting this param returns all stations. e.g. status='active' units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- A dictionary of metadata. Raises: ------- None. """ self._check_geo_param(kwargs) kwargs['token'] = self.token return self._get_response('stations/metadata', kwargs)
python
def metadata(self, **kwargs): r""" Returns the metadata for a station or stations. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below for optional parameters. Arguments: ---------- complete: string, optional A value of 1 or 0. When set to 1, an extended list of metadata attributes for each returned station is provided. This result is useful for exploring the zones and regions in which a station resides. e.g. complete='1' sensorvars: string, optional A value of 1 or 0. When set to 1, a complete history of sensor variables and period of record is given for each station. e.g. sensorvars='1' obrange: string, optional Filters metadata for stations which were in operation for a specified time period. Users can specify one date or a date range. Dates are in the format of YYYYmmdd. e.g. obrange='20150101', obrange='20040101,20060101' obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: string, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. 
See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. status: string, optional A value of either active or inactive returns stations currently set as active or inactive in the archive. Omitting this param returns all stations. e.g. status='active' units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- A dictionary of metadata. Raises: ------- None. """ self._check_geo_param(kwargs) kwargs['token'] = self.token return self._get_response('stations/metadata', kwargs)
r""" Returns the metadata for a station or stations. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below for optional parameters. Arguments: ---------- complete: string, optional A value of 1 or 0. When set to 1, an extended list of metadata attributes for each returned station is provided. This result is useful for exploring the zones and regions in which a station resides. e.g. complete='1' sensorvars: string, optional A value of 1 or 0. When set to 1, a complete history of sensor variables and period of record is given for each station. e.g. sensorvars='1' obrange: string, optional Filters metadata for stations which were in operation for a specified time period. Users can specify one date or a date range. Dates are in the format of YYYYmmdd. e.g. obrange='20150101', obrange='20040101,20060101' obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: string, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. 
See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. status: string, optional A value of either active or inactive returns stations currently set as active or inactive in the archive. Omitting this param returns all stations. e.g. status='active' units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- A dictionary of metadata. Raises: ------- None.
https://github.com/mesowx/MesoPy/blob/cd1e837e108ed7a110d81cf789f19afcdd52145b/MesoPy.py#L754-L827
mesowx/MesoPy
MesoPy.py
Meso.latency
def latency(self, start, end, **kwargs): r""" Returns data latency values for a station based on a start and end date/time. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- start: string, mandatory Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC e.g. start='201506011800' end: string, mandatory End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC e.g. end='201506011800' stats: string, optional Describes what statistical values will be returned. Can be one of the following values: "avg"/"average"/"mean", "max"/"maximum", "min"/"minimum", "stdev"/"standarddeviation"/"std", "median"/"med", "count", or "all". "All" will return all of the statistics. e.g. stats='avg' obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: list, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. 
See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables() function to see a list of sensor vars. units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of latency data. Raises: ------- None. """ self._check_geo_param(kwargs) kwargs['start'] = start kwargs['end'] = end kwargs['token'] = self.token return self._get_response('stations/latency', kwargs)
python
def latency(self, start, end, **kwargs):
    r"""
    Return data latency values for stations over a start/end window.

    At least one geographic search parameter ('stid', 'state',
    'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone',
    'gacc', or 'subgacc') must be supplied as a keyword argument to
    obtain data. See the metadata() function for station IDs.

    Arguments:
    ----------
    start: string, mandatory
        Start date as YYYYMMDDhhmm. Must be used with `end`.
        Default time is UTC. e.g. start='201506011800'
    end: string, mandatory
        End date as YYYYMMDDhhmm. Must be used with `start`.
        Default time is UTC. e.g. end='201506011800'
    kwargs: optional
        Any of the API's optional filters, e.g. stats, obtimezone,
        stid, county, state, country, radius, bbox, cwa, nwsfirezone,
        gacc, subgacc, vars, units, groupby, timeformat.

    Returns:
    --------
    Dictionary of latency data.

    Raises:
    -------
    None.

    """
    # Validate that a geographic selector was supplied before the
    # mandatory query parameters are folded in.
    self._check_geo_param(kwargs)

    # Fold the time window and auth token into the query parameters.
    kwargs.update(start=start, end=end, token=self.token)

    return self._get_response('stations/latency', kwargs)
r""" Returns data latency values for a station based on a start and end date/time. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- start: string, mandatory Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC e.g. start='201506011800' end: string, mandatory End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC e.g. end='201506011800' stats: string, optional Describes what statistical values will be returned. Can be one of the following values: "avg"/"average"/"mean", "max"/"maximum", "min"/"minimum", "stdev"/"standarddeviation"/"std", "median"/"med", "count", or "all". "All" will return all of the statistics. e.g. stats='avg' obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: list, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: string, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. 
See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables() function to see a list of sensor vars. units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of latency data. Raises: ------- None.
https://github.com/mesowx/MesoPy/blob/cd1e837e108ed7a110d81cf789f19afcdd52145b/MesoPy.py#L829-L901
sdispater/backpack
backpack/collections/base_collection.py
BaseCollection.avg
def avg(self, key=None):
    """
    Compute the mean of the values for a given key.

    Implicitly returns ``None`` when the collection is empty, since a
    zero count would make the division undefined.

    :param key: The key to average over (``None`` for the items themselves)
    :type key: mixed

    :rtype: float or int
    """
    total = self.count()
    if total:
        return self.sum(key) / total
python
def avg(self, key=None):
    """
    Compute the mean of the values for a given key.

    Implicitly returns ``None`` when the collection is empty, since a
    zero count would make the division undefined.

    :param key: The key to average over (``None`` for the items themselves)
    :type key: mixed

    :rtype: float or int
    """
    total = self.count()
    if total:
        return self.sum(key) / total
Get the average value of a given key. :param key: The key to get the average for :type key: mixed :rtype: float or int
https://github.com/sdispater/backpack/blob/764e7f79fd2b1c1ac4883d8e5c9da5c65dfc875e/backpack/collections/base_collection.py#L59-L71
sdispater/backpack
backpack/collections/base_collection.py
BaseCollection.chunk
def chunk(self, size):
    """
    Break the collection into multiple smaller collections of a given size.

    :param size: The maximum number of items per chunk
    :type size: int

    :rtype: Collection
    """
    pieces = self._chunk(size)
    # Wrap each raw slice in a collection, then wrap the lot.
    wrapped = [self.__class__(piece) for piece in pieces]
    return self.__class__(wrapped)
python
def chunk(self, size):
    """
    Break the collection into multiple smaller collections of a given size.

    :param size: The maximum number of items per chunk
    :type size: int

    :rtype: Collection
    """
    pieces = self._chunk(size)
    # Wrap each raw slice in a collection, then wrap the lot.
    wrapped = [self.__class__(piece) for piece in pieces]
    return self.__class__(wrapped)
Chunk the underlying collection. :param size: The chunk size :type size: int :rtype: Collection
https://github.com/sdispater/backpack/blob/764e7f79fd2b1c1ac4883d8e5c9da5c65dfc875e/backpack/collections/base_collection.py#L73-L84
sdispater/backpack
backpack/collections/base_collection.py
BaseCollection._chunk
def _chunk(self, size): """ Chunk the underlying collection. :param size: The chunk size :type size: int :rtype: Collection """ items = self.items return [items[i:i + size] for i in range(0, len(items), size)]
python
def _chunk(self, size): """ Chunk the underlying collection. :param size: The chunk size :type size: int :rtype: Collection """ items = self.items return [items[i:i + size] for i in range(0, len(items), size)]
Chunk the underlying collection. :param size: The chunk size :type size: int :rtype: Collection
https://github.com/sdispater/backpack/blob/764e7f79fd2b1c1ac4883d8e5c9da5c65dfc875e/backpack/collections/base_collection.py#L86-L97
sdispater/backpack
backpack/collections/base_collection.py
BaseCollection.contains
def contains(self, key, value=None):
    """
    Determine whether the collection holds a matching element.

    With only *key* given, a callable acts as a predicate and any other
    value is tested for plain membership. With *value* also given, checks
    whether any item has ``key`` equal to ``value``.

    :param key: The element, predicate, or lookup key
    :type key: int or str or callable
    :param value: The value compared against the keyed item value
    :type value: mixed

    :return: Whether a matching element is in the collection
    :rtype: bool
    """
    if value is not None:
        # Re-enter with a predicate comparing the keyed value.
        return self.contains(lambda item: data_get(item, key) == value)

    if self._use_as_callable(key):
        return self.first(key) is not None

    return key in self.items
python
def contains(self, key, value=None):
    """
    Determine whether the collection holds a matching element.

    With only *key* given, a callable acts as a predicate and any other
    value is tested for plain membership. With *value* also given, checks
    whether any item has ``key`` equal to ``value``.

    :param key: The element, predicate, or lookup key
    :type key: int or str or callable
    :param value: The value compared against the keyed item value
    :type value: mixed

    :return: Whether a matching element is in the collection
    :rtype: bool
    """
    if value is not None:
        # Re-enter with a predicate comparing the keyed value.
        return self.contains(lambda item: data_get(item, key) == value)

    if self._use_as_callable(key):
        return self.first(key) is not None

    return key in self.items
Determine if an element is in the collection :param key: The element :type key: int or str or callable :param value: The value of the element :type value: mixed :return: Whether the element is in the collection :rtype: bool
https://github.com/sdispater/backpack/blob/764e7f79fd2b1c1ac4883d8e5c9da5c65dfc875e/backpack/collections/base_collection.py#L102-L121
sdispater/backpack
backpack/collections/base_collection.py
BaseCollection.collapse
def collapse(self):
    """
    Merge a collection of lists/collections into one collection that is
    a single level deep.

    :return: A new Collection instance with collapsed items
    :rtype: Collection
    """
    merged = []
    for element in self.items:
        # Unwrap nested collections to their raw item lists first.
        if isinstance(element, BaseCollection):
            element = element.all()
        merged += element
    return self.__class__(merged)
python
def collapse(self):
    """
    Merge a collection of lists/collections into one collection that is
    a single level deep.

    :return: A new Collection instance with collapsed items
    :rtype: Collection
    """
    merged = []
    for element in self.items:
        # Unwrap nested collections to their raw item lists first.
        if isinstance(element, BaseCollection):
            element = element.all()
        merged += element
    return self.__class__(merged)
Collapse the collection items into a single element (list) :return: A new Collection instance with collapsed items :rtype: Collection
https://github.com/sdispater/backpack/blob/764e7f79fd2b1c1ac4883d8e5c9da5c65dfc875e/backpack/collections/base_collection.py#L123-L140
sdispater/backpack
backpack/collections/base_collection.py
BaseCollection.diff
def diff(self, items):
    """
    Return the elements of this collection that are absent from *items*.

    :param items: The items to diff with
    :type items: mixed

    :return: A Collection instance
    :rtype: Collection
    """
    missing = []
    for element in self.items:
        if element not in items:
            missing.append(element)
    return self.__class__(missing)
python
def diff(self, items):
    """
    Return the elements of this collection that are absent from *items*.

    :param items: The items to diff with
    :type items: mixed

    :return: A Collection instance
    :rtype: Collection
    """
    missing = []
    for element in self.items:
        if element not in items:
            missing.append(element)
    return self.__class__(missing)
Diff the collections with the given items :param items: The items to diff with :type items: mixed :return: A Collection instance :rtype: Collection
https://github.com/sdispater/backpack/blob/764e7f79fd2b1c1ac4883d8e5c9da5c65dfc875e/backpack/collections/base_collection.py#L145-L155
sdispater/backpack
backpack/collections/base_collection.py
BaseCollection.each
def each(self, callback):
    """
    Apply *callback* to every item, stopping early if it returns False.

    .. warning::

        The collection itself is never modified; use ``transform()``
        to rewrite items in place.

    :param callback: The callback to execute; returning ``False``
                     (exactly, not merely falsy) aborts the iteration
    :type callback: callable

    :rtype: Collection
    """
    for element in self.items:
        if callback(element) is False:
            break
    return self
python
def each(self, callback):
    """
    Apply *callback* to every item, stopping early if it returns False.

    .. warning::

        The collection itself is never modified; use ``transform()``
        to rewrite items in place.

    :param callback: The callback to execute; returning ``False``
                     (exactly, not merely falsy) aborts the iteration
    :type callback: callable

    :rtype: Collection
    """
    for element in self.items:
        if callback(element) is False:
            break
    return self
Execute a callback over each item. .. code:: collection = Collection([1, 2, 3]) collection.each(lambda x: x + 3) .. warning:: It only applies the callback but does not modify the collection's items. Use the `transform() <#backpack.Collection.transform>`_ method to modify the collection. :param callback: The callback to execute :type callback: callable :rtype: Collection
https://github.com/sdispater/backpack/blob/764e7f79fd2b1c1ac4883d8e5c9da5c65dfc875e/backpack/collections/base_collection.py#L157-L183
sdispater/backpack
backpack/collections/base_collection.py
BaseCollection.every
def every(self, step, offset=0):
    """
    Build a new collection from every *step*-th element, starting at
    position *offset*.

    :param step: The step size
    :type step: int
    :param offset: The start offset
    :type offset: int

    :rtype: Collection
    """
    picked = [
        element
        for position, element in enumerate(self.items)
        if position % step == offset
    ]
    return self.__class__(picked)
python
def every(self, step, offset=0):
    """
    Build a new collection from every *step*-th element, starting at
    position *offset*.

    :param step: The step size
    :type step: int
    :param offset: The start offset
    :type offset: int

    :rtype: Collection
    """
    picked = [
        element
        for position, element in enumerate(self.items)
        if position % step == offset
    ]
    return self.__class__(picked)
Create a new collection consisting of every n-th element. :param step: The step size :type step: int :param offset: The start offset :type offset: int :rtype: Collection
https://github.com/sdispater/backpack/blob/764e7f79fd2b1c1ac4883d8e5c9da5c65dfc875e/backpack/collections/base_collection.py#L185-L203
sdispater/backpack
backpack/collections/base_collection.py
BaseCollection.without
def without(self, *keys):
    """
    Get all items except for those with the specified keys.

    :param keys: The keys (indexes) to remove
    :type keys: tuple

    :rtype: Collection
    """
    remaining = copy(self.items)
    # Delete from the highest index down so earlier deletions do not
    # shift the positions still pending removal.
    for key in reversed(sorted(keys)):
        del remaining[key]
    return self.__class__(remaining)
python
def without(self, *keys):
    """
    Get all items except for those with the specified keys.

    :param keys: The keys (indexes) to remove
    :type keys: tuple

    :rtype: Collection
    """
    remaining = copy(self.items)
    # Delete from the highest index down so earlier deletions do not
    # shift the positions still pending removal.
    for key in reversed(sorted(keys)):
        del remaining[key]
    return self.__class__(remaining)
Get all items except for those with the specified keys. :param keys: The keys to remove :type keys: tuple :rtype: Collection
https://github.com/sdispater/backpack/blob/764e7f79fd2b1c1ac4883d8e5c9da5c65dfc875e/backpack/collections/base_collection.py#L205-L221
sdispater/backpack
backpack/collections/base_collection.py
BaseCollection.only
def only(self, *keys):
    """
    Get only the items at the specified keys (indexes).

    :param keys: The keys to keep
    :type keys: tuple

    :rtype: Collection
    """
    kept = [value for key, value in enumerate(self.items) if key in keys]
    return self.__class__(kept)
python
def only(self, *keys):
    """
    Get only the items at the specified keys (indexes).

    :param keys: The keys to keep
    :type keys: tuple

    :rtype: Collection
    """
    kept = [value for key, value in enumerate(self.items) if key in keys]
    return self.__class__(kept)
Get the items with the specified keys. :param keys: The keys to keep :type keys: tuple :rtype: Collection
https://github.com/sdispater/backpack/blob/764e7f79fd2b1c1ac4883d8e5c9da5c65dfc875e/backpack/collections/base_collection.py#L223-L238
sdispater/backpack
backpack/collections/base_collection.py
BaseCollection.filter
def filter(self, callback=None):
    """
    Keep only the items accepted by *callback*, or the truthy items
    when no callback is given.

    :param callback: The filter callback
    :type callback: callable or None

    :rtype: Collection
    """
    if callback:
        kept = [item for item in self.items if callback(item)]
    else:
        # No predicate: drop falsy entries, mirroring filter(None, ...).
        kept = [item for item in self.items if item]
    return self.__class__(kept)
python
def filter(self, callback=None):
    """
    Keep only the items accepted by *callback*, or the truthy items
    when no callback is given.

    :param callback: The filter callback
    :type callback: callable or None

    :rtype: Collection
    """
    if callback:
        kept = [item for item in self.items if callback(item)]
    else:
        # No predicate: drop falsy entries, mirroring filter(None, ...).
        kept = [item for item in self.items if item]
    return self.__class__(kept)
Run a filter over each of the items. :param callback: The filter callback :type callback: callable or None :rtype: Collection
https://github.com/sdispater/backpack/blob/764e7f79fd2b1c1ac4883d8e5c9da5c65dfc875e/backpack/collections/base_collection.py#L240-L252
sdispater/backpack
backpack/collections/base_collection.py
BaseCollection.where
def where(self, key, value):
    """
    Filter items down to those whose *key* equals *value*.

    :param key: The key to filter by
    :type key: str
    :param value: The value to filter by
    :type value: mixed

    :rtype: Collection
    """
    def _matches(item):
        return data_get(item, key) == value

    return self.filter(_matches)
python
def where(self, key, value):
    """
    Filter items down to those whose *key* equals *value*.

    :param key: The key to filter by
    :type key: str
    :param value: The value to filter by
    :type value: mixed

    :rtype: Collection
    """
    def _matches(item):
        return data_get(item, key) == value

    return self.filter(_matches)
Filter items by the given key value pair. :param key: The key to filter by :type key: str :param value: The value to filter by :type value: mixed :rtype: Collection
https://github.com/sdispater/backpack/blob/764e7f79fd2b1c1ac4883d8e5c9da5c65dfc875e/backpack/collections/base_collection.py#L254-L266
sdispater/backpack
backpack/collections/base_collection.py
BaseCollection.first
def first(self, callback=None, default=None):
    """
    Return the first item of the collection, or the first item for
    which *callback* is truthy.

    :param callback: Optional predicate selecting the item
    :type callback: callable or None
    :param default: The fallback when nothing matches or the
                    collection is empty
    :type default: mixed
    """
    if callback is None:
        # No predicate: plain head-of-list access with a fallback.
        return self.items[0] if self.items else default

    for candidate in self.items:
        if callback(candidate):
            return candidate
    return value(default)
python
def first(self, callback=None, default=None):
    """
    Return the first item of the collection, or the first item for
    which *callback* is truthy.

    :param callback: Optional predicate selecting the item
    :type callback: callable or None
    :param default: The fallback when nothing matches or the
                    collection is empty
    :type default: mixed
    """
    if callback is None:
        # No predicate: plain head-of-list access with a fallback.
        return self.items[0] if self.items else default

    for candidate in self.items:
        if callback(candidate):
            return candidate
    return value(default)
Get the first item of the collection. :param default: The default value :type default: mixed
https://github.com/sdispater/backpack/blob/764e7f79fd2b1c1ac4883d8e5c9da5c65dfc875e/backpack/collections/base_collection.py#L268-L285
sdispater/backpack
backpack/collections/base_collection.py
BaseCollection.flatten
def flatten(self):
    """
    Recursively flatten nested lists and dict values into a single
    flat collection of leaves.

    :rtype: Collection
    """
    def _walk(node):
        # Dicts contribute their values; lists recurse element-wise;
        # anything else is a leaf.
        if isinstance(node, dict):
            for child in node.values():
                for leaf in _walk(child):
                    yield leaf
        elif isinstance(node, list):
            for child in node:
                for leaf in _walk(child):
                    yield leaf
        else:
            yield node

    return self.__class__(list(_walk(self.items)))
python
def flatten(self):
    """
    Recursively flatten nested lists and dict values into a single
    flat collection of leaves.

    :rtype: Collection
    """
    def _walk(node):
        # Dicts contribute their values; lists recurse element-wise;
        # anything else is a leaf.
        if isinstance(node, dict):
            for child in node.values():
                for leaf in _walk(child):
                    yield leaf
        elif isinstance(node, list):
            for child in node:
                for leaf in _walk(child):
                    yield leaf
        else:
            yield node

    return self.__class__(list(_walk(self.items)))
Get a flattened list of the items in the collection. :rtype: Collection
https://github.com/sdispater/backpack/blob/764e7f79fd2b1c1ac4883d8e5c9da5c65dfc875e/backpack/collections/base_collection.py#L287-L306