sentence1: string (lengths 52 to 3.87M)
sentence2: string (lengths 1 to 47.2k)
label: string (1 class: entailment)
def styleToDict(styleStr):
    '''
        styleToDict - Gets a dictionary of style attribute/value pairs.

        NOTE: dash-names (like padding-top) are used here

        @return - OrderedDict of "style" attribute.
    '''
    styleStr = styleStr.strip()
    styles = styleStr.split(';')  # Won't work for strings containing semicolons
    styleDict = OrderedDict()

    for item in styles:
        try:
            splitIdx = item.index(':')
            name = item[:splitIdx].strip().lower()
            value = item[splitIdx+1:].strip()
            styleDict[name] = value
        except ValueError:
            # No ':' in this segment; skip it
            continue

    return styleDict
styleToDict - Gets a dictionary of style attribute/value pairs. NOTE: dash-names (like padding-top) are used here @return - OrderedDict of "style" attribute.
entailment
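A quick usage sketch (an illustration, assuming styleToDict from the record above is in scope; the expected results follow directly from the split/strip/lower logic):

    styleToDict('padding-top: 10px; Color: red')
    # OrderedDict([('padding-top', '10px'), ('color', 'red')])

    styleToDict('font-weight: bold; junk')
    # the 'junk' segment has no ':' and is skipped:
    # OrderedDict([('font-weight', 'bold')])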
def _asStr(self):
    '''
        _asStr - Get the string representation of this style

        @return <str> - A string representation of this style (semicolon separated, key: value format)
    '''
    styleDict = self._styleDict

    if styleDict:
        return '; '.join([name + ': ' + value for name, value in styleDict.items()])
    return ''
_asStr - Get the string representation of this style @return <str> - A string representation of this style (semicolon separated, key: value format)
entailment
def _special_value_rows(em):
    '''
        _special_value_rows - Handle "rows" special attribute, which differs
          if tagName is a textarea or frameset
    '''
    if em.tagName == 'textarea':
        return convertToIntRange(em.getAttribute('rows', 2), minValue=1, maxValue=None, invalidDefault=2)
    else:
        # frameset
        return em.getAttribute('rows', '')
_special_value_rows - Handle "rows" special attribute, which differs if tagName is a textarea or frameset
entailment
def _special_value_cols(em):
    '''
        _special_value_cols - Handle "cols" special attribute, which differs
          if tagName is a textarea or frameset
    '''
    if em.tagName == 'textarea':
        return convertToIntRange(em.getAttribute('cols', 20), minValue=1, maxValue=None, invalidDefault=20)
    else:
        # frameset
        return em.getAttribute('cols', '')
_special_value_cols - Handle "cols" special attribute, which differs if tagName is a textarea or frameset
entailment
def _special_value_autocomplete(em):
    '''
        handle "autocomplete" property, which has different behaviour for form vs input
    '''
    if em.tagName == 'form':
        return convertPossibleValues(em.getAttribute('autocomplete', 'on'), POSSIBLE_VALUES_ON_OFF, invalidDefault='on', emptyValue=EMPTY_IS_INVALID)

    # else: input
    return convertPossibleValues(em.getAttribute('autocomplete', ''), POSSIBLE_VALUES_ON_OFF, invalidDefault="", emptyValue='')
handle "autocomplete" property, which has different behaviour for form vs input
entailment
def _special_value_size(em):
    '''
        handle "size" property, which has different behaviour for input vs everything else
    '''
    if em.tagName == 'input':
        # TODO: "size" on an input is implemented very weirdly. Negative values
        #   are treated as invalid, a value of "0" raises an exception (and does
        #   not set the HTML attribute), and there is no upper limit.
        return convertToPositiveInt(em.getAttribute('size', 20), invalidDefault=20)

    return em.getAttribute('size', '')
handle "size" property, which has different behaviour for input vs everything else
entailment
def _special_value_maxLength(em, newValue=NOT_PROVIDED):
    '''
        _special_value_maxLength - Handle the special "maxLength" property

        @param em <AdvancedTag> - The tag element

        @param newValue - Default NOT_PROVIDED; if provided, that value is used
          instead of the current .getAttribute value on the tag. This is because
          this method can be used for both validation and getting/setting.
    '''
    if newValue is NOT_PROVIDED:
        if not em.hasAttribute('maxlength'):
            return -1
        curValue = em.getAttribute('maxlength', '-1')
        # If we are accessing, the invalid default should be negative
        invalidDefault = -1
    else:
        curValue = newValue
        # If we are setting, we should raise an exception upon invalid value
        invalidDefault = IndexSizeErrorException

    return convertToIntRange(curValue, minValue=0, maxValue=None, emptyValue='0', invalidDefault=invalidDefault)
_special_value_maxLength - Handle the special "maxLength" property @param em <AdvancedTag> - The tag element @param newValue - Default NOT_PROVIDED; if provided, that value is used instead of the current .getAttribute value on the tag. This is because this method can be used for both validation and getting/setting
entailment
def get_by_value(cls, value, type):
    """ Converts a value into a corresponding data object.
    For files, this looks up a file DataObject by name, uuid,
    and/or md5. For other types, it creates a new DataObject.
    """
    if type == 'file':
        return cls._get_file_by_value(value)
    else:
        data_object = DataObject(
            data={'value': cls._type_cast(value, type)}, type=type)
        data_object.full_clean()
        data_object.save()
        return data_object
Converts a value into a corresponding data object. For files, this looks up a file DataObject by name, uuid, and/or md5. For other types, it creates a new DataObject.
entailment
def _get_file_by_value(cls, value):
    """Look up a file DataObject by name, uuid, and/or md5.
    """
    # Ignore any FileResource with no DataObject. This is a typical state
    # for a deleted file that has not yet been cleaned up.
    queryset = FileResource.objects.exclude(data_object__isnull=True)
    matches = FileResource.filter_by_name_or_id_or_tag_or_hash(
        value, queryset=queryset)
    if matches.count() == 0:
        raise ValidationError(
            'No file found that matches value "%s"' % value)
    elif matches.count() > 1:
        match_id_list = ['%s@%s' % (match.filename, match.get_uuid())
                         for match in matches]
        match_id_string = ('", "'.join(match_id_list))
        raise ValidationError(
            'Multiple files were found matching value "%s": "%s". '\
            'Use a more precise identifier to select just one file.' % (
                value, match_id_string))
    return matches.first().data_object
Look up a file DataObject by name, uuid, and/or md5.
entailment
def _get_run_breadcrumbs(cls, source_type, data_object, task_attempt):
    """Create a path for a given file, in such a way that files end up
    being organized and browsable by run
    """
    # We cannot generate the path unless connected to a TaskAttempt
    # and a run
    if not task_attempt:
        return []

    # If multiple tasks exist, use the original.
    task = task_attempt.tasks.earliest('datetime_created')
    if task is None:
        return []

    run = task.run
    if run is None:
        return []

    breadcrumbs = [
        run.name,
        "task-%s" % str(task.uuid)[0:8],
        "attempt-%s" % str(task_attempt.uuid)[0:8],
    ]

    # Include any ancestors if run is nested
    while run.parent is not None:
        run = run.parent
        breadcrumbs = [run.name] + breadcrumbs

    # Prepend first breadcrumb with datetime and id
    breadcrumbs[0] = "%s-%s-%s" % (
        run.datetime_created.strftime('%Y-%m-%dT%H.%M.%SZ'),
        str(run.uuid)[0:8],
        breadcrumbs[0])

    breadcrumbs = ['runs'] + breadcrumbs
    return breadcrumbs
Create a path for a given file, in such a way that files end up being organized and browsable by run
entailment
def filter_by_name_or_id_or_tag(self, query_string, queryset=None):
    """Find objects that match the identifier of form {name}@{ID},
    {name}, or @{ID}, where ID may be truncated
    """
    assert self.Model.NAME_FIELD, \
        'NAME_FIELD is missing on model %s' % self.Model.__name__
    assert self.Model.ID_FIELD, \
        'ID_FIELD is missing on model %s' % self.Model.__name__
    assert self.Model.TAG_FIELD, \
        'TAG_FIELD is missing on model %s' % self.Model.__name__

    filter_args = {}
    name, uuid, tag = self._parse_as_name_or_id_or_tag(query_string)
    if name is not None:
        filter_args[self.Model.NAME_FIELD] = name
    if uuid is not None:
        filter_args[self.Model.ID_FIELD+'__startswith'] = uuid
    if tag is not None:
        filter_args[self.Model.TAG_FIELD] = tag
    if queryset is None:
        queryset = self.Model.objects.all()
    return queryset.filter(**filter_args)
Find objects that match the identifier of form {name}@{ID}, {name}, or @{ID}, where ID may be truncated
entailment
def save(self, *args, **kwargs):
    """
    This save method protects against two processes concurrently modifying
    the same object. Normally the second save would silently overwrite the
    changes from the first. Instead we raise a ConcurrentModificationError.
    """
    cls = self.__class__
    if self.pk:
        rows = cls.objects.filter(
            pk=self.pk, _change=self._change).update(
                _change=self._change + 1)
        if not rows:
            raise ConcurrentModificationError(cls.__name__, self.pk)
        self._change += 1
    count = 0
    max_retries = 3
    while True:
        try:
            return super(BaseModel, self).save(*args, **kwargs)
        except django.db.utils.OperationalError:
            if count >= max_retries:
                raise
            count += 1
This save method protects against two processes concurrently modifying the same object. Normally the second save would silently overwrite the changes from the first. Instead we raise a ConcurrentModificationError.
entailment
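The filter(...).update(...) call above is an optimistic-locking compare-and-swap: the UPDATE only matches a row whose _change counter still holds the value this process last read, and the database makes that check-and-increment atomic. A minimal in-memory sketch of the same control flow, with no Django (all names here are illustrative):

    class ConcurrentModificationError(Exception):
        pass

    db = {'pk1': {'name': 'old', '_change': 0}}  # stand-in for a table row

    def save(pk, fields, expected_change):
        row = db[pk]
        if row['_change'] != expected_change:   # compare...
            raise ConcurrentModificationError(pk)
        row.update(fields)                      # ...and swap
        row['_change'] += 1

    save('pk1', {'name': 'new'}, expected_change=0)        # succeeds, counter -> 1
    try:
        save('pk1', {'name': 'other'}, expected_change=0)  # stale counter
    except ConcurrentModificationError:
        print('lost the race: reload and retry')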
def setattrs_and_save_with_retries(self, assignments, max_retries=5):
    """
    If the object is being edited by other processes, save may fail due
    to concurrent modification. This method recovers and retries the edit.

    assignments is a dict of {attribute: value}
    """
    count = 0
    obj = self
    while True:
        for attribute, value in assignments.iteritems():
            setattr(obj, attribute, value)
        try:
            obj.full_clean()
            obj.save()
        except ConcurrentModificationError:
            if count >= max_retries:
                raise SaveRetriesExceededError(
                    'Exceeded retries when saving "%s" of id "%s" '\
                    'with assigned values "%s"' %
                    (self.__class__, self.id, assignments))
            count += 1
            obj = self.__class__.objects.get(id=self.id)
            continue
        return obj
If the object is being edited by other processes, save may fail due to concurrent modification. This method recovers and retries the edit. assignments is a dict of {attribute: value}
entailment
def delete(self, *args, **kwargs):
    """
    This method implements retries for object deletion.
    """
    count = 0
    max_retries = 3
    while True:
        try:
            return super(BaseModel, self).delete(*args, **kwargs)
        except django.db.utils.OperationalError:
            if count >= max_retries:
                raise
            count += 1
This method implements retries for object deletion.
entailment
def get_server_type():
    """Checks server.ini for server type."""
    server_location_file = os.path.expanduser(SERVER_LOCATION_FILE)
    if not os.path.exists(server_location_file):
        raise Exception(
            "%s not found. Please run 'loom server set "
            "<servertype>' first." % server_location_file)
    config = ConfigParser.SafeConfigParser()
    config.read(server_location_file)
    server_type = config.get('server', 'type')
    return server_type
Checks server.ini for server type.
entailment
def _set_upload_status(self, file_data_object, upload_status):
    """ Set file_data_object.file_resource.upload_status
    """
    uuid = file_data_object['uuid']
    return self.connection.update_data_object(
        uuid,
        {'uuid': uuid, 'value': {'upload_status': upload_status}}
    )
Set file_data_object.file_resource.upload_status
entailment
def _substitute_file_uuids_throughout_template(self, template, file_dependencies):
    """Anywhere in "template" that refers to a data object but does not
    give a specific UUID, if a matching file can be found in
    "file_dependencies", we will change the data object reference to
    use that UUID. That way templates have a preference to connect to
    files nested under their ".dependencies" over files that were
    previously imported to the server.
    """
    if not isinstance(template, dict):
        # Nothing to do if this is a reference to a previously imported template.
        return
    for input in template.get('inputs', []):
        self._substitute_file_uuids_in_input(input, file_dependencies)
    for step in template.get('steps', []):
        self._substitute_file_uuids_throughout_template(step, file_dependencies)
Anywhere in "template" that refers to a data object but does not give a specific UUID, if a matching file can be found in "file_dependencies", we will change the data object reference to use that UUID. That way templates have a preference to connect to files nested under their ".dependencies" over files that were previously imported to the server.
entailment
def _get_inputs(self):
    """Converts command line args into a list of template inputs
    """
    # Convert file inputs to a dict, to make it easier to override
    # them with commandline inputs
    file_inputs = self._get_file_inputs()
    try:
        jsonschema.validate(file_inputs, file_input_schema)
    except jsonschema.ValidationError:
        raise SystemExit("ERROR! Input file was invalid")
    input_dict = {}
    for (channel, input_id) in file_inputs.iteritems():
        input_dict[channel] = input_id
    if self.args.inputs:
        for kv_pair in self.args.inputs:
            (channel, input_id) = kv_pair.split('=')
            input_dict[channel] = self._parse_string_to_nested_lists(input_id)
    inputs = []
    for (channel, contents) in input_dict.iteritems():
        inputs.append({
            'channel': channel,
            'data': {'contents': contents}
        })
    return inputs
Converts command line args into a list of template inputs
entailment
def _parse_string_to_nested_lists(self, value):
    """e.g., convert "[[a,b,c],[d,e],[f,g]]"
    into [["a","b","c"],["d","e"],["f","g"]]
    """
    if not re.match(r'\[.*\]', value.strip()):
        if '[' in value or ']' in value or ',' in value:
            raise Exception('Missing outer brace')
        elif len(value.strip()) == 0:
            raise Exception('Missing value')
        else:
            # A bare term, reached directly or through recursion
            terms = value.split(',')
            terms = [term.strip() for term in terms]
            if len(terms) == 1:
                return terms[0]
            else:
                return terms

    # remove outer braces
    value = value[1:-1]
    terms = []
    depth = 0
    leftmost = 0
    for i in range(len(value)):
        if value[i] == ',' and depth == 0:
            terms.append(
                self._parse_string_to_nested_lists(value[leftmost:i]))
            leftmost = i+1
        if value[i] == '[':
            depth += 1
        if value[i] == ']':
            depth -= 1
            if depth < 0:
                raise Exception('Unbalanced close brace')
    if depth > 0:
        raise Exception('Expected "]"')
    terms.append(
        self._parse_string_to_nested_lists(value[leftmost:len(value)]))
    return terms
e.g., convert "[[a,b,c],[d,e],[f,g]]" into [["a","b","c"],["d","e"],["f","g"]]
entailment
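A usage sketch tracing the recursion above (illustrative calls, assuming access to an instance; note that a top-level comma with no outer brace is rejected by the first branch):

    parse = self._parse_string_to_nested_lists   # bound method, for brevity
    parse('a')                 # 'a' (a bare term is returned as a string)
    parse('[a,b]')             # ['a', 'b']
    parse('[[a,b,c],[d,e]]')   # [['a', 'b', 'c'], ['d', 'e']]
    parse('a,b')               # raises: commas require an outer brace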
def _get_inputs(self, old_inputs):
    """Converts command line args into a list of template inputs
    """
    # Convert inputs to dict to facilitate overriding by channel name.
    # Also, drop DataNode ID and keep only contents.
    input_dict = {}
    for input in old_inputs:
        # Strip out DataNode UUID and URL
        input['data'] = {'contents': input['data']['contents']}
        input_dict[input['channel']] = input
    file_inputs = self._get_file_inputs()
    try:
        jsonschema.validate(file_inputs, file_input_schema)
    except jsonschema.ValidationError:
        raise SystemExit("ERROR! User inputs file is not valid")
    for (channel, input_id) in file_inputs.iteritems():
        input_dict[channel] = {
            'channel': channel,
            'data': {'contents': input_id}
        }
    # Override with cli user inputs if specified
    if self.args.inputs:
        for kv_pair in self.args.inputs:
            (channel, input_id) = kv_pair.split('=')
            input_dict[channel] = {
                'channel': channel,
                'data': {
                    'contents': self._parse_string_to_nested_lists(input_id)}
            }
    return input_dict.values()
Converts command line args into a list of template inputs
entailment
def create(self, validated_data):
    """ This is a standard method called indirectly by calling
    'save' on the serializer.

    This method expects the 'parent_field' and 'parent_instance' to
    be included in the Serializer context.
    """
    if self.context.get('parent_field') \
            and self.context.get('parent_instance'):
        validated_data.update({
            self.context.get('parent_field'):
            self.context.get('parent_instance')})
    instance = self.Meta.model(**validated_data)
    instance.full_clean()
    instance.save()
    return instance
This is a standard method called indirectly by calling 'save' on the serializer. This method expects the 'parent_field' and 'parent_instance' to be included in the Serializer context.
entailment
def disable_insecure_request_warning():
    """Suppress warning about untrusted SSL certificate."""
    import requests
    from requests.packages.urllib3.exceptions import InsecureRequestWarning
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
Suppress warning about untrusted SSL certificate.
entailment
def _make_request_to_server(self, query_function, raise_for_status=True,
                            time_limit_seconds=2, retry_delay_seconds=0.2):
    """Retry sending request until timeout or until receiving a response.
    """
    start_time = datetime.datetime.now()
    while datetime.datetime.now() - start_time < datetime.timedelta(
            0, time_limit_seconds):
        error = None
        response = None
        try:
            response = query_function()
        except requests.exceptions.ConnectionError as e:
            error = ServerConnectionError(
                "No response from server.\n%s" % e)
        except:
            if response:
                logger.info(response.text)
            raise
        if response is not None and raise_for_status:
            # raises requests.exceptions.HTTPError
            self._raise_for_status(response)
        if error:
            time.sleep(retry_delay_seconds)
            continue
        else:
            return response
    raise error
Retry sending request until timeout or until receiving a response.
entailment
def _get_resource(self, relative_url, params=None):
    """Convenience function for retrieving a resource.
    If resource does not exist, return None.
    """
    response = self._get(relative_url, params=params, raise_for_status=False)
    if response.status_code == 404:
        return None
    self._raise_for_status(response)
    return response.json()
Convenience function for retrieving a resource. If resource does not exist, return None.
entailment
def TaskAttemptInput(input, task_attempt):
    """Returns the correct Input class for a given
    data type and gather mode
    """
    (data_type, mode) = _get_input_info(input)

    if data_type != 'file':
        return NoOpInput(None, task_attempt)

    if mode == 'no_gather':
        return FileInput(input['data']['contents'], task_attempt)
    else:
        assert mode.startswith('gather')
        return FileListInput(input['data']['contents'], task_attempt)
Returns the correct Input class for a given data type and gather mode
entailment
def execute(task_function, *args, **kwargs):
    """Run a task asynchronously
    """
    if get_setting('TEST_DISABLE_ASYNC_DELAY'):
        # Delay disabled, run synchronously
        logger.debug('Running function "%s" synchronously because '\
                     'TEST_DISABLE_ASYNC_DELAY is True'
                     % task_function.__name__)
        return task_function(*args, **kwargs)

    db.connections.close_all()
    task_function.delay(*args, **kwargs)
Run a task asynchronously
entailment
def execute_with_delay(task_function, *args, **kwargs):
    """Run a task asynchronously after at least delay_seconds
    """
    delay = kwargs.pop('delay', 0)
    if get_setting('TEST_DISABLE_ASYNC_DELAY'):
        # Delay disabled, run synchronously
        logger.debug('Running function "%s" synchronously because '\
                     'TEST_DISABLE_ASYNC_DELAY is True'
                     % task_function.__name__)
        return task_function(*args, **kwargs)

    db.connections.close_all()
    task_function.apply_async(args=args, kwargs=kwargs, countdown=delay)
Run a task asynchronously after at least delay_seconds
entailment
def check_for_stalled_tasks():
    """Check for tasks that are no longer sending a heartbeat
    """
    from api.models.tasks import Task
    for task in Task.objects.filter(status_is_running=True):
        if not task.is_responsive():
            task.system_error()
        if task.is_timed_out():
            task.timeout_error()
Check for tasks that are no longer sending a heartbeat
entailment
def check_for_missed_cleanup():
    """Check for TaskAttempts that were never cleaned up
    """
    if get_setting('PRESERVE_ALL'):
        return
    from api.models.tasks import TaskAttempt
    if get_setting('PRESERVE_ON_FAILURE'):
        for task_attempt in TaskAttempt.objects.filter(
                status_is_running=False).filter(
                    status_is_cleaned_up=False).exclude(
                        status_is_failed=True):
            task_attempt.cleanup()
    else:
        for task_attempt in TaskAttempt.objects.filter(
                status_is_running=False).filter(status_is_cleaned_up=False):
            task_attempt.cleanup()
Check for TaskAttempts that were never cleaned up
entailment
def execute_with_retries(retryable_function,
                         retryable_errors,
                         logger,
                         human_readable_action_name='Action',
                         nonretryable_errors=None):
    """This attempts to execute "retryable_function" with exponential backoff
    on delay time.

    10 retries adds up to about 34 minutes total delay before the last attempt.

    "human_readable_action_name" is an optional input to customize the retry
    message.
    """
    max_retries = 10
    attempt = 0
    if not nonretryable_errors:
        nonretryable_errors = ()
    while True:
        try:
            return retryable_function()
        except tuple(nonretryable_errors):
            raise
        except tuple(retryable_errors) as e:
            attempt += 1
            if attempt > max_retries:
                raise
            # Exponential backoff on retry delay as suggested by
            # https://cloud.google.com/storage/docs/exponential-backoff
            delay = 2**attempt + random.random()
            logger.info('"%s" failed with error "%s". '\
                        'Retry number %s of %s in %s seconds'
                        % (human_readable_action_name, str(e),
                           attempt, max_retries, delay))
            time.sleep(delay)
This attempts to execute "retryable_function" with exponential backoff on delay time. 10 retries adds up to about 34 minutes total delay before the last attempt. "human_readable_action_name" is an optional input to customize the retry message.
entailment
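The "about 34 minutes" figure in the docstring checks out from the delay formula above; the waits before attempts 1 through 10 are 2**1 through 2**10 seconds plus jitter:

    total = sum(2**attempt for attempt in range(1, 11))  # 2046 seconds
    print(total / 60.0)  # ~34.1 minutes, excluding the random jitter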
def export_file(self, data_object, destination_directory=None,
                destination_filename=None, retry=False,
                export_metadata=False, export_raw_file=True):
    """Export a file from Loom to some file storage location.
    Default destination_directory is cwd. Default destination_filename is the
    filename from the file data object associated with the given file_id.
    """
    if not destination_directory:
        destination_directory = os.getcwd()

    # We get filename from the dataobject
    if not destination_filename:
        destination_filename = data_object['value']['filename']

    destination_file_url = os.path.join(destination_directory,
                                        destination_filename)

    logger.info('Exporting file %s@%s ...' % (
        data_object['value']['filename'],
        data_object['uuid']))

    if export_raw_file:
        destination = File(
            destination_file_url, self.storage_settings, retry=retry)
        if destination.exists():
            raise FileAlreadyExistsError(
                'File already exists at %s' % destination_file_url)
        logger.info('...copying file to %s' % (
            destination.get_url()))

        # Copy from the first file location
        file_resource = data_object.get('value')
        md5 = file_resource.get('md5')
        source_url = data_object['value']['file_url']
        File(source_url, self.storage_settings, retry=retry).copy_to(
            destination, expected_md5=md5)
        data_object['value'] = self._create_new_file_resource(
            data_object['value'], destination.get_url())
    else:
        logger.info('...skipping raw file')

    if export_metadata:
        data_object['value'].pop('link', None)
        data_object['value'].pop('upload_status', None)
        destination_metadata_url = destination_file_url + '.metadata.yaml'
        logger.info('...writing metadata to %s' % destination_metadata_url)
        metadata = yaml.safe_dump(data_object, default_flow_style=False)
        metadata_file = File(destination_metadata_url,
                             self.storage_settings, retry=retry)
        metadata_file.write(metadata)
    else:
        logger.info('...skipping metadata')

    logger.info('...finished file export')
Export a file from Loom to some file storage location. Default destination_directory is cwd. Default destination_filename is the filename from the file data object associated with the given file_id.
entailment
def _urlparse(path):
    """Like urlparse except it assumes 'file://' if no scheme is specified
    """
    url = urlparse.urlparse(path)
    _validate_url(url)
    if not url.scheme:
        # Normalize path, and set scheme to "file" if missing
        path = os.path.abspath(
            os.path.expanduser(path))
        url = urlparse.urlparse('file://'+path)
    return url
Like urlparse except it assumes 'file://' if no scheme is specified
entailment
def FilePattern(pattern, settings, **kwargs):
    """Factory method returns LocalFilePattern or GoogleStorageFilePattern
    """
    url = _urlparse(pattern)
    if url.scheme == 'gs':
        return GoogleStorageFilePattern(pattern, settings, **kwargs)
    else:
        assert url.scheme == 'file'
        return LocalFilePattern(pattern, settings, **kwargs)
Factory method returns LocalFilePattern or GoogleStorageFilePattern
entailment
def File(url, settings, retry=False):
    """Factory method
    """
    parsed_url = _urlparse(url)

    if parsed_url.scheme == 'gs':
        return GoogleStorageFile(url, settings, retry=retry)
    elif parsed_url.scheme == 'file':
        if parsed_url.hostname == 'localhost' or parsed_url.hostname is None:
            return LocalFile(url, settings, retry=retry)
        else:
            raise FileUtilsError(
                "Cannot process file url %s. Remote file hosts not supported."
                % url)
    else:
        raise FileUtilsError('Unsupported scheme "%s" in file "%s"'
                             % (parsed_url.scheme, url))
Factory method
entailment
def Copier(source, destination):
    """Factory method to select the right copier for a given source
    and destination.
    """
    if source.type == 'local' and destination.type == 'local':
        return LocalCopier(source, destination)
    elif source.type == 'local' and destination.type == 'google_storage':
        return Local2GoogleStorageCopier(source, destination)
    elif source.type == 'google_storage' and destination.type == 'local':
        return GoogleStorage2LocalCopier(source, destination)
    elif source.type == 'google_storage' and destination.type == 'google_storage':
        return GoogleStorageCopier(source, destination)
    else:
        raise FileUtilsError('Could not find method to copy from source '\
                             '"%s" to destination "%s".'
                             % (source, destination))
Factory method to select the right copier for a given source and destination.
entailment
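The if/elif chain above is a double dispatch on (source.type, destination.type); an equivalent table-driven variant (a sketch reusing the copier class names from the record above) keeps the mapping in one place:

    # Hypothetical table-driven rewrite of the Copier factory above
    COPIERS = {
        ('local', 'local'): LocalCopier,
        ('local', 'google_storage'): Local2GoogleStorageCopier,
        ('google_storage', 'local'): GoogleStorage2LocalCopier,
        ('google_storage', 'google_storage'): GoogleStorageCopier,
    }

    def Copier(source, destination):
        try:
            return COPIERS[(source.type, destination.type)](source, destination)
        except KeyError:
            raise FileUtilsError('Could not find method to copy from source '
                                 '"%s" to destination "%s".' % (source, destination))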
def create_from_data_channel(cls, data_channel):
    """Scan the data tree on the given data_channel to create a
    corresponding InputSetGenerator tree.
    """
    gather_depth = cls._get_gather_depth(data_channel)
    generator = InputSetGeneratorNode()
    for (data_path, data_node) in data_channel.get_ready_data_nodes(
            [], gather_depth):
        flat_data_node = data_node.flattened_clone(save=False)
        input_item = InputItem(
            flat_data_node, data_channel.channel,
            data_channel.as_channel, mode=data_channel.mode)
        generator._add_input_item(data_path, input_item)
    return generator
Scan the data tree on the given data_channel to create a corresponding InputSetGenerator tree.
entailment
def TaskAttemptOutput(output, task_attempt):
    """Returns the correct Output class for a given
    data type, source type, and scatter mode
    """
    (data_type, mode, source_type) = _get_output_info(output)

    if data_type == 'file':
        if mode == 'scatter':
            assert source_type in ['filenames', 'glob'], \
                'source type "%s" not allowed' % source_type
            if source_type == 'filenames':
                return FileListScatterOutput(output, task_attempt)
            return GlobScatterOutput(output, task_attempt)
        else:
            assert mode == 'no_scatter'
            assert source_type == 'filename', \
                'source type "%s" not allowed' % source_type
            return FileOutput(output, task_attempt)
    else:
        # data_type is non-file
        if mode == 'scatter':
            assert source_type in [
                'filename', 'filenames', 'glob', 'stream'], \
                'source type "%s" not allowed' % source_type
            if source_type == 'filename':
                return FileContentsScatterOutput(output, task_attempt)
            if source_type == 'filenames':
                return FileListContentsScatterOutput(output, task_attempt)
            if source_type == 'glob':
                return GlobContentsScatterOutput(output, task_attempt)
            assert source_type == 'stream'
            return StreamScatterOutput(output, task_attempt)
        else:
            assert mode == 'no_scatter'
            assert source_type in ['filename', 'stream'], \
                'source type "%s" not allowed' % source_type
            if source_type == 'filename':
                return FileContentsOutput(output, task_attempt)
            assert source_type == 'stream'
            return StreamOutput(output, task_attempt)
Returns the correct Output class for a given data type, source type, and scatter mode
entailment
def add_leaf(self, index, data_object, save=False):
    """Adds a new leaf node at the given index with the given data_object
    """
    assert self.type == data_object.type, 'data type mismatch'
    if self._get_child_by_index(index) is not None:
        raise NodeAlreadyExistsError(
            'Leaf data node already exists at this index')
    else:
        data_node = DataNode(
            parent=self,
            index=index,
            data_object=data_object,
            type=self.type)
        if save:
            data_node.full_clean()
            data_node.save()
        else:
            self._add_unsaved_child(data_node)
        return data_node
Adds a new leaf node at the given index with the given data_object
entailment
def get_ready_data_nodes(self, seed_path, gather_depth):
    """Returns a list [(path1,data_node1),...] with entries only for
    existing nodes with DataObjects where is_ready==True.
    Missing nodes or those with non-ready or non-existing data are ignored.
    """
    try:
        seed_node = self.get_node(seed_path)
    except MissingBranchError:
        return []
    all_paths = seed_node._get_all_paths(seed_path, gather_depth)
    ready_data_nodes = []
    for path in all_paths:
        if self.is_ready(data_path=path):
            ready_data_nodes.append((path, self.get_node(path)))
    return ready_data_nodes
Returns a list [(path1,data_node1),...] with entries only for existing nodes with DataObjects where is_ready==True. Missing nodes or those with non-ready or non-existing data are ignored.
entailment
def _check_index(self, index):
    """Verify that the given index is consistent with the degree of the node.
    """
    if self.degree is None:
        raise UnknownDegreeError(
            'Cannot access child DataNode on a parent with degree of None. '\
            'Set the degree on the parent first.')
    if index < 0 or index >= self.degree:
        raise IndexOutOfRangeError(
            'Out of range index %s. DataNode parent has degree %s, so index '\
            'should be in the range 0 to %s' % (
                index, self.degree, self.degree-1))
Verify that the given index is consistent with the degree of the node.
entailment
def on_gcloud_vm():
    """ Determines if we're running on a GCE instance."""
    r = None
    try:
        r = requests.get('http://metadata.google.internal')
    except requests.ConnectionError:
        return False

    try:
        if r.headers['Metadata-Flavor'] == 'Google' and \
           r.headers['Server'] == 'Metadata Server for VM':
            return True
    except KeyError:
        return False
    return False  # headers present but values did not match
Determines if we're running on a GCE instance.
entailment
def get_cheapest_instance_type(cores, memory):
    """Determine the cheapest instance type given a minimum number of cores
    and minimum amount of RAM (in GB).
    """
    pricelist = get_gcloud_pricelist()

    # Filter out preemptible, shared-CPU, and non-US instance types
    us_instance_types = {k: v for k, v in pricelist.items()
                         if k.startswith('CP-COMPUTEENGINE-VMIMAGE-')
                         and not k.endswith('-PREEMPTIBLE')
                         and 'us' in v
                         and v['cores'] != 'shared'}

    # Convert to array and add keys (instance type names) as type names
    price_array = []
    for key in us_instance_types:
        value = us_instance_types[key]
        value.update({'name': key.replace(
            'CP-COMPUTEENGINE-VMIMAGE-', '').lower()})
        price_array.append(value)

    # Sort by price in US (positional Python 2 sort args: cmp=None, key=...)
    price_array.sort(None, lambda x: x['us'])

    # Look for an instance type that satisfies requested cores and memory;
    # first will be cheapest
    for instance_type in price_array:
        if int(instance_type['cores']) >= int(cores) \
           and float(instance_type['memory']) >= float(memory):
            print instance_type['name']
            return instance_type['name']

    # No instance type found that can fulfill requested cores and memory
    raise Exception('No instance type found with at least %d cores '
                    'and %f GB of RAM.' % (cores, memory))
Determine the cheapest instance type given a minimum number of cores and minimum amount of RAM (in GB).
entailment
def get_gcloud_pricelist():
    """Retrieve latest pricelist from Google Cloud, or use cached copy
    if not reachable.
    """
    try:
        r = requests.get('http://cloudpricingcalculator.appspot.com'
                         '/static/data/pricelist.json')
        content = json.loads(r.content)
    except requests.ConnectionError:
        logger.warning(
            "Couldn't get updated pricelist from "
            "http://cloudpricingcalculator.appspot.com"
            "/static/data/pricelist.json. Falling back to cached "
            "copy, but prices may be out of date.")
        with open('gcloudpricelist.json') as infile:
            content = json.load(infile)
    pricelist = content['gcp_price_list']
    return pricelist
Retrieve latest pricelist from Google Cloud, or use cached copy if not reachable.
entailment
def _get_base_name(hostname, step_name, attempt_id, max_length):
    """Create a base name for the worker instance that will run the specified
    task run attempt, from this server. Since hostname and step name will be
    duplicated across workers (reruns, etc.), ensure that at least
    MIN_TASK_ID_CHARS are preserved in the instance name. Also, prevent names
    from ending with dashes.
    """
    max_length = int(max_length)
    if len(hostname) + len(step_name) + MIN_TASK_ID_CHARS + 2 > max_length:
        # round with ceil/floor such that extra char goes to hostname if odd
        hostname_chars = int(math.ceil(
            (max_length - MIN_TASK_ID_CHARS - 2) / float(2)))
        step_name_chars = int(math.floor(
            (max_length - MIN_TASK_ID_CHARS - 2) / float(2)))
        hostname = hostname[:hostname_chars]
        step_name = step_name[:step_name_chars]
    name_base = '-'.join([hostname, step_name, attempt_id])
    return _sanitize_instance_name(name_base, max_length)
Create a base name for the worker instance that will run the specified task run attempt, from this server. Since hostname and step name will be duplicated across workers (reruns, etc.), ensure that at least MIN_TASK_ID_CHARS are preserved in the instance name. Also, prevent names from ending with dashes.
entailment
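A quick check of the ceil/floor budget split above, with an assumed MIN_TASK_ID_CHARS of 8 and the 63-character GCE instance-name limit (both values are illustrative here):

    import math

    MIN_TASK_ID_CHARS = 8   # assumed value for illustration
    max_length = 63         # GCE instance-name length limit
    budget = max_length - MIN_TASK_ID_CHARS - 2           # 53 chars, minus 2 dashes
    hostname_chars = int(math.ceil(budget / float(2)))    # 27: odd extra char goes here
    step_name_chars = int(math.floor(budget / float(2)))  # 26
    assert hostname_chars + step_name_chars == budget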
def _sanitize_instance_name(name, max_length):
    """Instance names must start with a lowercase letter.
    All following characters must be a dash, lowercase letter, or digit.
    """
    name = str(name).lower()                 # make all letters lowercase
    name = re.sub(r'[^-a-z0-9]', '', name)   # remove invalid characters
    # remove non-lowercase letters from the beginning
    name = re.sub(r'^[^a-z]+', '', name)
    name = name[:max_length]
    name = re.sub(r'-+$', '', name)          # remove hyphens from the end
    return name
Instance names must start with a lowercase letter. All following characters must be a dash, lowercase letter, or digit.
entailment
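Worked through by hand, the sanitizer behaves like this (illustrative inputs; results traced from the regexes above):

    _sanitize_instance_name('My_Server.01-step-abc123', 64)
    # lowercase -> 'my_server.01-step-abc123'
    # invalid chars ('_' and '.') stripped -> 'myserver01-step-abc123'

    _sanitize_instance_name('42-worker--', 8)
    # leading non-letters stripped -> 'worker--'; truncation to 8 chars and
    # the trailing-hyphen pass leave 'worker'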
def is_valid(self, max_age=None):
    ''' Determines if the cache files have expired, or if they are still valid '''
    if max_age is None:
        max_age = self.cache_max_age

    if os.path.isfile(self.cache_path_cache):
        mod_time = os.path.getmtime(self.cache_path_cache)
        current_time = time()
        if (mod_time + max_age) > current_time:
            return True

    return False
Determines if the cache files have expired, or if they are still valid
entailment
def get_all_data_from_cache(self, filename=''):
    ''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
    data = ''
    if not filename:
        filename = self.cache_path_cache
    with open(filename, 'r') as cache:
        data = cache.read()
    return json.loads(data)
Reads the JSON inventory from the cache file. Returns Python dictionary.
entailment
def write_to_cache(self, data, filename=''):
    ''' Writes data to file as JSON. Returns True. '''
    if not filename:
        filename = self.cache_path_cache
    json_data = json.dumps(data)
    with open(filename, 'w') as cache:
        cache.write(json_data)
    return True
Writes data to file as JSON. Returns True.
entailment
def get_config(self):
    """
    Reads the settings from the gce.ini file.

    Populates a SafeConfigParser object with defaults and attempts to read
    an .ini-style configuration from the filename specified in GCE_INI_PATH.
    If the environment variable is not present, the filename defaults to
    gce.ini in the same directory as this script.
    """
    gce_ini_default_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "gce.ini")
    gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)

    # Create a ConfigParser.
    # This provides empty defaults to each key, so that environment
    # variable configuration (as opposed to INI configuration) is able
    # to work.
    config = ConfigParser.SafeConfigParser(defaults={
        'gce_service_account_email_address': '',
        'gce_service_account_pem_file_path': '',
        'gce_project_id': '',
        'libcloud_secrets': '',
        'inventory_ip_type': '',
        'cache_path': '~/.ansible/tmp',
        'cache_max_age': '300'
    })
    if 'gce' not in config.sections():
        config.add_section('gce')
    if 'inventory' not in config.sections():
        config.add_section('inventory')
    if 'cache' not in config.sections():
        config.add_section('cache')

    config.read(gce_ini_path)

    #########
    # Section added for processing ini settings
    #########

    # Set the instance_states filter based on config file options
    self.instance_states = []
    if config.has_option('gce', 'instance_states'):
        states = config.get('gce', 'instance_states')
        # Ignore if instance_states is an empty string.
        if states:
            self.instance_states = states.split(',')

    # Caching
    cache_path = config.get('cache', 'cache_path')
    cache_max_age = config.getint('cache', 'cache_max_age')
    # TODO(supertom): support project-specific caches
    cache_name = 'ansible-gce.cache'
    self.cache = CloudInventoryCache(cache_path=cache_path,
                                     cache_max_age=cache_max_age,
                                     cache_name=cache_name)

    return config
Reads the settings from the gce.ini file. Populates a SafeConfigParser object with defaults and attempts to read an .ini-style configuration from the filename specified in GCE_INI_PATH. If the environment variable is not present, the filename defaults to gce.ini in the same directory as this script.
entailment
def get_inventory_options(self):
    """Determine inventory options. Environment variables always take
    precedence over configuration files."""
    ip_type = self.config.get('inventory', 'inventory_ip_type')
    # If the appropriate environment variables are set, they override
    # other configuration
    ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
    return ip_type
Determine inventory options. Environment variables always take precedence over configuration files.
entailment
def get_gce_driver(self):
    """Determine the GCE authorization settings and return a
    libcloud driver.
    """
    # Attempt to get GCE params from a configuration file, if one
    # exists.
    secrets_path = self.config.get('gce', 'libcloud_secrets')
    secrets_found = False
    try:
        import secrets
        args = list(getattr(secrets, 'GCE_PARAMS', []))
        kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
        secrets_found = True
    except ImportError:
        pass

    if not secrets_found and secrets_path:
        if not secrets_path.endswith('secrets.py'):
            err = "Must specify libcloud secrets file as "
            err += "/absolute/path/to/secrets.py"
            sys.exit(err)
        sys.path.append(os.path.dirname(secrets_path))
        try:
            import secrets
            args = list(getattr(secrets, 'GCE_PARAMS', []))
            kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
            secrets_found = True
        except ImportError:
            pass
    if not secrets_found:
        args = [
            self.config.get('gce', 'gce_service_account_email_address'),
            self.config.get('gce', 'gce_service_account_pem_file_path')
        ]
        kwargs = {'project': self.config.get('gce', 'gce_project_id')}

    # If the appropriate environment variables are set, they override
    # other configuration; process those into our args and kwargs.
    args[0] = os.environ.get('GCE_EMAIL', args[0])
    args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
    kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])

    # Retrieve and return the GCE driver.
    gce = get_driver(Provider.GCE)(*args, **kwargs)
    gce.connection.user_agent_append(
        '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
    )
    return gce
Determine the GCE authorization settings and return a libcloud driver.
entailment
def parse_env_zones(self):
    '''returns a list of comma separated zones parsed from the GCE_ZONE
    environment variable. If provided, this will be used to filter the
    results of the grouped_instances call'''
    import csv
    reader = csv.reader([os.environ.get('GCE_ZONE', "")],
                        skipinitialspace=True)
    zones = [r for r in reader]
    return [z for z in zones[0]]
returns a list of comma separated zones parsed from the GCE_ZONE environment variable. If provided, this will be used to filter the results of the grouped_instances call
entailment
def parse_cli_args(self):
    ''' Command line argument processing '''
    parser = argparse.ArgumentParser(
        description='Produce an Ansible Inventory file based on GCE')
    parser.add_argument('--list', action='store_true', default=True,
                        help='List instances (default: True)')
    parser.add_argument('--host', action='store',
                        help='Get all information about an instance')
    parser.add_argument('--pretty', action='store_true', default=False,
                        help='Pretty format (default: False)')
    parser.add_argument(
        '--refresh-cache', action='store_true', default=False,
        help='Force refresh of cache by making API requests (default: False - use cache files)')
    self.args = parser.parse_args()
Command line argument processing
entailment
def load_inventory_from_cache(self):
    ''' Loads inventory from JSON on disk. '''
    try:
        self.inventory = self.cache.get_all_data_from_cache()
        hosts = self.inventory['_meta']['hostvars']
    except Exception as e:
        print(
            "Invalid inventory file %s. Please rebuild with "
            "--refresh-cache option."
            % (self.cache.cache_path_cache))
        raise
Loads inventory from JSON on disk.
entailment
def do_api_calls_update_cache(self):
    ''' Do API calls and save data in cache. '''
    zones = self.parse_env_zones()
    data = self.group_instances(zones)
    self.cache.write_to_cache(data)
    self.inventory = data
Do API calls and save data in cache.
entailment
def group_instances(self, zones=None):
    '''Group all instances'''
    groups = {}
    meta = {}
    meta["hostvars"] = {}

    for node in self.list_nodes():

        # This check filters on the desired instance states defined in the
        # config file with the instance_states config option.
        #
        # If the instance_states list is _empty_ then _ALL_ states are returned.
        #
        # If the instance_states list is _populated_ then check the current
        # state against the instance_states list
        if self.instance_states and not node.extra['status'] in self.instance_states:
            continue

        name = node.name
        meta["hostvars"][name] = self.node_to_dict(node)

        zone = node.extra['zone'].name

        # To avoid making multiple requests per zone
        # we list all nodes and then filter the results
        if zones and zone not in zones:
            continue

        if zone in groups:
            groups[zone].append(name)
        else:
            groups[zone] = [name]

        tags = node.extra['tags']
        for t in tags:
            if t.startswith('group-'):
                tag = t[6:]
            else:
                tag = 'tag_%s' % t
            if tag in groups:
                groups[tag].append(name)
            else:
                groups[tag] = [name]

        net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
        net = 'network_%s' % net
        if net in groups:
            groups[net].append(name)
        else:
            groups[net] = [name]

        machine_type = node.size
        if machine_type in groups:
            groups[machine_type].append(name)
        else:
            groups[machine_type] = [name]

        image = node.image and node.image or 'persistent_disk'
        if image in groups:
            groups[image].append(name)
        else:
            groups[image] = [name]

        status = node.extra['status']
        stat = 'status_%s' % status.lower()
        if stat in groups:
            groups[stat].append(name)
        else:
            groups[stat] = [name]

    groups["_meta"] = meta

    return groups
Group all instances
entailment
def json_format_dict(self, data, pretty=False):
    ''' Converts a dict to a JSON object and dumps it as a formatted string '''
    if pretty:
        return json.dumps(data, sort_keys=True, indent=2)
    else:
        return json.dumps(data)
Converts a dict to a JSON object and dumps it as a formatted string
entailment
def _stream_docker_logs(self):
    """Stream stdout and stderr from the task container to this
    process's stdout and stderr, respectively.
    """
    thread = threading.Thread(target=self._stderr_stream_worker)
    thread.start()
    for line in self.docker_client.logs(
            self.container, stdout=True, stderr=False, stream=True):
        sys.stdout.write(line)
    thread.join()
Stream stdout and stderr from the task container to this process's stdout and stderr, respectively.
entailment
def to_internal_value(self, data):
    """Because we allow template ID string values, where
    serializers normally expect a dict
    """
    converted_data = _convert_template_id_to_dict(data)
    return super(TemplateSerializer, self)\
        .to_internal_value(converted_data)
Because we allow template ID string values, where serializers normally expect a dict
entailment
def identifier_cmp(a, b):
    """Compare two identifiers (for pre-release/build components)."""

    a_cmp, a_is_int = _to_int(a)
    b_cmp, b_is_int = _to_int(b)

    if a_is_int and b_is_int:
        # Numeric identifiers are compared as integers
        return base_cmp(a_cmp, b_cmp)
    elif a_is_int:
        # Numeric identifiers have lower precedence
        return -1
    elif b_is_int:
        return 1
    else:
        # Non-numeric identifiers are compared lexicographically
        return base_cmp(a_cmp, b_cmp)
Compare two identifiers (for pre-release/build components).
entailment
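Traced against the branches above (assuming identifier_cmp from the record is in scope, and that _to_int returns (int(s), True) for digit strings and (s, False) otherwise):

    identifier_cmp('2', '11')        # -1: both numeric, 2 < 11
    identifier_cmp('11', 'beta')     # -1: numeric sorts below non-numeric
    identifier_cmp('alpha', 'beta')  # -1: lexicographic comparison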
def identifier_list_cmp(a, b):
    """Compare two identifier lists (pre-release/build components).

    The rule is:
        - Identifiers are paired between lists
        - They are compared from left to right
        - If all first identifiers match, the longest list is greater

    >>> identifier_list_cmp(['1', '2'], ['1', '2'])
    0
    >>> identifier_list_cmp(['1', '2a'], ['1', '2b'])
    -1
    >>> identifier_list_cmp(['1'], ['1', '2'])
    -1
    """
    identifier_pairs = zip(a, b)
    for id_a, id_b in identifier_pairs:
        cmp_res = identifier_cmp(id_a, id_b)
        if cmp_res != 0:
            return cmp_res
    # alpha1.3 < alpha1.3.1
    return base_cmp(len(a), len(b))
Compare two identifier lists (pre-release/build components). The rule is: - Identifiers are paired between lists - They are compared from left to right - If all first identifiers match, the longest list is greater. >>> identifier_list_cmp(['1', '2'], ['1', '2']) 0 >>> identifier_list_cmp(['1', '2a'], ['1', '2b']) -1 >>> identifier_list_cmp(['1'], ['1', '2']) -1
entailment
def coerce(cls, version_string, partial=False):
    """Coerce an arbitrary version string into a semver-compatible one.

    The rule is:
    - If not enough components, fill minor/patch with zeroes; unless
      partial=True
    - If more than 3 dot-separated components, extra components are "build"
      data. If some "build" data already appeared, append it to the
      extra components

    Examples:
        >>> Version.coerce('0.1')
        Version(0, 1, 0)
        >>> Version.coerce('0.1.2.3')
        Version(0, 1, 2, (), ('3',))
        >>> Version.coerce('0.1.2.3+4')
        Version(0, 1, 2, (), ('3', '4'))
        >>> Version.coerce('0.1+2-3+4_5')
        Version(0, 1, 0, (), ('2-3', '4-5'))
    """
    base_re = re.compile(r'^\d+(?:\.\d+(?:\.\d+)?)?')

    match = base_re.match(version_string)
    if not match:
        raise ValueError(
            "Version string lacks a numerical component: %r"
            % version_string
        )

    version = version_string[:match.end()]
    if not partial:
        # We need a not-partial version.
        while version.count('.') < 2:
            version += '.0'

    if match.end() == len(version_string):
        return Version(version, partial=partial)

    rest = version_string[match.end():]

    # Cleanup the 'rest'
    rest = re.sub(r'[^a-zA-Z0-9+.-]', '-', rest)

    if rest[0] == '+':
        # A 'build' component
        prerelease = ''
        build = rest[1:]
    elif rest[0] == '.':
        # An extra version component, probably 'build'
        prerelease = ''
        build = rest[1:]
    elif rest[0] == '-':
        rest = rest[1:]
        if '+' in rest:
            prerelease, build = rest.split('+', 1)
        else:
            prerelease, build = rest, ''
    elif '+' in rest:
        prerelease, build = rest.split('+', 1)
    else:
        prerelease, build = rest, ''

    build = build.replace('+', '.')

    if prerelease:
        version = '%s-%s' % (version, prerelease)
    if build:
        version = '%s+%s' % (version, build)

    return cls(version, partial=partial)
Coerce an arbitrary version string into a semver-compatible one. The rule is: - If not enough components, fill minor/patch with zeroes; unless partial=True - If more than 3 dot-separated components, extra components are "build" data. If some "build" data already appeared, append it to the extra components Examples: >>> Version.coerce('0.1') Version(0, 1, 0) >>> Version.coerce('0.1.2.3') Version(0, 1, 2, (), ('3',)) >>> Version.coerce('0.1.2.3+4') Version(0, 1, 2, (), ('3', '4')) >>> Version.coerce('0.1+2-3+4_5') Version(0, 1, 0, (), ('2-3', '4-5'))
entailment
def parse(cls, version_string, partial=False, coerce=False):
    """Parse a version string into a Version() object.

    Args:
        version_string (str), the version string to parse
        partial (bool), whether to accept incomplete input
        coerce (bool), whether to try to map the passed in string into a
            valid Version.
    """
    if not version_string:
        raise ValueError('Invalid empty version string: %r' % version_string)

    if partial:
        version_re = cls.partial_version_re
    else:
        version_re = cls.version_re

    match = version_re.match(version_string)
    if not match:
        raise ValueError('Invalid version string: %r' % version_string)

    major, minor, patch, prerelease, build = match.groups()

    if _has_leading_zero(major):
        raise ValueError("Invalid leading zero in major: %r" % version_string)
    if _has_leading_zero(minor):
        raise ValueError("Invalid leading zero in minor: %r" % version_string)
    if _has_leading_zero(patch):
        raise ValueError("Invalid leading zero in patch: %r" % version_string)

    major = int(major)
    minor = cls._coerce(minor, partial)
    patch = cls._coerce(patch, partial)

    if prerelease is None:
        if partial and (build is None):
            # No build info, strip here
            return (major, minor, patch, None, None)
        else:
            prerelease = ()
    elif prerelease == '':
        prerelease = ()
    else:
        prerelease = tuple(prerelease.split('.'))
        cls._validate_identifiers(prerelease, allow_leading_zeroes=False)

    if build is None:
        if partial:
            build = None
        else:
            build = ()
    elif build == '':
        build = ()
    else:
        build = tuple(build.split('.'))
        cls._validate_identifiers(build, allow_leading_zeroes=True)

    return (major, minor, patch, prerelease, build)
Parse a version string into a Version() object. Args: version_string (str), the version string to parse partial (bool), whether to accept incomplete input coerce (bool), whether to try to map the passed in string into a valid Version.
entailment
def _comparison_functions(cls, partial=False):
    """Retrieve comparison methods to apply on version components.

    This is a private API.

    Args:
        partial (bool): whether to provide 'partial' or 'strict' matching.

    Returns:
        5-tuple of cmp-like functions.
    """

    def prerelease_cmp(a, b):
        """Compare prerelease components.

        Special rule: a version without prerelease component has higher
        precedence than one with a prerelease component.
        """
        if a and b:
            return identifier_list_cmp(a, b)
        elif a:
            # Versions with prerelease field have lower precedence
            return -1
        elif b:
            return 1
        else:
            return 0

    def build_cmp(a, b):
        """Compare build metadata.

        Special rule: there is no ordering on build metadata.
        """
        if a == b:
            return 0
        else:
            return NotImplemented

    def make_optional(orig_cmp_fun):
        """Convert a cmp-like function to consider 'None == *'."""
        @functools.wraps(orig_cmp_fun)
        def alt_cmp_fun(a, b):
            if a is None or b is None:
                return 0
            return orig_cmp_fun(a, b)

        return alt_cmp_fun

    if partial:
        return [
            base_cmp,  # Major is still mandatory
            make_optional(base_cmp),
            make_optional(base_cmp),
            make_optional(prerelease_cmp),
            make_optional(build_cmp),
        ]
    else:
        return [
            base_cmp,
            base_cmp,
            base_cmp,
            prerelease_cmp,
            build_cmp,
        ]
Retrieve comparison methods to apply on version components. This is a private API. Args: partial (bool): whether to provide 'partial' or 'strict' matching. Returns: 5-tuple of cmp-like functions.
entailment
def __compare_helper(self, other, condition, notimpl_target):
    """Helper for comparison.

    Allows the caller to provide:
    - The condition
    - The return value if the comparison is meaningless (ie versions with
      build metadata).
    """
    if not isinstance(other, self.__class__):
        return NotImplemented

    cmp_res = self.__cmp__(other)
    if cmp_res is NotImplemented:
        return notimpl_target

    return condition(cmp_res)
Helper for comparison. Allows the caller to provide: - The condition - The return value if the comparison is meaningless (ie versions with build metadata).
entailment
def match(self, version):
    """Check whether a Version satisfies the Spec."""
    return all(spec.match(version) for spec in self.specs)
Check whether a Version satisfies the Spec.
entailment
def select(self, versions):
    """Select the best compatible version among an iterable of options."""
    options = list(self.filter(versions))
    if options:
        return max(options)
    return None
Select the best compatible version among an iterable of options.
entailment
def deconstruct(self):
    """Handle django.db.migrations."""
    name, path, args, kwargs = super(VersionField, self).deconstruct()
    kwargs['partial'] = self.partial
    kwargs['coerce'] = self.coerce
    return name, path, args, kwargs
Handle django.db.migrations.
entailment
def to_python(self, value):
    """Converts any value to a base.Version field."""
    if value is None or value == '':
        return value
    if isinstance(value, base.Version):
        return value
    if self.coerce:
        return base.Version.coerce(value, partial=self.partial)
    else:
        return base.Version(value, partial=self.partial)
Converts any value to a base.Version field.
entailment
def to_python(self, value):
    """Converts any value to a base.Spec field."""
    if value is None or value == '':
        return value
    if isinstance(value, base.Spec):
        return value
    return base.Spec(value)
Converts any value to a base.Spec field.
entailment
def move_left(self):
    """Make the drone move left."""
    self.at(ardrone.at.pcmd, True, -self.speed, 0, 0, 0)
Make the drone move left.
entailment
def move_right(self):
    """Make the drone move right."""
    self.at(ardrone.at.pcmd, True, self.speed, 0, 0, 0)
Make the drone move right.
entailment
def move_up(self):
    """Make the drone rise upwards."""
    self.at(ardrone.at.pcmd, True, 0, 0, self.speed, 0)
Make the drone rise upwards.
entailment
def move_down(self):
    """Make the drone descend downwards."""
    self.at(ardrone.at.pcmd, True, 0, 0, -self.speed, 0)
Make the drone descend downwards.
entailment
def move_forward(self):
    """Make the drone move forward."""
    self.at(ardrone.at.pcmd, True, 0, -self.speed, 0, 0)
Make the drone move forward.
entailment
def move_backward(self):
    """Make the drone move backwards."""
    self.at(ardrone.at.pcmd, True, 0, self.speed, 0, 0)
Make the drone move backwards.
entailment
def turn_left(self):
    """Make the drone rotate left."""
    self.at(ardrone.at.pcmd, True, 0, 0, 0, -self.speed)
Make the drone rotate left.
entailment
def turn_right(self):
    """Make the drone rotate right."""
    self.at(ardrone.at.pcmd, True, 0, 0, 0, self.speed)
Make the drone rotate right.
entailment
def reset(self):
    """Toggle the drone's emergency state."""
    self.at(ardrone.at.ref, False, True)
    time.sleep(0.1)
    self.at(ardrone.at.ref, False, False)
Toggle the drone's emergency state.
entailment
def at(self, cmd, *args, **kwargs):
    """Wrapper for the low level at commands.

    This method takes care that the sequence number is increased after each
    at command and the watchdog timer is started to make sure the drone
    receives a command at least every second.
    """
    with self.lock:
        self.com_watchdog_timer.cancel()
        cmd(self.host, self.sequence, *args, **kwargs)
        self.sequence += 1
        self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
        self.com_watchdog_timer.start()
Wrapper for the low level at commands. This method takes care that the sequence number is increased after each at command and the watchdog timer is started to make sure the drone receives a command at least every second.
entailment
def halt(self):
    """Shutdown the drone.

    This method does not land or halt the actual drone, but the
    communication with the drone. You should call it at the end of your
    application to close all sockets, pipes, processes and threads related
    with this object.
    """
    with self.lock:
        self.com_watchdog_timer.cancel()
        self.ipc_thread.stop()
        self.ipc_thread.join()
        self.network_process.terminate()
        self.network_process.join()
Shutdown the drone. This method does not land or halt the actual drone, but the communication with the drone. You should call it at the end of your application to close all sockets, pipes, processes and threads related with this object.
entailment
def move(self, lr, fb, vv, va):
    """Makes the drone move (translate/rotate).

    Parameters:
    lr -- left-right tilt: float [-1..1] negative: left, positive: right
    fb -- front-back tilt: float [-1..1] negative: forwards, positive: backwards
    vv -- vertical speed: float [-1..1] negative: go down, positive: rise
    va -- angular speed: float [-1..1] negative: spin left, positive: spin right
    """
    self.at(ardrone.at.pcmd, True, lr, fb, vv, va)
Makes the drone move (translate/rotate). Parameters: lr -- left-right tilt: float [-1..1] negative: left, positive: right fb -- front-back tilt: float [-1..1] negative: forwards, positive: backwards vv -- vertical speed: float [-1..1] negative: go down, positive: rise va -- angular speed: float [-1..1] negative: spin left, positive: spin right
entailment
def ref(host, seq, takeoff, emergency=False):
    """
    Basic behaviour of the drone: take-off/landing, emergency stop/reset.

    Parameters:
    seq -- sequence number
    takeoff -- True: Takeoff / False: Land
    emergency -- True: Turn off the engines
    """
    p = 0b10001010101000000000000000000
    if takeoff:
        p |= 0b1000000000
    if emergency:
        p |= 0b100000000
    at(host, 'REF', seq, [p])
Basic behaviour of the drone: take-off/landing, emergency stop/reset. Parameters: seq -- sequence number takeoff -- True: Takeoff / False: Land emergency -- True: Turn off the engines
entailment
def pcmd(host, seq, progressive, lr, fb, vv, va):
    """
    Makes the drone move (translate/rotate).

    Parameters:
    seq -- sequence number
    progressive -- True: enable progressive commands, False: disable
        (i.e. enable hovering mode)
    lr -- left-right tilt: float [-1..1] negative: left, positive: right
    fb -- front-back tilt: float [-1..1] negative: forwards, positive: backwards
    vv -- vertical speed: float [-1..1] negative: go down, positive: rise
    va -- angular speed: float [-1..1] negative: spin left, positive: spin right

    The above float values are a percentage of the maximum speed.
    """
    p = 1 if progressive else 0
    at(host, 'PCMD', seq, [p, float(lr), float(fb), float(vv), float(va)])
Makes the drone move (translate/rotate). Parameters: seq -- sequence number progressive -- True: enable progressive commands, False: disable (i.e. enable hovering mode) lr -- left-right tilt: float [-1..1] negative: left, positive: right fb -- front-back tilt: float [-1..1] negative: forwards, positive: backwards vv -- vertical speed: float [-1..1] negative: go down, positive: rise va -- angular speed: float [-1..1] negative: spin left, positive: spin right The above float values are a percentage of the maximum speed.
entailment
def config(host, seq, option, value): """Set configuration parameters of the drone.""" at(host, 'CONFIG', seq, [str(option), str(value)])
Set configuration parameters of the drone.
entailment
def pwm(host, seq, m1, m2, m3, m4): """ Sends control values directly to the engines, overriding control loops. Parameters: seq -- sequence number m1 -- Integer: front left command m2 -- Integer: front right command m3 -- Integer: back right command m4 -- Integer: back left command """ at(host, 'PWM', seq, [m1, m2, m3, m4])
Sends control values directly to the engines, overriding control loops. Parameters: seq -- sequence number m1 -- Integer: front left command m2 -- Integer: front right command m3 -- Integer: back right command m4 -- Integer: back left command
entailment
def led(host, seq, anim, f, d):
    """
    Control the drone's LED.

    Parameters:
    seq -- sequence number
    anim -- Integer: animation to play
    f -- Float: frequency in Hz of the animation
    d -- Integer: total duration in seconds of the animation
    """
    at(host, 'LED', seq, [anim, float(f), d])
Control the drone's LED. Parameters: seq -- sequence number anim -- Integer: animation to play f -- Float: frequency in Hz of the animation d -- Integer: total duration in seconds of the animation
entailment
def anim(host, seq, anim, d):
    """
    Makes the drone execute a predefined movement (animation).

    Parameters:
    seq -- sequence number
    anim -- Integer: animation to play
    d -- Integer: total duration in seconds of the animation
    """
    at(host, 'ANIM', seq, [anim, d])
Makes the drone execute a predefined movement (animation). Parameters: seq -- sequence number anim -- Integer: animation to play d -- Integer: total duration in seconds of the animation
entailment
def at(host, command, seq, params):
    """
    Send an AT command to the drone over UDP.

    Parameters:
    command -- the command
    seq -- the sequence number
    params -- a list of elements which can be either int, float or string
    """
    params_str = []
    for p in params:
        if isinstance(p, int):
            params_str.append('{:d}'.format(p))
        elif isinstance(p, float):
            # Floats are transmitted as the signed 32-bit integer that shares
            # their IEEE-754 bit pattern (see f2i).
            params_str.append('{:d}'.format(f2i(p)))
        elif isinstance(p, str):
            params_str.append('"{:s}"'.format(p))
        else:
            raise TypeError('unsupported parameter type: %s' % type(p))
    msg = 'AT*{:s}={:d},{:s}\r'.format(command, seq, ','.join(params_str))
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT))
Send an AT command to the drone over UDP. Parameters: command -- the command seq -- the sequence number params -- a list of elements which can be either int, float or string
entailment
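The helper f2i is not shown in this excerpt. A common definition (an assumption, not confirmed by this source) reinterprets the float's IEEE-754 bit pattern as a signed 32-bit integer, which is the representation the AR.Drone AT protocol expects for float arguments:

import struct

def f2i(f):
    # Reinterpret a 32-bit float's bit pattern as a signed 32-bit int.
    return struct.unpack('i', struct.pack('f', f))[0]

print(f2i(-0.8))  # -1085485875, the value that would appear in an AT*PCMD string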
def decode(packet): """Decode a navdata packet.""" offset = 0 _ = struct.unpack_from('IIII', packet, offset) s = _[1] state = dict() state['fly'] = s & 1 # FLY MASK : (0) ardrone is landed, (1) ardrone is flying state['video'] = s >> 1 & 1 # VIDEO MASK : (0) video disable, (1) video enable state['vision'] = s >> 2 & 1 # VISION MASK : (0) vision disable, (1) vision enable state['control'] = s >> 3 & 1 # CONTROL ALGO (0) euler angles control, (1) angular speed control state['altitude'] = s >> 4 & 1 # ALTITUDE CONTROL ALGO : (0) altitude control inactive (1) altitude control active state['user_feedback_start'] = s >> 5 & 1 # USER feedback : Start button state state['command'] = s >> 6 & 1 # Control command ACK : (0) None, (1) one received state['fw_file'] = s >> 7 & 1 # Firmware file is good (1) state['fw_ver'] = s >> 8 & 1 # Firmware update is newer (1) state['fw_upd'] = s >> 9 & 1 # Firmware update is ongoing (1) state['navdata_demo'] = s >> 10 & 1 # Navdata demo : (0) All navdata, (1) only navdata demo state['navdata_bootstrap'] = s >> 11 & 1 # Navdata bootstrap : (0) options sent in all or demo mode, (1) no navdata options sent state['motors'] = s >> 12 & 1 # Motor status : (0) Ok, (1) Motors problem state['com_lost'] = s >> 13 & 1 # Communication lost : (1) com problem, (0) Com is ok state['vbat_low'] = s >> 15 & 1 # VBat low : (1) too low, (0) Ok state['user_el'] = s >> 16 & 1 # User Emergency Landing : (1) User EL is ON, (0) User EL is OFF state['timer_elapsed'] = s >> 17 & 1 # Timer elapsed : (1) elapsed, (0) not elapsed state['angles_out_of_range'] = s >> 19 & 1 # Angles : (0) Ok, (1) out of range state['ultrasound'] = s >> 21 & 1 # Ultrasonic sensor : (0) Ok, (1) deaf state['cutout'] = s >> 22 & 1 # Cutout system detection : (0) Not detected, (1) detected state['pic_version'] = s >> 23 & 1 # PIC Version number OK : (0) a bad version number, (1) version number is OK state['atcodec_thread_on'] = s >> 24 & 1 # ATCodec thread ON : (0) thread OFF (1) thread ON state['navdata_thread_on'] = s >> 25 & 1 # Navdata thread ON : (0) thread OFF (1) thread ON state['video_thread_on'] = s >> 26 & 1 # Video thread ON : (0) thread OFF (1) thread ON state['acq_thread_on'] = s >> 27 & 1 # Acquisition thread ON : (0) thread OFF (1) thread ON state['ctrl_watchdog'] = s >> 28 & 1 # CTRL watchdog : (1) delay in control execution (> 5ms), (0) control is well scheduled state['adc_watchdog'] = s >> 29 & 1 # ADC Watchdog : (1) delay in uart2 dsr (> 5ms), (0) uart2 is good state['com_watchdog'] = s >> 30 & 1 # Communication Watchdog : (1) com problem, (0) Com is ok state['emergency'] = s >> 31 & 1 # Emergency landing : (0) no emergency, (1) emergency data = dict() data['state'] = state data['header'] = _[0] data['sequence'] = _[2] data['vision'] = _[3] offset += struct.calcsize('IIII') demo_fields = [ 'ctrl_state', 'battery', 'theta', 'phi', 'psi', 'altitude', 'vx', 'vy', 'vz', 'num_frames' ] angles = ['theta', 'phi', 'psi'] while True: try: id_nr, size = struct.unpack_from('HH', packet, offset) offset += struct.calcsize('HH') except struct.error: break values = [] for i in range(size - struct.calcsize('HH')): values.append(struct.unpack_from('c', packet, offset)[0]) offset += struct.calcsize('c') if id_nr == 0: values = struct.unpack_from('IIfffIfffI', b''.join(values)) demo = dict(zip(demo_fields, values)) for a in angles: demo[a] = int(demo[a] / 1000) data['demo'] = demo return data
Decode a navdata packet.
entailment
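A short usage sketch (hypothetical: packet holds raw bytes read from the navdata UDP socket, and the demo block is only present when the drone sends option id 0):

navdata = decode(packet)
if navdata['state']['emergency']:
    print('drone is in emergency mode')
demo = navdata.get('demo')  # absent unless the demo option was in the packet
if demo is not None:
    print('battery: {} altitude: {}'.format(demo['battery'], demo['altitude']))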
def tofile(self, filename, format='ascii'):
    """Save VTK data to file.
    """
    if not common.is_string(filename):
        raise TypeError('argument filename must be string but got %s'%(type(filename)))
    if format not in ['ascii', 'binary']:
        raise TypeError('argument format must be ascii | binary')
    filename = filename.strip()
    if not filename:
        raise ValueError('filename must be non-empty string')
    if filename[-4:] != '.vtk':
        filename += '.vtk'
    with open(filename, 'wb') as f:
        f.write(self.to_string(format))
Save VTK data to file.
entailment
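A usage sketch (file names are assumptions for illustration; VtkData(<filename>) reads a file, as noted in the parser docstring below):

data = VtkData('mesh.vtk')                    # hypothetical input file
data.tofile('mesh_binary', format='binary')   # writes mesh_binary.vtk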
def _get_attr_value(instance, attr, default=None): """ Simple helper to get the value of an instance's attribute if it exists. If the instance attribute is callable it will be called and the result will be returned. Optionally accepts a default value to return if the attribute is missing. Defaults to `None` >>> class Foo(object): ... bar = 'baz' ... def hi(self): ... return 'hi' >>> f = Foo() >>> _get_attr_value(f, 'bar') 'baz' >>> _get_attr_value(f, 'xyz') >>> _get_attr_value(f, 'xyz', False) False >>> _get_attr_value(f, 'hi') 'hi' """ value = default if hasattr(instance, attr): value = getattr(instance, attr) if callable(value): value = value() return value
Simple helper to get the value of an instance's attribute if it exists. If the instance attribute is callable it will be called and the result will be returned. Optionally accepts a default value to return if the attribute is missing. Defaults to `None` >>> class Foo(object): ... bar = 'baz' ... def hi(self): ... return 'hi' >>> f = Foo() >>> _get_attr_value(f, 'bar') 'baz' >>> _get_attr_value(f, 'xyz') >>> _get_attr_value(f, 'xyz', False) False >>> _get_attr_value(f, 'hi') 'hi'
entailment
def audit_customer_subscription(customer, unknown=True):
    """
    Audits the provided customer's subscription against stripe and returns a pair
    that contains a boolean and a result type.

    Default result types can be found in zebra.conf.defaults and can be
    overridden in your project's settings.
    """
    if hasattr(customer, 'suspended') and customer.suspended:
        result = AUDIT_RESULTS['suspended']
    elif hasattr(customer, 'subscription'):
        try:
            result = AUDIT_RESULTS[customer.subscription.status]
        except KeyError as err:
            # TODO should this be a more specific exception class?
            raise Exception("Unable to locate a result set for subscription status %s in ZEBRA_AUDIT_RESULTS" % str(err))
    else:
        result = AUDIT_RESULTS['no_subscription']
    return result
Audits the provided customer's subscription against stripe and returns a pair that contains a boolean and a result type. Default result types can be found in zebra.conf.defaults and can be overridden in your project's settings.
entailment
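A sketch of the AUDIT_RESULTS mapping this function indexes, inferred from the docstring's description of a (boolean, result type) pair; the concrete keys and values live in zebra.conf.defaults, and the entries below are assumptions:

AUDIT_RESULTS = {
    'active': (True, 'active'),
    'past_due': (False, 'past_due'),
    'suspended': (False, 'suspended'),
    'no_subscription': (False, 'no_subscription'),
}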
def polydata_fromfile(f, self):
    """Use VtkData(<filename>)."""
    points = []
    data = dict(vertices=[], lines=[], polygons=[], triangle_strips=[])
    l = common._getline(f).decode('ascii')
    k,n,datatype = [s.strip().lower() for s in l.split(' ')]
    if k!='points':
        raise ValueError('expected points but got %s'%(repr(k)))
    n = int(n)
    assert datatype in ['bit','unsigned_char','char','unsigned_short','short','unsigned_int','int','unsigned_long','long','float','double'],repr(datatype)
    log.debug('\tgetting %s points'%n)
    while len(points) < 3*n:
        l = common._getline(f).decode('ascii')
        points += map(eval,l.split(' '))
    assert len(points)==3*n
    while True:
        l = common._getline(f)
        if l is None:
            break
        l = l.decode('ascii')
        sl = l.split(' ')
        k = sl[0].strip().lower()
        if k not in ['vertices','lines','polygons','triangle_strips']:
            break
        assert len(sl)==3
        n = int(sl[1])
        size = int(sl[2])
        lst = []
        while len(lst) < size:
            l = common._getline(f).decode('ascii')
            lst += map(eval, l.split(' '))
        assert len(lst)==size
        # Each connectivity entry starts with its own length, followed by
        # that many point indices.
        lst2 = []
        j = 0
        for i in range(n):
            lst2.append(lst[j+1:j+lst[j]+1])
            j += lst[j]+1
        data[k] = lst2
    # Return the last unconsumed line (if any) so the caller can keep parsing;
    # at end of file there is no such line.
    return PolyData(points,data['vertices'], data['lines'], data['polygons'],
                    data['triangle_strips']), (l.encode() if l is not None else None)
Use VtkData(<filename>).
entailment
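For reference, a minimal ASCII fragment of the kind this parser consumes, starting at the POINTS line (hand-made illustration):

POINTS 3 float
0.0 0.0 0.0
1.0 0.0 0.0
0.0 1.0 0.0
POLYGONS 1 4
3 0 1 2

The POLYGONS header carries the number of entries (1) and the total count of values (4); each entry starts with its own length (3) followed by that many point indices.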
def webhooks(request): """ Handles all known webhooks from stripe, and calls signals. Plug in as you need. """ if request.method != "POST": return HttpResponse("Invalid Request.", status=400) json = simplejson.loads(request.POST["json"]) if json["event"] == "recurring_payment_failed": zebra_webhook_recurring_payment_failed.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json) elif json["event"] == "invoice_ready": zebra_webhook_invoice_ready.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json) elif json["event"] == "recurring_payment_succeeded": zebra_webhook_recurring_payment_succeeded.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json) elif json["event"] == "subscription_trial_ending": zebra_webhook_subscription_trial_ending.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json) elif json["event"] == "subscription_final_payment_attempt_failed": zebra_webhook_subscription_final_payment_attempt_failed.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json) elif json["event"] == "ping": zebra_webhook_subscription_ping_sent.send(sender=None) else: return HttpResponse(status=400) return HttpResponse(status=200)
Handles all known webhooks from stripe, and calls signals. Plug in as you need.
entailment
def webhooks_v2(request):
    """
    Handles all known webhooks from stripe, and calls signals.
    Plug in as you need.
    """
    if request.method != "POST":
        return HttpResponse("Invalid Request.", status=400)

    try:
        event_json = simplejson.loads(request.body)
    except AttributeError:
        # Backwards compatibility:
        # prior to Django 1.4, request.body was named request.raw_post_data.
        event_json = simplejson.loads(request.raw_post_data)

    event_key = event_json['type'].replace('.', '_')
    if event_key in WEBHOOK_MAP:
        WEBHOOK_MAP[event_key].send(sender=None, full_json=event_json)

    return HttpResponse(status=200)
Handles all known webhooks from stripe, and calls signals. Plug in as you need.
entailment
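WEBHOOK_MAP is defined outside this excerpt; a plausible sketch (an assumption that mirrors the dot-to-underscore transformation applied to event_key above) maps transformed Stripe event types to Django signals:

import django.dispatch

zebra_webhook_charge_succeeded = django.dispatch.Signal()

WEBHOOK_MAP = {
    # key 'charge_succeeded' matches the Stripe event type 'charge.succeeded'
    'charge_succeeded': zebra_webhook_charge_succeeded,
}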
def is_number(obj): """Check if obj is number.""" return isinstance(obj, (int, float, np.int_, np.float_))
Check if obj is number.
entailment
def get_seq(self,obj,default=None): """Return sequence.""" if is_sequence(obj): return obj if is_number(obj): return [obj] if obj is None and default is not None: log.warning('using default value (%s)'%(default)) return self.get_seq(default) raise ValueError('expected sequence|number but got %s'%(type(obj)))
Return sequence.
entailment
def get_seq_seq(self,obj,default=None): """Return sequence of sequences.""" if is_sequence2(obj): return [self.get_seq(o,default) for o in obj] else: return [self.get_seq(obj,default)]
Return sequence of sequences.
entailment
def get_3_tuple(self,obj,default=None):
    """Return 3-tuple from
    number -> (obj,default[1],default[2])
    0-sequence|None -> default
    1-sequence -> (obj[0],default[1],default[2])
    2-sequence -> (obj[0],obj[1],default[2])
    (3 or more)-sequence -> (obj[0],obj[1],obj[2])
    """
    if not (type(default) is tuple and len(default)==3):
        raise ValueError('argument default must be a 3-tuple but got %s'%(default,))
    if is_sequence(obj):
        n = len(obj)
        if n>3:
            log.warning('expected 3-sequence but got %s-%s; ignoring trailing items'%(n,type(obj)))
        if n>=3:
            return tuple(obj[:3])
        log.warning('filling with default value (%s) to obtain size=3'%(default[0]))
        if n==0:
            return default
        elif n==1:
            return (obj[0],default[1],default[2])
        elif n==2:
            return (obj[0],obj[1],default[2])
    elif is_number(obj):
        log.warning('filling with default value (%s) to obtain size=3'%(default[0]))
        return (obj,default[1],default[2])
    elif obj is None:
        log.warning('filling with default value (%s) to obtain size=3'%(default[0]))
        return default
    raise ValueError('failed to construct 3-tuple from %s'%(type(obj)))
Return 3-tuple from number -> (obj,default[1],default[2]) 0-sequence|None -> default 1-sequence -> (obj[0],default[1],default[2]) 2-sequence -> (obj[0],obj[1],default[2]) (3 or more)-sequence -> (obj[0],obj[1],obj[2])
entailment
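The docstring's mapping written out as calls (assuming an object v that exposes this method, such as the helper class these methods belong to):

v.get_3_tuple(5, (0, 0, 0))             # -> (5, 0, 0)
v.get_3_tuple([1, 2], (0, 0, 0))        # -> (1, 2, 0)
v.get_3_tuple(None, (0, 0, 0))          # -> (0, 0, 0)
v.get_3_tuple([1, 2, 3, 4], (0, 0, 0))  # -> (1, 2, 3), logging a warning about the extras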