text: string (lengths 78 to 104k)
score: float64 (range 0 to 0.18)
def get_etree_layout_as_dict(layout_tree):
    """
    Convert something that looks like this:
    <layout>
        <item>
            <name>color</name>
            <value>red</value>
        </item>
        <item>
            <name>shapefile</name>
            <value>blah.shp</value>
        </item>
    </layout>
    Into something that looks like this:
    {
        'color' : ['red'],
        'shapefile' : ['blah.shp']
    }
    """
    layout_dict = dict()
    for item in layout_tree.findall('item'):
        name = item.find('name').text
        val_element = item.find('value')
        value = val_element.text.strip()
        if value == '':
            children = val_element.getchildren()
            value = etree.tostring(children[0], pretty_print=True, encoding="unicode")
        layout_dict[name] = value
    return layout_dict
0.003538
def post(method, hmc, uri, uri_parms, body, logon_required, wait_for_completion):
    """Operation: Add Permission to User Role."""
    assert wait_for_completion is True  # synchronous operation
    user_role_oid = uri_parms[0]
    user_role_uri = '/api/user-roles/' + user_role_oid
    try:
        user_role = hmc.lookup_by_uri(user_role_uri)
    except KeyError:
        raise InvalidResourceError(method, uri)
    check_required_fields(method, uri, body, ['permitted-object', 'permitted-object-type'])
    # Reject if User Role is system-defined:
    if user_role.properties['type'] == 'system-defined':
        raise BadRequestError(
            method, uri, reason=314,
            message="Cannot add permission to "
            "system-defined user role: {}".format(user_role_uri))
    # Apply defaults, so our internally stored copy has all fields:
    permission = copy.deepcopy(body)
    if 'include-members' not in permission:
        permission['include-members'] = False
    if 'view-only-mode' not in permission:
        permission['view-only-mode'] = True
    # Add the permission to its store (the faked User Role object):
    if user_role.properties.get('permissions', None) is None:
        user_role.properties['permissions'] = []
    user_role.properties['permissions'].append(permission)
0.002088
def assertJsonContains(jsonStr=None, key=None, message=None):
    """
    Assert that jsonStr contains key.

    :param jsonStr: Json as string
    :param key: Key to look for
    :param message: Failure message
    :raises: TestStepFail if key is not in jsonStr or if loading jsonStr
        to a dictionary fails or if jsonStr is None.
    """
    if jsonStr is not None:
        try:
            data = json.loads(jsonStr)
            if key not in data:
                raise TestStepFail(
                    format_message(message) if message is not None else "Assert: "
                                                                        "Key : %s is not "
                                                                        "in : %s" % (str(key), str(jsonStr)))
        except (TypeError, ValueError) as e:
            raise TestStepFail(
                format_message(message) if message is not None else "Unable to parse json " + str(e))
    else:
        raise TestStepFail(
            format_message(message) if message is not None else "Json string is empty")
0.006029
def invalidate(self, cls, id_field, id_val):
    """
    Invalidate the cache for a given Mongo object by deleting the
    cached data and the cache flag.
    """
    cache_key, flag_key = self.get_keys(cls, id_field, id_val)
    pipeline = self.redis.pipeline()
    pipeline.delete(cache_key)
    pipeline.delete(flag_key)
    pipeline.execute()
0.005222
def _fill_array_from_list(the_list, the_array):
    """Fill an `array` from a `list`"""
    for i, val in enumerate(the_list):
        the_array[i] = val
    return the_array
0.010526
def get_options(self):
    """
    A hook to override the flattened list of all options used to
    generate option names and defaults.
    """
    return reduce(
        list.__add__,
        [list(option_list) for option_list in self.get_option_lists()],
        [])
0.009709
def wait_for_text(self, text, selector="html", by=By.CSS_SELECTOR,
                  timeout=settings.LARGE_TIMEOUT):
    """ The shorter version of wait_for_text_visible() """
    if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    return self.wait_for_text_visible(
        text, selector, by=by, timeout=timeout)
0.007371
def return_reply(*types, **options): """Decorator for returning replies from request handler methods. The method being decorated should return an iterable of result values. If the first value is 'ok', the decorator will check the remaining values against the specified list of types (if any). If the first value is 'fail' or 'error', there must be only one remaining parameter, and it must be a string describing the failure or error In both cases, the decorator will pack the values into a reply message. Parameters ---------- types : list of kattypes The types of the reply message parameters (in order). Keyword Arguments ----------------- major : int, optional Major version of KATCP to use when interpreting types. Defaults to latest implemented KATCP version. Examples -------- >>> class MyDevice(DeviceServer): ... @request(Int()) ... @return_reply(Int(), Float()) ... def request_myreq(self, req, my_int): ... return ("ok", my_int + 1, my_int * 2.0) ... """ major = options.pop('major', DEFAULT_KATCP_MAJOR) if len(options) > 0: raise TypeError('return_reply does not take keyword argument(s) %r.' % options.keys()) # Check that only the last type has multiple=True if len(types) > 1: for type_ in types[:-1]: if type_._multiple: raise TypeError('Only the last parameter type ' 'can accept multiple arguments.') def decorator(handler): if not handler.__name__.startswith("request_"): raise ValueError("This decorator can only be used on a katcp" " request handler (method name should start" " with 'request_').") msgname = convert_method_name('request_', handler.__name__) @wraps(handler) def raw_handler(self, *args): reply_args = handler(self, *args) if gen.is_future(reply_args): return async_make_reply(msgname, types, reply_args, major) else: return make_reply(msgname, types, reply_args, major) # TODO NM 2017-01-12 Consider using the decorator module to create # signature preserving decorators that would avoid the need for this # trickery if not getattr(handler, "_request_decorated", False): # We are on the inside. # We must preserve the original function parameter names for the # request decorator raw_handler._orig_argnames = inspect.getargspec(handler)[0] return raw_handler return decorator
0.000729
def get(cls, community_id, record_uuid):
    """Get an inclusion request."""
    return cls.query.filter_by(
        id_record=record_uuid,
        id_community=community_id
    ).one_or_none()
0.00995
def command(self, name=None):
    """A decorator to add subcommands.
    """
    def decorator(f):
        self.add_command(f, name)
        return f
    return decorator
0.010309
def process_data_events(self, to_tuple=False, auto_decode=True):
    """Consume inbound messages.

    :param bool to_tuple: Should incoming messages be converted to a
                          tuple before delivery.
    :param bool auto_decode: Auto-decode strings when possible.

    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :return:
    """
    if not self._consumer_callbacks:
        raise AMQPChannelError('no consumer callback defined')
    for message in self.build_inbound_messages(break_on_empty=True,
                                               auto_decode=auto_decode):
        consumer_tag = message._method.get('consumer_tag')
        if to_tuple:
            # noinspection PyCallingNonCallable
            self._consumer_callbacks[consumer_tag](*message.to_tuple())
            continue
        # noinspection PyCallingNonCallable
        self._consumer_callbacks[consumer_tag](message)
0.001767
def dumps(self):
    """Return a dictionnary of current tables"""
    return {table_name: getattr(self, table_name).dumps()
            for table_name in self.TABLES}
0.018519
def merge(iterables, key=None, reverse=False): '''Merge multiple sorted inputs into a single sorted output. Similar to sorted(itertools.chain(*iterables)) but returns a generator, does not pull the data into memory all at once, and assumes that each of the input streams is already sorted (smallest to largest). >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] If *key* is not None, applies a key function to each element to determine its sort order. >>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len)) ['dog', 'cat', 'fish', 'horse', 'kangaroo'] ''' h = [] h_append = h.append if reverse: _heapify = _heapify_max _heappop = _heappop_max _heapreplace = _heapreplace_max direction = -1 else: _heapify = heapify _heappop = heappop _heapreplace = heapreplace direction = 1 if key is None: for order, it in enumerate(map(iter, iterables)): try: h_append([next(it), order * direction, it]) except StopIteration: pass _heapify(h) while len(h) > 1: try: while True: value, order, it = s = h[0] yield value s[0] = next(it) # raises StopIteration when exhausted _heapreplace(h, s) # restore heap condition except StopIteration: _heappop(h) # remove empty iterator if h: # fast case when only a single iterator remains value, order, it = h[0] yield value for value in it: yield value return for order, it in enumerate(map(iter, iterables)): try: value = next(it) h_append([key(value), order * direction, value, it]) except StopIteration: pass _heapify(h) while len(h) > 1: try: while True: key_value, order, value, it = s = h[0] yield value value = next(it) s[0] = key(value) s[2] = value _heapreplace(h, s) except StopIteration: _heappop(h) if h: key_value, order, value, it = h[0] yield value for value in it: yield value
0.000802
def set_euk_hmm(self, args):
    'Set the hmm used by graftM to cross check for euks.'
    if hasattr(args, 'euk_hmm_file'):
        pass
    elif not hasattr(args, 'euk_hmm_file'):
        # set to path based on the location of bin/graftM, which has
        # a more stable relative path to the HMM when installed through
        # pip.
        setattr(args, 'euk_hmm_file',
                os.path.join(os.path.dirname(inspect.stack()[-1][1]), '..', 'share', '18S.hmm'))
    else:
        raise Exception('Programming Error: setting the euk HMM')
0.008772
def sub_hmm(self, states):
    r""" Returns HMM on a subset of states

    Returns the HMM restricted to the selected subset of states.
    Will raise exception if the hidden transition matrix cannot be normalized on this subset

    """
    # restrict initial distribution
    pi_sub = self._Pi[states]
    pi_sub /= pi_sub.sum()
    # restrict transition matrix
    P_sub = self._Tij[states, :][:, states]
    # checks if this selection is possible
    assert np.all(P_sub.sum(axis=1) > 0), \
        'Illegal sub_hmm request: transition matrix cannot be normalized on ' + str(states)
    P_sub /= P_sub.sum(axis=1)[:, None]
    # restrict output model
    out_sub = self.output_model.sub_output_model(states)
    return HMM(pi_sub, P_sub, out_sub, lag=self.lag)
0.004813
def create_disk(name, size): ''' Create a VMM disk with the specified `name` and `size`. size: Size in megabytes, or use a specifier such as M, G, T. CLI Example: .. code-block:: bash salt '*' vmctl.create_disk /path/to/disk.img size=10G ''' ret = False cmd = 'vmctl create {0} -s {1}'.format(name, size) result = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) if result['retcode'] == 0: ret = True else: raise CommandExecutionError( 'Problem encountered creating disk image', info={'errors': [result['stderr']], 'changes': ret} ) return ret
0.001314
def download_file(file_id, file_name):
    '''Download a file from UPLOAD_FOLDER'''
    extracted_out_dir = os.path.join(app.config['UPLOAD_FOLDER'], file_id)
    return send_file(os.path.join(extracted_out_dir, file_name))
0.004484
def is_callable(self):
    """The fake can be called.

    This is useful for when you stub out a function
    as opposed to a class.  For example::

        >>> import fudge
        >>> remove = Fake('os.remove').is_callable()
        >>> remove('some/path')

    """
    self._callable = Call(self, call_name=self._name, callable=True)
    return self
0.005141
def _exclude_ipv4_networks(self, networks, networks_to_exclude): """ Exclude the list of networks from another list of networks and return a flat list of new networks. :param networks: List of IPv4 networks to exclude from :param networks_to_exclude: List of IPv4 networks to exclude :returns: Flat list of IPv4 networks """ for network_to_exclude in networks_to_exclude: def _exclude_ipv4_network(network): """ Exclude a single network from another single network and return a list of networks. Network to exclude comes from the outer scope. :param network: Network to exclude from :returns: Flat list of IPv4 networks after exclusion. If exclude fails because networks do not overlap, a single element list with the orignal network is returned. If it overlaps, even partially, the network is excluded. """ try: return list(network.address_exclude(network_to_exclude)) except ValueError: # If networks overlap partially, `address_exclude` # will fail, but the network still must not be used # in generation. if network.overlaps(network_to_exclude): return [] else: return [network] networks = list(map(_exclude_ipv4_network, networks)) # flatten list of lists networks = [ item for nested in networks for item in nested ] return networks
0.001107
def config_managed(name, value, force_password=False): ''' Manage a LXD Server config setting. name : The name of the config key. value : Its value. force_password : False Set this to True if you want to set the password on every run. As we can't retrieve the password from LXD we can't check if the current one is the same as the given one. ''' ret = { 'name': name, 'value': value if name != 'core.trust_password' else True, 'force_password': force_password } try: current_value = __salt__['lxd.config_get'](name) except CommandExecutionError as e: return _error(ret, six.text_type(e)) if (name == _password_config_key and (not force_password or not current_value)): msg = ( ('"{0}" is already set ' '(we don\'t known if the password is correct)').format(name) ) return _success(ret, msg) elif six.text_type(value) == current_value: msg = ('"{0}" is already set to "{1}"'.format(name, value)) return _success(ret, msg) if __opts__['test']: if name == _password_config_key: msg = 'Would set the LXD password' ret['changes'] = {'password': msg} return _unchanged(ret, msg) else: msg = 'Would set the "{0}" to "{1}"'.format(name, value) ret['changes'] = {name: msg} return _unchanged(ret, msg) result_msg = '' try: result_msg = __salt__['lxd.config_set'](name, value)[0] if name == _password_config_key: ret['changes'] = { name: 'Changed the password' } else: ret['changes'] = { name: 'Changed from "{0}" to {1}"'.format( current_value, value ) } except CommandExecutionError as e: return _error(ret, six.text_type(e)) return _success(ret, result_msg)
0.000495
def eval_hook(self, name: str, ctx: list) -> Node:
    """Evaluate the hook by its name"""
    if name not in self.__class__._hooks:
        # TODO: don't always throw error, could have return True by default
        self.diagnostic.notify(
            error.Severity.ERROR,
            "Unknown hook : %s" % name,
            error.LocationInfo.from_stream(self._stream, is_error=True)
        )
        raise self.diagnostic
    self._lastRule = '#' + name
    res = self.__class__._hooks[name](self, *ctx)
    if type(res) is not bool:
        raise TypeError("Your hook %r didn't return a bool value" % name)
    return res
0.002928
def convert_namespaces_ast( ast, api_url: str = None, namespace_targets: Mapping[str, List[str]] = None, canonicalize: bool = False, decanonicalize: bool = False, ): """Recursively convert namespaces of BEL Entities in BEL AST using API endpoint Canonicalization and decanonicalization is determined by endpoint used and namespace_targets. Args: ast (BEL): BEL AST api_url (str): endpoint url with a placeholder for the term_id (either /terms/<term_id>/canonicalized or /terms/<term_id>/decanonicalized) namespace_targets (Mapping[str, List[str]]): (de)canonical targets for converting BEL Entities Returns: BEL: BEL AST """ if isinstance(ast, NSArg): given_term_id = "{}:{}".format(ast.namespace, ast.value) # Get normalized term if necessary if (canonicalize and not ast.canonical) or ( decanonicalize and not ast.decanonical ): normalized_term = convert_nsarg( given_term_id, api_url=api_url, namespace_targets=namespace_targets, canonicalize=canonicalize, decanonicalize=decanonicalize, ) if canonicalize: ast.canonical = normalized_term elif decanonicalize: ast.decanonical = normalized_term # Update normalized term if canonicalize: ns, value = ast.canonical.split(":") ast.change_nsvalue(ns, value) elif decanonicalize: ns, value = ast.canonical.split(":") ast.change_nsvalue(ns, value) # Recursively process every NSArg by processing BELAst and Functions if hasattr(ast, "args"): for arg in ast.args: convert_namespaces_ast( arg, api_url=api_url, namespace_targets=namespace_targets, canonicalize=canonicalize, decanonicalize=decanonicalize, ) return ast
0.002444
def update(check, enter_parameters, version):
    """
    Update package with latest template. Must be inside of the project
    folder to run.

    Using "-e" will prompt for re-entering the template parameters again
    even if the project is up to date.

    Use "-v" to update to a particular version of a template.

    Using "-c" will perform a check that the project is up to date
    with the latest version of the template (or the version specified by "-v").
    No updating will happen when using this option.
    """
    if check:
        if temple.update.up_to_date(version=version):
            print('Temple package is up to date')
        else:
            msg = (
                'This temple package is out of date with the latest template.'
                ' Update your package by running "temple update" and commiting changes.'
            )
            raise temple.exceptions.NotUpToDateWithTemplateError(msg)
    else:
        temple.update.update(new_version=version, enter_parameters=enter_parameters)
0.002924
def alloc_seg(self, net_id):
    """Allocates the segmentation ID. """
    segmentation_id = self.service_segs.allocate_segmentation_id(
        net_id, source=fw_const.FW_CONST)
    return segmentation_id
0.00905
def handle_read_value(self, buff, start, end):
    '''
    handle read of the value based on the expected length

    :param buff:
    :param start:
    :param end:
    '''
    segmenttype = self._state[1].value.segmenttype
    value = None
    eventtype = None
    ftype = self._state[0]
    # parsing value
    if segmenttype <= SegmentType.VARIABLE_LENGTH_VALUE:
        self._scstate = self.next_state_afterraw()
        value = self.parse_value(self._state[0], buff, start, end)
        eventtype = EventType.VALUE
    # next we should expect length
    elif segmenttype >= SegmentType.EXT_FORMAT:
        value = self.parse_ext_value(self._state[0], self._state[4], buff, start, end)
        eventtype = EventType.EXT
        ftype = ExtType(self._state[0], self._state[4])
    else:
        raise InvalidStateException(self._scstate, "header")
    self.events.append((self.value_event_type(eventtype), ftype, value))
0.005831
def reset(self, indices=None):
    """Reset the batch of environments.

    Args:
      indices: The batch indices of the environments to reset; defaults to all.

    Returns:
      Batch tensor of the new observations.
    """
    if indices is None:
        indices = tf.range(len(self._batch_env))
    observ_dtype = self._parse_dtype(self._batch_env.observation_space)
    observ = tf.py_func(
        self._batch_env.reset, [indices], observ_dtype, name='reset')
    observ = tf.check_numerics(observ, 'observ')
    reward = tf.zeros_like(indices, tf.float32)
    done = tf.zeros_like(indices, tf.bool)
    with tf.control_dependencies([
            tf.scatter_update(self._observ, indices, observ),
            tf.scatter_update(self._reward, indices, reward),
            tf.scatter_update(self._done, indices, done)]):
        return tf.identity(observ)
0.004734
def context_from_module(module):
    """
    Given a module, create a context from all of the top level annotated
    symbols in that module.
    """
    con = find_all(module)
    if hasattr(module, "__doc__"):
        setattr(con, "__doc__", module.__doc__)
    name = module.__name__
    if hasattr(module, "_name_"):
        name = module._name_  # pylint: disable=W0212
    con = annotated(con, name)
    setattr(con, 'context', True)
    return name, con
0.002151
def _Open(self, path_spec, mode='rb'): """Opens the file system object defined by path specification. Args: path_spec (PathSpec): path specification. mode (Optional[str]): file access mode. The default is 'rb' which represents read-only binary. Raises: AccessError: if the access to open the file was denied. IOError: if the file system object could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid. """ if not path_spec.HasParent(): raise errors.PathSpecError( 'Unsupported path specification without parent.') file_object = resolver.Resolver.OpenFileObject( path_spec.parent, resolver_context=self._resolver_context) try: vshadow_volume = pyvshadow.volume() vshadow_volume.open_file_object(file_object) except: file_object.close() raise self._file_object = file_object self._vshadow_volume = vshadow_volume
0.006883
def reload(cls, args):
    """Reload NApps code."""
    LOG.info('Reloading NApps...')
    mgr = NAppsManager()

    try:
        if args['all']:
            mgr.reload(None)
        else:
            napps = args['<napp>']
            mgr.reload(napps)
        LOG.info('\tReloaded.')
    except requests.HTTPError as exception:
        if exception.response.status_code != 200:
            msg = json.loads(exception.response.content)
            LOG.error('\tServer error: %s - ', msg['error'])
0.00361
async def remember_ticket(self, request, ticket):
    """Called to store the ticket data for a request.

    Ticket data is stored in the aiohttp_session object

    Args:
        request: aiohttp Request object.
        ticket: String like object representing the ticket to be stored.
    """
    session = await get_session(request)
    session[self.cookie_name] = ticket
0.004938
def _parse_pairwise_input(indices1, indices2, MDlogger, fname=''): r"""For input of pairwise type (distances, inverse distances, contacts) checks the type of input the user gave and reformats it so that :py:func:`DistanceFeature`, :py:func:`InverseDistanceFeature`, and ContactFeature can work. In case the input isn't already a list of distances, this function will: - sort the indices1 array - check for duplicates within the indices1 array - sort the indices2 array - check for duplicates within the indices2 array - check for duplicates between the indices1 and indices2 array - if indices2 is None, produce a list of pairs of indices in indices1, or - if indices2 is not None, produce a list of pairs of (i,j) where i comes from indices1, and j from indices2 """ if is_iterable_of_int(indices1): MDlogger.warning('The 1D arrays input for %s have been sorted, and ' 'index duplicates have been eliminated.\n' 'Check the output of describe() to see the actual order of the features' % fname) # Eliminate duplicates and sort indices1 = np.unique(indices1) # Intra-group distances if indices2 is None: atom_pairs = combinations(indices1, 2) # Inter-group distances elif is_iterable_of_int(indices2): # Eliminate duplicates and sort indices2 = np.unique(indices2) # Eliminate duplicates between indices1 and indices1 uniqs = np.in1d(indices2, indices1, invert=True) indices2 = indices2[uniqs] atom_pairs = product(indices1, indices2) else: atom_pairs = indices1 return atom_pairs
0.00384
def _chip_erase_program(self, progress_cb=_stub_progress):
    """! @brief Program by first performing an erase all."""
    LOG.debug("%i of %i pages have erased data",
              len(self.page_list) - self.chip_erase_count, len(self.page_list))
    progress_cb(0.0)
    progress = 0

    self.flash.init(self.flash.Operation.ERASE)
    self.flash.erase_all()
    self.flash.uninit()

    progress += self.flash.get_flash_info().erase_weight
    progress_cb(float(progress) / float(self.chip_erase_weight))

    self.flash.init(self.flash.Operation.PROGRAM)
    for page in self.page_list:
        if not page.erased:
            self.flash.program_page(page.addr, page.data)
            progress += page.get_program_weight()
            progress_cb(float(progress) / float(self.chip_erase_weight))
    self.flash.uninit()

    progress_cb(1.0)
    return FlashBuilder.FLASH_CHIP_ERASE
0.005203
def main(depth_file, json_dict, cutoff, sample_id): """ Function that handles the inputs required to parse depth files from bowtie and dumps a dict to a json file that can be imported into pATLAS. Parameters ---------- depth_file: str the path to depth file for each sample json_dict: str the file that contains the dictionary with keys and values for accessions and their respective lengths cutoff: str the cutoff used to trim the unwanted matches for the minimum coverage results from mapping. This value may range between 0 and 1. sample_id: str the id of the sample being parsed """ # check for the appropriate value for the cutoff value for coverage results logger.debug("Cutoff value: {}. Type: {}".format(cutoff, type(cutoff))) try: cutoff_val = float(cutoff) if cutoff_val < 0.4: logger.warning("This cutoff value will generate a high volume of " "plot data. Therefore '.report.json' can be too big") except ValueError: logger.error("Cutoff value should be a string such as: '0.6'. " "The outputted value: {}. Make sure to provide an " "appropriate value for --cov_cutoff".format(cutoff)) sys.exit(1) # loads dict from file, this file is provided in docker image plasmid_length = json.load(open(json_dict)) if plasmid_length: logger.info("Loaded dictionary of plasmid lengths") else: logger.error("Something went wrong and plasmid lengths dictionary" "could not be loaded. Check if process received this" "param successfully.") sys.exit(1) # read depth file depth_file_in = open(depth_file) # first reads the depth file and generates dictionaries to handle the input # to a simpler format logger.info("Reading depth file and creating dictionary to dump.") depth_dic_coverage = depth_file_reader(depth_file_in) percentage_bases_covered, dict_cov = generate_jsons(depth_dic_coverage, plasmid_length, cutoff_val) if percentage_bases_covered and dict_cov: logger.info("percentage_bases_covered length: {}".format( str(len(percentage_bases_covered)))) logger.info("dict_cov length: {}".format(str(len(dict_cov)))) else: logger.error("Both dicts that dump to JSON file or .report.json are " "empty.") # then dump do file logger.info("Dumping to {}".format("{}_mapping.json".format(depth_file))) with open("{}_mapping.json".format(depth_file), "w") as output_json: output_json.write(json.dumps(percentage_bases_covered)) json_dic = { "tableRow": [{ "sample": sample_id, "data": [{ "header": "Mapping", "table": "plasmids", "patlas_mapping": percentage_bases_covered, "value": len(percentage_bases_covered) }] }], "sample": sample_id, "patlas_mapping": percentage_bases_covered, "plotData": [{ "sample": sample_id, "data": { "patlasMappingSliding": dict_cov }, }] } logger.debug("Size of dict_cov: {} kb".format(asizeof(json_dic)/1024)) logger.info("Writing to .report.json") with open(".report.json", "w") as json_report: json_report.write(json.dumps(json_dic, separators=(",", ":")))
0.000547
def extract_left_hand_side(target):
    """Extract the left hand side variable from a target.

    Removes list indexes, stars and other left hand side elements.
    """
    left_hand_side = _get_names(target, '')
    left_hand_side.replace('*', '')
    if '[' in left_hand_side:
        index = left_hand_side.index('[')
        left_hand_side = target[:index]
    return left_hand_side
0.002571
def new_bundle(self, name: str, created_at: dt.datetime=None) -> models.Bundle:
    """Create a new file bundle."""
    new_bundle = self.Bundle(name=name, created_at=created_at)
    return new_bundle
0.018868
def read(self, timeout=None): ''' Read from the transport. If no data is available, should return None. The timeout is ignored as this returns only data that has already been buffered locally. ''' # NOTE: copying over this comment from Connection, because there is # knowledge captured here, even if the details are stale # Because of the timer callback to dataRead when we re-buffered, # there's a chance that in between we've lost the socket. If that's # the case, just silently return as some code elsewhere would have # already notified us. That bug could be fixed by improving the # message reading so that we consume all possible messages and ensure # that only a partial message was rebuffered, so that we can rely on # the next read event to read the subsequent message. if not hasattr(self, '_sock'): return None # This is sort of a hack because we're faking that data is ready, but # it works for purposes of supporting timeouts if timeout: if self._heartbeat_timeout: self._heartbeat_timeout.delete() self._heartbeat_timeout = \ event.timeout(timeout, self._sock_read_cb, self._sock) elif self._heartbeat_timeout: self._heartbeat_timeout.delete() self._heartbeat_timeout = None return self._sock.read()
0.001363
def import_legislators(src):
    """
    Read the legislators from the csv files into a single Dataframe. Intended
    for importing new data.
    """
    logger.info("Importing Legislators From: {0}".format(src))
    current = pd.read_csv("{0}/{1}/legislators-current.csv".format(
        src, LEGISLATOR_DIR))
    historic = pd.read_csv("{0}/{1}/legislators-historic.csv".format(
        src, LEGISLATOR_DIR))

    legislators = current.append(historic)
    return legislators
0.002092
def series_with_slh(self, other):
    """Series product with another :class:`SLH` object

    Args:
        other (SLH): An upstream SLH circuit.

    Returns:
        SLH: The combined system.
    """
    new_S = self.S * other.S
    new_L = self.S * other.L + self.L

    def ImAdjoint(m):
        return (m.H - m) * (I / 2)

    delta = ImAdjoint(self.L.adjoint() * self.S * other.L)

    if isinstance(delta, Matrix):
        new_H = self.H + other.H + delta[0, 0]
    else:
        assert delta == 0
        new_H = self.H + other.H

    return SLH(new_S, new_L, new_H)
0.003115
def conduit_lengths(target, throat_endpoints='throat.endpoints', throat_length='throat.length'): r""" Calculate conduit lengths. A conduit is defined as half pore + throat + half pore. Parameters ---------- target : OpenPNM Object The object which this model is associated with. This controls the length of the calculated array, and also provides access to other necessary properties. throat_endpoints : string Dictionary key of the throat endpoint values. throat_diameter : string Dictionary key of the throat length values. throat_length : string (optional) Dictionary key of the throat length values. If not given then the direct distance bewteen the two throat end points is used. Returns ------- Dictionary containing conduit lengths, which can be accessed via the dict keys 'pore1', 'pore2', and 'throat'. """ _np.warnings.filterwarnings('ignore', category=RuntimeWarning) network = target.project.network throats = network.map_throats(throats=target.Ts, origin=target) cn = network['throat.conns'][throats] # Get pore coordinates C1 = network['pore.coords'][cn[:, 0]] C2 = network['pore.coords'][cn[:, 1]] # Get throat endpoints and length EP1 = network[throat_endpoints + '.head'][throats] EP2 = network[throat_endpoints + '.tail'][throats] try: # Look up throat length if given Lt = network[throat_length][throats] except KeyError: # Calculate throat length otherwise Lt = _sqrt(((EP1 - EP2)**2).sum(axis=1)) # Calculate conduit lengths L1 = _sqrt(((C1 - EP1)**2).sum(axis=1)) L2 = _sqrt(((C2 - EP2)**2).sum(axis=1)) _np.warnings.filterwarnings('default', category=RuntimeWarning) return {'pore1': L1, 'throat': Lt, 'pore2': L2}
0.000532
def handle_message_registered(self, msg_data, host): """Processes messages that have been delivered by a registered client. Args: msg (string): The raw packet data delivered from the listener. This data will be unserialized and then processed based on the packet's method. host (tuple): The (address, host) tuple of the source message. Returns: A response that will be sent back to the client via the listener. """ response = None if msg_data["method"] == "EVENT": logger.debug("<%s> <euuid:%s> Event message " "received" % (msg_data["cuuid"], msg_data["euuid"])) response = self.event(msg_data["cuuid"], host, msg_data["euuid"], msg_data["event_data"], msg_data["timestamp"], msg_data["priority"]) elif msg_data["method"] == "OK EVENT": logger.debug("<%s> <euuid:%s> Event confirmation message " "received" % (msg_data["cuuid"], msg_data["euuid"])) try: del self.event_uuids[msg_data["euuid"]] except KeyError: logger.warning("<%s> <euuid:%s> Euuid does not exist in event " "buffer. Key was removed before we could process " "it." % (msg_data["cuuid"], msg_data["euuid"])) elif msg_data["method"] == "OK NOTIFY": logger.debug("<%s> <euuid:%s> Ok notify " "received" % (msg_data["cuuid"], msg_data["euuid"])) try: del self.event_uuids[msg_data["euuid"]] except KeyError: logger.warning("<%s> <euuid:%s> Euuid does not exist in event " "buffer. Key was removed before we could process " "it." % (msg_data["cuuid"], msg_data["euuid"])) return response
0.002366
def get_atoms(self, inc_alt_states=False):
    """Returns all atoms in the `Monomer`.

    Parameters
    ----------
    inc_alt_states : bool, optional
        If `True`, will return `Atoms` for alternate states.
    """
    if inc_alt_states:
        return itertools.chain(*[x[1].values() for x in sorted(list(self.states.items()))])
    return self.atoms.values()
0.007444
def double(window, config): """Double theme ================== = Header = ================== = items = ================== = footer = ================== """ cordx = round(config.get('cordx', 0)) color = config.get('color', red) icon = config.get('icon', '=') width = config.get('width', window.width) title = config.get('header', 'Menu'.center(width - 2)) back = config.get('footer', 'Pagina: {page:03d}/{last:03d}') align = config.get('align', 'left') term = window.term line = color(icon * width) l_eq = term.move_x(cordx) + color(icon) l_eq += term.move_x(cordx + width - 1) + \ color(icon) + term.move_x(cordx + 1) wrapper = f'{line}\n{l_eq}{{}}\n{line}' header = wrapper.format(title) footer = wrapper.format(back) if align == 'right': for_s = functools.partial(term.rjust, width=width - 4) # * elif align == 'center': for_s = functools.partial( term.center, width=width - 4) # -4 width "= text =" elif align == 'left': for_s = functools.partial(term.ljust, width=width - 4) # * else: raise ValueError("Only align center, left, right") return { 'header': header, 'footer': footer, 'formater': lambda text, **_: f'{l_eq} {for_s(text)}', 'selector': lambda text, **_: f'{l_eq} {underline_ns(for_s(text))}', }
0.000693
def ensure_dir_exists(f, fullpath=False):
    """ Ensure the existence of the (parent) directory of f """
    if fullpath is False:
        # Get parent directory
        d = os.path.dirname(f)
    else:
        # Create the full path
        d = f
    if not os.path.exists(d):
        os.makedirs(d)
0.003236
def cli(env, abuse, address1, address2, city, company, country, firstname, lastname, postal, public, state): """Edit the RWhois data on the account.""" mgr = SoftLayer.NetworkManager(env.client) update = { 'abuse_email': abuse, 'address1': address1, 'address2': address2, 'company_name': company, 'city': city, 'country': country, 'first_name': firstname, 'last_name': lastname, 'postal_code': postal, 'state': state, 'private_residence': public, } if public is True: update['private_residence'] = False elif public is False: update['private_residence'] = True check = [x for x in update.values() if x is not None] if not check: raise exceptions.CLIAbort( "You must specify at least one field to update.") mgr.edit_rwhois(**update)
0.001107
def rectwidth(self):
    """Calculate :ref:`pysynphot-formula-rectw`.

    Returns
    -------
    ans : float
        Bandpass rectangular width.

    """
    mywaveunits = self.waveunits.name
    self.convert('angstroms')

    wave = self.wave
    thru = self.throughput
    self.convert(mywaveunits)

    num = self.trapezoidIntegration(wave, thru)
    den = thru.max()

    if 0.0 in (num, den):
        return 0.0
    else:
        return num/den
0.003854
def check(self, message, ecc, k=None):
    '''Check if there's any error in a message+ecc. Can be used before
    decoding, in addition to hashes to detect if the message was tampered,
    or after decoding to check that the message was fully recovered.'''
    if not k:
        k = self.k
    message, _ = self.pad(message, k=k)
    ecc, _ = self.rpad(ecc, k=k)
    if self.algo == 1 or self.algo == 2:
        return self.ecc_manager.check_fast(message + ecc, k=k)
    elif self.algo == 3 or self.algo == 4:
        return reedsolo.rs_check(bytearray(message + ecc), self.n-k,
                                 fcr=self.fcr, generator=self.gen_nb)
0.007924
def getFormTemplate(self):
    """Returns the current samplinground rendered with the template
    specified in the request (param 'template').
    Moves the iterator to the next samplinground available.
    """
    templates_dir = self._TEMPLATES_DIR
    embedt = self.request.get('template', self._DEFAULT_TEMPLATE)
    if embedt.find(':') >= 0:
        prefix, embedt = embedt.split(':')
        templates_dir = queryResourceDirectory(self._TEMPLATES_ADDON_DIR, prefix).directory
    embed = ViewPageTemplateFile(os.path.join(templates_dir, embedt))
    reptemplate = ""
    try:
        reptemplate = embed(self)
    except:
        tbex = traceback.format_exc()
        wsid = self._samplingrounds[self._current_sr_index].id
        reptemplate = "<div class='error-print'>%s - %s '%s':<pre>%s</pre></div>" % (wsid, _("Unable to load the template"), embedt, tbex)
    if self._current_sr_index < len(self._samplingrounds):
        self._current_sr_index += 1
    return reptemplate
0.004655
def sepconv_relu_sepconv(inputs, filter_size, output_size, first_kernel_size=(1, 1), second_kernel_size=(1, 1), padding="LEFT", nonpadding_mask=None, dropout=0.0, name=None): """Hidden layer with RELU activation followed by linear projection.""" with tf.variable_scope(name, "sepconv_relu_sepconv", [inputs]): inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask) if inputs.get_shape().ndims == 3: is_3d = True inputs = tf.expand_dims(inputs, 2) else: is_3d = False h = separable_conv( inputs, filter_size, first_kernel_size, activation=tf.nn.relu, padding=padding, name="conv1") if dropout != 0.0: h = tf.nn.dropout(h, 1.0 - dropout) h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask) ret = separable_conv( h, output_size, second_kernel_size, padding=padding, name="conv2") if is_3d: ret = tf.squeeze(ret, 2) return ret
0.006757
def from_start_and_end(cls, start, end, sequence, phos_3_prime=False):
    """Creates a DNA duplex from a start and end point.

    Parameters
    ----------
    start: [float, float, float]
        Start of the build axis.
    end: [float, float, float]
        End of build axis.
    sequence: str
        Nucleotide sequence.
    phos_3_prime: bool, optional
        If false the 5' and the 3' phosphor will be omitted."""
    strand1 = NucleicAcidStrand.from_start_and_end(
        start, end, sequence, phos_3_prime=phos_3_prime)
    duplex = cls(strand1)
    return duplex
0.00295
def stderr_output(cmd):
    """Wraps the execution of check_output in a way that
    ignores stderr when not in debug mode"""
    handle, gpg_stderr = stderr_handle()
    try:
        output = subprocess.check_output(cmd, stderr=gpg_stderr)  # nosec
        if handle:
            handle.close()
        return str(polite_string(output))
    except subprocess.CalledProcessError as exception:
        LOGGER.debug("GPG Command %s", ' '.join(exception.cmd))
        LOGGER.debug("GPG Output %s", exception.output)
        raise CryptoritoError('GPG Execution')
0.001783
def _do_main(self, commands):
    """
    :type commands: list of VSCtlCommand
    """
    self._reset()
    self._init_schema_helper()
    self._run_prerequisites(commands)

    idl_ = idl.Idl(self.remote, self.schema_helper)
    seqno = idl_.change_seqno
    while True:
        self._idl_wait(idl_, seqno)

        seqno = idl_.change_seqno
        if self._do_vsctl(idl_, commands):
            break

        if self.txn:
            self.txn.abort()
            self.txn = None

        # TODO:XXX
        # ovsdb_symbol_table_destroy(symtab)

    idl_.close()
0.00312
def __parse_json_data(self, data):
    """Process Json data

    :@param data
    :@type data: json/dict

    :throws TypeError
    """
    if isinstance(data, dict) or isinstance(data, list):
        self._raw_data = data
        self._json_data = copy.deepcopy(self._raw_data)
    else:
        raise TypeError("Provided Data is not json")
0.005249
def get_next(self):
    """Return the next set of objects in a list"""
    url = self._get_link('next')
    resource = self.object_type.get_resource_class(self.client)
    resp = resource.perform_api_call(resource.REST_READ, url)
    return List(resp, self.object_type, self.client)
0.006623
def set_or_edit_conditional_breakpoint(self):
    """Set conditional breakpoint"""
    if self.data:
        editor = self.get_current_editor()
        editor.debugger.toogle_breakpoint(edit_condition=True)
0.00885
def wait_fds(fd_events, inmask=1, outmask=2, timeout=None): """wait for the first of a number of file descriptors to have activity .. note:: this method can block it will return once there is relevant activity on the file descriptors, or the timeout expires :param fd_events: two-tuples, each one a file descriptor and a mask made up of the inmask and/or the outmask bitwise-ORd together :type fd_events: list :param inmask: the mask to use for readable events (default 1) :type inmask: int :param outmask: the mask to use for writable events (default 2) :type outmask: int :param timeout: the maximum time to wait before raising an exception (default None) :type timeout: int, float or None :returns: a list of two-tuples, each is a file descriptor and an event mask (made up of inmask and/or outmask bitwise-ORd together) representing readable and writable events """ current = compat.getcurrent() activated = {} poll_regs = {} callback_refs = {} def activate(fd, event): if not activated and timeout != 0: # this is the first invocation of `activated` for a blocking # `wait_fds` call, so re-schedule the blocked coroutine scheduler.schedule(current) # if there was a timeout then also have to pull # the coroutine from the timed_paused structure if timeout: scheduler._remove_timer(waketime, current) # in any case, set the event information activated.setdefault(fd, 0) activated[fd] |= event for fd, events in fd_events: readable = None writable = None if events & inmask: readable = functools.partial(activate, fd, inmask) if events & outmask: writable = functools.partial(activate, fd, outmask) callback_refs[fd] = (readable, writable) poll_regs[fd] = scheduler._register_fd(fd, readable, writable) if timeout: # real timeout value, schedule ourself `timeout` seconds in the future waketime = time.time() + timeout scheduler.pause_until(waketime) elif timeout == 0: # timeout == 0, only pause for 1 loop iteration scheduler.pause() else: # timeout is None, it's up to _hit_poller->activate to bring us back scheduler.state.mainloop.switch() for fd, reg in poll_regs.iteritems(): readable, writable = callback_refs[fd] scheduler._unregister_fd(fd, readable, writable, reg) if scheduler.state.interrupted: raise IOError(errno.EINTR, "interrupted system call") return activated.items()
0.000366
def associate_flavor(self, flavor, body):
    """Associate a Neutron service flavor with a profile."""
    return self.post(self.flavor_profile_bindings_path %
                     (flavor), body=body)
0.00939
def data_read_write(data_path_in, data_path_out, format_type, **kwargs): """ General function to read, format, and write data. Parameters ---------- data_path_in : str Path to the file that will be read data_path_out : str Path of the file that will be output format_type : str Either 'dense', 'grid', 'columnar', or 'transect' kwargs Specific keyword args for given data types. See Notes Notes ----- 'Dense Parameters' non_label_cols : str Comma separated list of non label columns. ex. "lat, long, tree" sep : str The delimiter for the dense data. Default, "," na_values : int, float, str Value to be labeled as NA. Default, "" See misc.format_dense() for additional keyword parameters """ if format_type == "dense": # Set dense defaults kwargs = _set_dense_defaults_and_eval(kwargs) # Try to parse non label columns appropriately try: nlc = [nm.strip() for nm in kwargs['non_label_cols'].split(",")] kwargs.pop('non_label_cols', None) except KeyError: raise KeyError("'non_label_cols' is a required keyword dense data") # Read data with dense specific keywords arch_data = pd.read_csv(data_path_in, sep=kwargs['delimiter'], na_values=kwargs['na_values']) form_data = format_dense(arch_data, nlc, **kwargs) elif format_type == "grid": pass elif format_type == "stacked": pass elif format_type == "transect": pass else: raise NameError("%s is not a supported data format" % format_type) form_data.to_csv(data_path_out, index=False)
0.000571
def from_properties(cls, angle, axis, invert):
    """Initialize a rotation based on the properties"""
    norm = np.linalg.norm(axis)
    if norm > 0:
        x = axis[0] / norm
        y = axis[1] / norm
        z = axis[2] / norm
        c = np.cos(angle)
        s = np.sin(angle)
        r = (1-2*invert) * np.array([
            [x*x*(1-c)+c  , x*y*(1-c)-z*s, x*z*(1-c)+y*s],
            [x*y*(1-c)+z*s, y*y*(1-c)+c  , y*z*(1-c)-x*s],
            [x*z*(1-c)-y*s, y*z*(1-c)+x*s, z*z*(1-c)+c  ]
        ])
    else:
        r = np.identity(3) * (1-2*invert)
    return cls(r)
0.007776
def frequency(self):
    """0 means unknown"""
    assert self.parsed_frames, "no frame parsed yet"
    f_index = self._fixed_header_key[4]
    try:
        return _FREQS[f_index]
    except IndexError:
        return 0
0.008032
def left_brake(self):
    """allows left motor to coast to a stop"""
    self.board.digital_write(L_CTRL_1, 1)
    self.board.digital_write(L_CTRL_2, 1)
    self.board.analog_write(PWM_L, 0)
0.009709
def get_view_root(view_name: str) -> XmlNode:
    '''Parses xml file and return root XmlNode'''
    try:
        path = join(deps.views_folder, '{0}.{1}'.format(view_name, deps.view_ext))
        parser = Parser()
        if path not in _XML_CACHE:
            with open(path, 'rb') as xml_file:
                _XML_CACHE[path] = parser.parse(xml_file, view_name)
        return _XML_CACHE[path]
    except FileNotFoundError as error:
        error = ViewError('View is not found')
        error.add_info('View name', view_name)
        error.add_info('Path', path)
        raise error
    except CoreError as error:
        error.add_view_info(ViewInfo(view_name, None))
        raise
    except:
        info = exc_info()
        error = ViewError('Unknown error occured during parsing xml', ViewInfo(view_name, None))
        error.add_cause(info[1])
        raise error from info[1]
0.00451
def adapt(self, d, x):
    """
    Adapt weights according one desired value and its input.

    **Args:**

    * `d` : desired value (float)

    * `x` : input array (1-dimensional array)
    """
    # create input matrix and target vector
    self.x_mem[:,1:] = self.x_mem[:,:-1]
    self.x_mem[:,0] = x
    self.d_mem[1:] = self.d_mem[:-1]
    self.d_mem[0] = d
    # estimate output and error
    self.y_mem = np.dot(self.x_mem.T, self.w)
    self.e_mem = self.d_mem - self.y_mem
    # update
    dw_part1 = np.dot(self.x_mem.T, self.x_mem) + self.ide_eps
    dw_part2 = np.linalg.solve(dw_part1, self.ide)
    dw = np.dot(self.x_mem, np.dot(dw_part2, self.e_mem))
    self.w += self.mu * dw
0.006485
def element_for_value(cls, attrname, value): """Serialize the given value into an XML `Element` with the given tag name, returning it. The value argument may be: * a `Resource` instance * a `Money` instance * a `datetime.datetime` instance * a string, integer, or boolean value * ``None`` * a list or tuple of these values """ if isinstance(value, Resource): if attrname in cls._classes_for_nodename: # override the child's node name with this attribute name return value.to_element(attrname) return value.to_element() el = ElementTreeBuilder.Element(attrname) if value is None: el.attrib['nil'] = 'nil' elif isinstance(value, bool): el.attrib['type'] = 'boolean' el.text = 'true' if value else 'false' elif isinstance(value, int): el.attrib['type'] = 'integer' el.text = str(value) elif isinstance(value, datetime): el.attrib['type'] = 'datetime' el.text = value.strftime('%Y-%m-%dT%H:%M:%SZ') elif isinstance(value, list) or isinstance(value, tuple): for sub_resource in value: if hasattr(sub_resource, 'to_element'): el.append(sub_resource.to_element()) else: el.append(cls.element_for_value(re.sub(r"s$", "", attrname), sub_resource)) elif isinstance(value, Money): value.add_to_element(el) else: el.text = six.text_type(value) return el
0.003028
def plot2d(self, c_poly='default', alpha=1, cmap='default', ret=False, title=' ', colorbar=False, cbar_label=''): """ Generates a 2D plot for the z=0 Surface projection. :param c_poly: Polygons color. :type c_poly: matplotlib color :param alpha: Opacity. :type alpha: float :param cmap: colormap :type cmap: matplotlib.cm :param ret: If True, returns the figure. It can be used to add more elements to the plot or to modify it. :type ret: bool :param title: Figure title. :type title: str :param colorbar: If True, inserts a colorbar in the figure. :type colorbar: bool :param cbar_label: Colorbar right label. :type cbar_label: str :returns: None, axes :rtype: None, matplotlib axes """ import matplotlib.pyplot as plt import matplotlib.patches as patches import matplotlib.cm as cm paths = [polygon.get_path() for polygon in self] domain = self.get_domain()[:, :2] # Color if type(c_poly) == str: # Unicolor if c_poly is 'default': c_poly = 'b' color_vector = c_poly*len(paths) colorbar = False else: # Colormap if cmap is 'default': cmap = cm.YlOrRd import matplotlib.colors as mcolors normalize = mcolors.Normalize(vmin=c_poly.min(), vmax=c_poly.max()) color_vector = cmap(normalize(c_poly)) # Plot fig = plt.figure(title) ax = fig.add_subplot(111) for p, c in zip(paths, color_vector): ax.add_patch(patches.PathPatch(p, facecolor=c, lw=1, edgecolor='k', alpha=alpha)) ax.set_xlim(domain[0,0],domain[1,0]) ax.set_ylim(domain[0,1], domain[1,1]) # Colorbar if colorbar: scalarmappaple = cm.ScalarMappable(norm=normalize, cmap=cmap) scalarmappaple.set_array(c_poly) cbar = plt.colorbar(scalarmappaple, shrink=0.8, aspect=10) cbar.ax.set_ylabel(cbar_label, rotation=0) if ret: return ax
0.007353
def _as_json_dumps(self, indent: str='   ', **kwargs) -> str:
    """ Convert to a stringified json object.

    This is the same as _as_json with the exception that it isn't
    a property, meaning that we can actually pass arguments...

    :param indent: indent argument to dumps
    :param kwargs: other arguments for dumps
    :return: JSON formatted string
    """
    return json.dumps(self, default=self._default, indent=indent, **kwargs)
0.008386
def add_fields(self, *args):
    """
    This method only works for extensible fields. It allows to add values without
    precising their fields' names or indexes.

    Parameters
    ----------
    args: field values
    """
    if not self.is_extensible():
        raise TypeError("Can't use add_fields on a non extensible record.")

    # prepare update data
    self_len = len(self)
    data = dict([(self_len + i, args[i]) for i in range(len(args))])

    # update
    self.update(data)
0.005474
def variantSetsGenerator(self, request):
    """
    Returns a generator over the (variantSet, nextPageToken) pairs defined
    by the specified request.
    """
    dataset = self.getDataRepository().getDataset(request.dataset_id)
    return self._topLevelObjectGenerator(
        request, dataset.getNumVariantSets(),
        dataset.getVariantSetByIndex)
0.005141
def sectionOutZip(self,zipcontainer,zipdir='',figtype='png'): """Prepares section for zip output """ from io import StringIO, BytesIO text = self.p if not self.settings['doubleslashnewline'] else self.p.replace('//','\n') zipcontainer.writestr( zipdir+'section.txt', '# {}\n{}'.format(self.title,text).encode() ) c = count(1) for ftitle,f in self.figs.items(): figfile = zipdir+'fig{}_{}.{}'.format(next(c),ftitle.replace(' ','_'),figtype) b = BytesIO() f.savefig(b,format=figtype,transparent=True) b.seek(0) zipcontainer.writestr(figfile,b.getvalue()) c = count(1) for ttitle,t in self.tabs.items(): b = StringIO() t.to_csv(b,sep=csvsep,decimal=csvdec) b.seek(0) zipcontainer.writestr( zipdir+'table{}_{}.csv'.format(next(c),ttitle.replace(' ','_')), b.read().encode() ) c = count(1) for s in self.subs: s.sectionOutZip(zipcontainer,'{}s{}_{}/'.format(zipdir,next(c),s.title.replace(' ','_')),figtype=figtype)
0.023451
def get_route_templates(self): """ Generate Openshift route templates or playbook tasks. Each port on a service definition found in container.yml represents an externally exposed port. """ def _get_published_ports(service_config): result = [] for port in service_config.get('ports', []): protocol = 'TCP' if isinstance(port, string_types) and '/' in port: port, protocol = port.split('/') if isinstance(port, string_types) and ':' in port: host, container = port.split(':') else: host = port result.append({'port': host, 'protocol': protocol.lower()}) return result templates = [] for name, service_config in self._services.items(): state = service_config.get(self.CONFIG_KEY, {}).get('state', 'present') force = service_config.get(self.CONFIG_KEY, {}).get('force', False) published_ports = _get_published_ports(service_config) if state != 'present': continue for port in published_ports: route_name = "%s-%s" % (name, port['port']) labels = dict( app=self._namespace_name, service=name ) template = CommentedMap() template['apiVersion'] = self.DEFAULT_API_VERSION template['kind'] = 'Route' template['force'] = force template['metadata'] = CommentedMap([ ('name', route_name), ('namespace', self._namespace_name), ('labels', labels.copy()) ]) template['spec'] = CommentedMap([ ('to', CommentedMap([ ('kind', 'Service'), ('name', name) ])), ('port', CommentedMap([ ('targetPort', 'port-{}-{}'.format(port['port'], port['protocol'])) ])) ]) if service_config.get(self.CONFIG_KEY, {}).get('routes'): for route in service_config[self.CONFIG_KEY]['routes']: if str(route.get('port')) == str(port['port']): for key, value in route.items(): if key not in ('force', 'port'): self.copy_attribute(template['spec'], key, value) templates.append(template) return templates
0.002236
def compile(definition, handlers={}): """ Generates validation function for validating JSON schema passed in ``definition``. Example: .. code-block:: python import fastjsonschema validate = fastjsonschema.compile({'type': 'string'}) validate('hello') This implementation support keyword ``default``: .. code-block:: python validate = fastjsonschema.compile({ 'type': 'object', 'properties': { 'a': {'type': 'number', 'default': 42}, }, }) data = validate({}) assert data == {'a': 42} Supported implementations are draft-04, draft-06 and draft-07. Which version should be used is determined by `$draft` in your ``definition``. When not specified, the latest implementation is used (draft-07). .. code-block:: python validate = fastjsonschema.compile({ '$schema': 'http://json-schema.org/draft-04/schema', 'type': 'number', }) You can pass mapping from URI to function that should be used to retrieve remote schemes used in your ``definition`` in parameter ``handlers``. Exception :any:`JsonSchemaDefinitionException` is raised when generating the code fails (bad definition). Exception :any:`JsonSchemaException` is raised from generated funtion when validation fails (data do not follow the definition). """ resolver, code_generator = _factory(definition, handlers) global_state = code_generator.global_state # Do not pass local state so it can recursively call itself. exec(code_generator.func_code, global_state) return global_state[resolver.get_scope_name()]
0.002339
def PUT(self, rest_path_list, **kwargs):
    """Send a PUT request with optional streaming multipart encoding.

    See requests.sessions.request for optional parameters.
    See post() for parameters.

    :returns: Response object
    """
    fields = kwargs.pop("fields", None)
    if fields is not None:
        return self._send_mmp_stream("PUT", rest_path_list, fields, **kwargs)
    else:
        return self._request("PUT", rest_path_list, **kwargs)
0.00818
def flip(args): """ %prog flip fastafile Go through each FASTA record, check against Genbank file and determines whether or not to flip the sequence. This is useful before updates of the sequences to make sure the same orientation is used. """ p = OptionParser(flip.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastafile, = args outfastafile = fastafile.rsplit(".", 1)[0] + ".flipped.fasta" fo = open(outfastafile, "w") f = Fasta(fastafile, lazy=True) for name, rec in f.iteritems_ordered(): tmpfasta = "a.fasta" fw = open(tmpfasta, "w") SeqIO.write([rec], fw, "fasta") fw.close() o = overlap([tmpfasta, name]) if o.orientation == '-': rec.seq = rec.seq.reverse_complement() SeqIO.write([rec], fo, "fasta") os.remove(tmpfasta)
0.001093
def rotate_sites(self, indices=None, theta=0, axis=None, anchor=None, to_unit_cell=True): """ Rotate specific sites by some angle around vector at anchor. Args: indices (list): List of site indices on which to perform the translation. theta (float): Angle in radians axis (3x1 array): Rotation axis vector. anchor (3x1 array): Point of rotation. to_unit_cell (bool): Whether new sites are transformed to unit cell """ from numpy.linalg import norm from numpy import cross, eye from scipy.linalg import expm if indices is None: indices = range(len(self)) if axis is None: axis = [0, 0, 1] if anchor is None: anchor = [0, 0, 0] anchor = np.array(anchor) axis = np.array(axis) theta %= 2 * np.pi rm = expm(cross(eye(3), axis / norm(axis)) * theta) for i in indices: site = self._sites[i] coords = ((np.dot(rm, np.array(site.coords - anchor).T)).T + anchor).ravel() new_site = PeriodicSite( site.species, coords, self._lattice, to_unit_cell=to_unit_cell, coords_are_cartesian=True, properties=site.properties) self._sites[i] = new_site
0.002849
def post(self, endpoint='', url='', data=None, use_api_key=False, omit_api_version=False):
    """Perform a post to an API endpoint.

    :param string endpoint: Target endpoint. (Optional).
    :param string url: Override the endpoint and provide the full url (eg for pagination). (Optional).
    :param dict data: Data to pass to the post. (Optional).
    :return: Response.
    :rtype: ``Response``
    """
    return self._request('post', endpoint, url, data=data,
                         use_api_key=use_api_key, omit_api_version=omit_api_version)
0.008929
def calculate_splus_scross(nmax, mc, dl, F, e, t, l0, gamma, gammadot, inc): """ Calculate splus and scross summed over all harmonics. This waveform differs slightly from that in Taylor et al (2015) in that it includes the time dependence of the advance of periastron. :param nmax: Total number of harmonics to use :param mc: Chirp mass of binary [Solar Mass] :param dl: Luminosity distance [Mpc] :param F: Orbital frequency of binary [Hz] :param e: Orbital Eccentricity :param t: TOAs [s] :param l0: Initial eccentric anomoly [rad] :param gamma: Angle of periastron advance [rad] :param gammadot: Time derivative of angle of periastron advance [rad/s] :param inc: Inclination angle [rad] """ n = np.arange(1, nmax) # time dependent amplitudes an = get_an(n, mc, dl, F, e) bn = get_bn(n, mc, dl, F, e) cn = get_cn(n, mc, dl, F, e) # time dependent terms omega = 2*np.pi*F gt = gamma + gammadot * t lt = l0 + omega * t # tiled phase phase1 = n * np.tile(lt, (nmax-1,1)).T phase2 = np.tile(gt, (nmax-1,1)).T phasep = phase1 + 2*phase2 phasem = phase1 - 2*phase2 # intermediate terms sp = np.sin(phasem)/(n*omega-2*gammadot) + \ np.sin(phasep)/(n*omega+2*gammadot) sm = np.sin(phasem)/(n*omega-2*gammadot) - \ np.sin(phasep)/(n*omega+2*gammadot) cp = np.cos(phasem)/(n*omega-2*gammadot) + \ np.cos(phasep)/(n*omega+2*gammadot) cm = np.cos(phasem)/(n*omega-2*gammadot) - \ np.cos(phasep)/(n*omega+2*gammadot) splus_n = -0.5 * (1+np.cos(inc)**2) * (an*sp - bn*sm) + \ (1-np.cos(inc)**2)*cn * np.sin(phase1) scross_n = np.cos(inc) * (an*cm - bn*cp) return np.sum(splus_n, axis=1), np.sum(scross_n, axis=1)
0.009234
def write_configs(self):
    """Generate the configurations needed for pipes."""
    utils.banner("Generating Configs")

    if not self.runway_dir:
        app_configs = configs.process_git_configs(git_short=self.git_short)
    else:
        app_configs = configs.process_runway_configs(runway_dir=self.runway_dir)

    self.configs = configs.write_variables(
        app_configs=app_configs, out_file=self.raw_path, git_short=self.git_short)
0.008439
def add_event(self, source, reference, event_title, event_type, method='',
              description='', bucket_list=[], campaign='', confidence='',
              date=None):
    """
    Adds an event. If the event name already exists, it will return that
    event instead.

    Args:
        source: Source of the information
        reference: A reference where more information can be found
        event_title: The title of the event
        event_type: The type of event. See your CRITs vocabulary.
        method: The method for obtaining the event.
        description: A text description of the event.
        bucket_list: A list of bucket list items to add
        campaign: An associated campaign
        confidence: The campaign confidence
        date: A datetime.datetime object of when the event occurred.
    Returns:
        A JSON event object or None if there was an error.
    """
    # Check to see if the event already exists
    events = self.get_events(event_title)
    if events is not None:
        if events['meta']['total_count'] == 1:
            return events['objects'][0]
        if events['meta']['total_count'] > 1:
            log.error('Multiple events found while trying to add the event'
                      ': {}'.format(event_title))
            return None

    # Now we can create the event
    data = {
        'api_key': self.api_key,
        'username': self.username,
        'source': source,
        'reference': reference,
        'method': method,
        'campaign': campaign,
        'confidence': confidence,
        'description': description,
        'event_type': event_type,
        'date': date,
        'title': event_title,
        'bucket_list': ','.join(bucket_list),
    }

    r = requests.post('{}/events/'.format(self.url), data=data,
                      verify=self.verify, proxies=self.proxies)
    if r.status_code == 200:
        log.debug('Event created: {}'.format(event_title))
        json_obj = json.loads(r.text)
        if 'id' not in json_obj:
            log.error('Error adding event. id not returned.')
            return None
        return json_obj
    else:
        log.error('Event creation failed with status code: '
                  '{}'.format(r.status_code))
        return None
0.004547
def walk_perimeter(self, startx, starty):
    """
    Starting at a point on the perimeter of a region, 'walk' the perimeter
    to return to the starting point. Record the path taken.

    Parameters
    ----------
    startx, starty : int
        The starting location. Assumed to be on the perimeter of a region.

    Returns
    -------
    perimeter : list
        A list of pixel coordinates [ [x1,y1], ...] that constitute the
        perimeter of the region.
    """
    # checks
    startx = max(startx, 0)
    startx = min(startx, self.xsize)
    starty = max(starty, 0)
    starty = min(starty, self.ysize)

    points = []
    x, y = startx, starty

    while True:
        self.step(x, y)
        if 0 <= x <= self.xsize and 0 <= y <= self.ysize:
            points.append((x, y))

        if self.next == self.UP:
            y -= 1
        elif self.next == self.LEFT:
            x -= 1
        elif self.next == self.DOWN:
            y += 1
        elif self.next == self.RIGHT:
            x += 1
        # stop if we meet some kind of error
        elif self.next == self.NOWHERE:
            break

        # stop when we return to the starting location
        if x == startx and y == starty:
            break

    return points
0.002872
def _explain(self, tree):
    """ Set up the engine to do a dry run of a query """
    self._explaining = True
    self._call_list = []
    old_call = self.connection.call

    def fake_call(command, **kwargs):
        """ Replacement for connection.call that logs args """
        if command == "describe_table":
            return old_call(command, **kwargs)
        self._call_list.append((command, kwargs))
        raise ExplainSignal

    self.connection.call = fake_call
    try:
        ret = self._run(tree[1])
        try:
            list(ret)
        except TypeError:
            pass
    finally:
        self.connection.call = old_call
        self._explaining = False
0.002625
def get_details_from_inst_literal(self, institute_literal, institution_id,
                                  institution_instance_id, paper_key):
    """
    This method parses the institute literal to get the following

    1. Department name
    2. Country
    3. University name
    4. ZIP, STATE AND CITY (Only if the country is USA. For other countries
       the standard may vary. So parsing these values becomes very difficult.
       However, the complete address can be found in the column "AddressLine1"

    Parameters
    ----------
    institute_literal -> The literal value of the institute
    institution_id -> the Primary key value which is to be added in the fixture
    institution_instance_id -> Primary key value which is to be added in the fixture
    paper_key -> The Paper key which is used for the Institution Instance

    Returns
    -------
    """
    institute_details = institute_literal.split(',')
    institute_name = institute_details[0]
    country = institute_details[len(institute_details)-1].lstrip().replace('.', '')
    institute_row = None
    zipcode = ""
    state = ""
    city = ""
    if 'USA' in country:
        temp = country
        if(len(temp.split())) == 3:
            country = temp.split()[2]
            zipcode = temp.split()[1]
            state = temp.split()[0]
        elif(len(temp.split())) == 2:
            country = temp.split()[1]
            state = temp.split()[0]
        city = institute_details[len(institute_details)-2].lstrip()

    addressline1 = ""
    for i in range(1, len(institute_details)-1, 1):
        if i != len(institute_details)-2:
            addressline1 = addressline1 + institute_details[i]+','
        else:
            addressline1 = addressline1 + institute_details[i]

    if institute_literal not in self.instituteIdMap:
        self.instituteIdMap[institute_literal] = institution_id
        institute_row = {
            "model": "django-tethne.institution",
            "pk": institution_id,
            "fields": {
                "institute_name": institute_name,
                "addressLine1": addressline1,
                "country": country,
                "zip": zipcode,
                "state": state,
                "city": city
            }
        }

    department = ""
    if re.search('Dept([^,]*),', institute_literal) is not None:
        department = re.search('Dept([^,]*),', institute_literal).group().replace(',', '')

    institute_instance_row = {
        "model": "django-tethne.institution_instance",
        "pk": institution_instance_id,
        "fields": {
            "institution": self.instituteIdMap[institute_literal],
            "literal": institute_literal,
            "institute_name": institute_name,
            "addressLine1": addressline1,
            "country": country,
            "paper": self.paperIdMap[paper_key],
            "department": department,
            "zip": zipcode,
            "state": state,
            "city": city
        }
    }
    return institute_row, institute_instance_row
0.002985
def get_sources(self, kind='all'):
    """
    Extract the sources contained in the source models by optionally
    filtering and splitting them, depending on the passed parameter.
    """
    assert kind in ('all', 'indep', 'mutex'), kind
    sources = []
    for sm in self.source_models:
        for src_group in sm.src_groups:
            if kind in ('all', src_group.src_interdep):
                for src in src_group:
                    if sm.samples > 1:
                        src.samples = sm.samples
                    sources.append(src)
    return sources
0.00319
def _parse_s3_config(config_file_name, config_format='boto', profile=None):
    """
    Parses a config file for s3 credentials. Can currently parse boto,
    s3cmd.conf and AWS SDK config formats

    :param config_file_name: path to the config file
    :type config_file_name: str
    :param config_format: config type. One of "boto", "s3cmd" or "aws".
        Defaults to "boto"
    :type config_format: str
    :param profile: profile name in AWS type config file
    :type profile: str
    """
    config = configparser.ConfigParser()
    if config.read(config_file_name):  # pragma: no cover
        sections = config.sections()
    else:
        raise AirflowException("Couldn't read {0}".format(config_file_name))
    # Setting option names depending on file format
    if config_format is None:
        config_format = 'boto'
    conf_format = config_format.lower()
    if conf_format == 'boto':  # pragma: no cover
        if profile is not None and 'profile ' + profile in sections:
            cred_section = 'profile ' + profile
        else:
            cred_section = 'Credentials'
    elif conf_format == 'aws' and profile is not None:
        cred_section = profile
    else:
        cred_section = 'default'
    # Option names
    if conf_format in ('boto', 'aws'):  # pragma: no cover
        key_id_option = 'aws_access_key_id'
        secret_key_option = 'aws_secret_access_key'
        # security_token_option = 'aws_security_token'
    else:
        key_id_option = 'access_key'
        secret_key_option = 'secret_key'
    # Actual Parsing
    if cred_section not in sections:
        raise AirflowException("This config file format is not recognized")
    else:
        try:
            access_key = config.get(cred_section, key_id_option)
            secret_key = config.get(cred_section, secret_key_option)
        except Exception:
            logging.warning("Option Error in parsing s3 config file")
            raise
        return access_key, secret_key
0.000504
def create(point_list=None, dimensions=None, axis=0, sel_axis=None):
    """ Creates a kd-tree from a list of points

    All points in the list must be of the same dimensionality.

    If no point_list is given, an empty tree is created. The number of
    dimensions has to be given instead.

    If both a point_list and dimensions are given, the numbers must agree.

    Axis is the axis on which the root-node should split.

    sel_axis(axis) is used when creating subnodes of a node. It receives the
    axis of the parent node and returns the axis of the child node. """
    if not point_list and not dimensions:
        raise ValueError('either point_list or dimensions must be provided')
    elif point_list:
        dimensions = check_dimensionality(point_list, dimensions)

    # by default cycle through the axis
    sel_axis = sel_axis or (lambda prev_axis: (prev_axis+1) % dimensions)

    if not point_list:
        return KDNode(sel_axis=sel_axis, axis=axis, dimensions=dimensions)

    # Sort point list and choose median as pivot element
    point_list = list(point_list)
    point_list.sort(key=lambda point: point[axis])
    median = len(point_list) // 2

    loc = point_list[median]
    left = create(point_list[:median], dimensions, sel_axis(axis))
    right = create(point_list[median + 1:], dimensions, sel_axis(axis))
    return KDNode(loc, left, right, axis=axis, sel_axis=sel_axis,
                  dimensions=dimensions)
0.002784
def validate_generations(self):
    '''
    Make sure that the descendent depth is valid.
    '''
    nodes = self.arc_root_node.get_descendants()
    for node in nodes:
        logger.debug("Checking parent for node of type %s" % node.arc_element_type)
        parent = ArcElementNode.objects.get(pk=node.pk).get_parent(update=True)
        if 'mile' in node.arc_element_type and parent.get_depth() > 1:
            logger.debug("Milestone node... with leaf parent")
            raise ArcGenerationError(_("Milestones cannot be descendants of anything besides the root!"))
        if (parent.get_depth() > 1 and parent.arc_element_type not in
                ARC_NODE_ELEMENT_DEFINITIONS[node.arc_element_type]['allowed_parents']):
            raise ArcGenerationError(_("Node %s cannot be a descendant of node %s" % (node, parent)))
    return None
0.008869
def orchestrate(mods,
                saltenv='base',
                test=None,
                exclude=None,
                pillar=None,
                pillarenv=None):
    '''
    .. versionadded:: 2016.11.0

    Execute the orchestrate runner from a masterless minion.

    .. seealso:: More Orchestrate documentation

        * :ref:`Full Orchestrate Tutorial <orchestrate-runner>`
        * :py:mod:`Docs for the ``salt`` state module <salt.states.saltmod>`

    CLI Examples:

    .. code-block:: bash

        salt-call --local state.orchestrate webserver
        salt-call --local state.orchestrate webserver saltenv=dev test=True
        salt-call --local state.orchestrate webserver saltenv=dev pillarenv=aws
    '''
    return _orchestrate(mods=mods,
                        saltenv=saltenv,
                        test=test,
                        exclude=exclude,
                        pillar=pillar,
                        pillarenv=pillarenv)
0.001036
def CreateSourceType(cls, type_indicator, attributes):
    """Creates a source type.

    Args:
      type_indicator (str): source type indicator.
      attributes (dict[str, object]): source type attributes.

    Returns:
      SourceType: a source type.

    Raises:
      FormatError: if the type indicator is not set or unsupported,
          or if required attributes are missing.
    """
    if type_indicator not in cls._source_type_classes:
        raise errors.FormatError(
            'Unsupported type indicator: {0:s}.'.format(type_indicator))

    return cls._source_type_classes[type_indicator](**attributes)
0.003236
def _build_dictionary(self, results):
    """
    Build model dictionary keyed by the relation's foreign key.

    :param results: The results
    :type results: Collection

    :rtype: dict
    """
    foreign = self._first_key

    dictionary = {}

    for result in results:
        key = getattr(result, foreign)
        if key not in dictionary:
            dictionary[key] = []

        dictionary[key].append(result)

    return dictionary
0.003984
def _delete(self, tx_id):
    """Delete a transaction.

    Read documentation about CRAB model in
    https://blog.bigchaindb.com/crab-create-retrieve-append-burn-b9f6d111f460.

    :param tx_id: transaction id
    :return:
    """
    txs = self.driver.instance.transactions.get(asset_id=self.get_asset_id(tx_id))
    unspent = txs[-1]
    output_index = 0
    output = unspent['outputs'][output_index]

    transfer_input = {
        'fulfillment': output['condition']['details'],
        'fulfills': {
            'output_index': output_index,
            'transaction_id': unspent['id']
        },
        'owners_before': output['public_keys']
    }

    prepared_transfer_tx = self.driver.instance.transactions.prepare(
        operation='TRANSFER',
        asset=unspent['asset'] if 'id' in unspent['asset'] else {'id': unspent['id']},
        inputs=transfer_input,
        recipients=self.BURN_ADDRESS,
        metadata={
            'namespace': 'burned',
        }
    )

    signed_tx = self.driver.instance.transactions.fulfill(
        prepared_transfer_tx,
        private_keys=self.user.private_key,
    )

    self.driver.instance.transactions.send_commit(signed_tx)
0.003864
def group(self):
    """Yield a group from the iterable"""
    yield self.current
    # start enumerate at 1 because we already yielded the last saved item
    for num, item in enumerate(self.iterator, 1):
        self.current = item
        if num == self.limit:
            break
        yield item
    else:
        self.on_going = False
0.005263
def upd_textures(self, *args):
    """Create one :class:`SwatchButton` for each texture"""
    if self.canvas is None:
        Clock.schedule_once(self.upd_textures, 0)
        return
    for name in list(self.swatches.keys()):
        if name not in self.atlas.textures:
            self.remove_widget(self.swatches[name])
            del self.swatches[name]
    for (name, tex) in self.atlas.textures.items():
        if name in self.swatches and self.swatches[name] != tex:
            self.remove_widget(self.swatches[name])
        if name not in self.swatches or self.swatches[name] != tex:
            self.swatches[name] = SwatchButton(
                name=name,
                tex=tex,
                size_hint=(None, None),
                size=self.swatch_size
            )
            self.add_widget(self.swatches[name])
0.002191
def created(self):
    'return datetime.datetime'
    return dateutil.parser.parse(str(self.f.latestRevision.created))
0.015873
def components_to_df(components, id_func=None):
    """
    Convert components to a join table with columns id1, id2

    Args:
        components: A collection of components, each of which is a set of
            vertex ids. If a dictionary, then the key is the id for the
            component. Otherwise, the component id is determined by applying
            id_func to the component.
        id_func: If components is a dictionary, this should be None.
            Otherwise, this is a callable that, given a set of vertices,
            determines the id. If components is not a dict and id_func is
            None, it defaults to `min`.

    Returns:
        A dataframe representing the one-to-many relationship between
        component names (id1) and their members (id2).
    """
    deduped = np.empty((0, 2), dtype=int)
    if id_func is None:
        if isinstance(components, dict):
            raise ValueError("If components is a dict, id_func should be None.")
        else:
            id_func = min
    for c in components:
        if id_func is None:
            id1 = c
            c = components[c]
        else:
            id1 = id_func(c)
        deduped = np.append(deduped, [[id1, id2] for id2 in c], axis=0)
    deduped = pd.DataFrame(deduped, columns=['id1', 'id2'])
    return deduped
0.004655
def _update_sig(self, m, key):
    """
    Sign 'm' with the PrivKey 'key' and update our own 'sig_val'.
    Note that, even when 'sig_alg' is not None, we use the signature scheme
    of the PrivKey (neither do we care to compare the both of them).
    """
    if self.sig_alg is None:
        if self.tls_session.tls_version >= 0x0300:
            self.sig_val = key.sign(m, t='pkcs', h='md5-sha1')
        else:
            self.sig_val = key.sign(m, t='pkcs', h='md5')
    else:
        h, sig = _tls_hash_sig[self.sig_alg].split('+')
        if sig.endswith('pss'):
            t = "pss"
        else:
            t = "pkcs"
        self.sig_val = key.sign(m, t=t, h=h)
0.002695
def worker_task(work_item, config):
    """The celery task which performs a single mutation and runs a test suite.

    This runs `cosmic-ray worker` in a subprocess and returns the results,
    passing `config` to it via stdin.

    Args:
        work_item: A dict describing a WorkItem.
        config: The configuration to use for the test execution.

    Returns: An updated WorkItem
    """
    global _workspace
    _ensure_workspace(config)

    result = worker(
        work_item.module_path,
        config.python_version,
        work_item.operator_name,
        work_item.occurrence,
        config.test_command,
        config.timeout)

    return work_item.job_id, result
0.001462
def get_data_from_sources(patton_config: PattonRunningConfig,
                          dependency_or_banner: str = "dependency") \
        -> List[str]:
    """This function try to get data from different sources:

    - command line arguments
    - from external input file
    - from stdin

    Return a list with the content of all of collected data. A list element
    by each input data found.

    :param dependency_or_banner: allowed values are: ["dependency" | "banner"]
    :type dependency_or_banner: str
    """
    def _read_stdin() -> str:
        # Read input with 5 sec timeout
        while sys.stdin in select.select([sys.stdin], [], [], 2)[0]:
            line = sys.stdin.readline()
            if line:
                yield line
            else:
                # an empty line means stdin has been closed
                return

    # --------------------------------------------------------------------------
    # Find data source
    # --------------------------------------------------------------------------
    dependencies = []

    # Data from command line from user?
    if patton_config.nargs_input:
        if dependency_or_banner == "banner":
            dependencies.append(["cli_input", patton_config.nargs_input])
        else:
            dependencies.extend(patton_config.nargs_input)

    # Data from stdin input?
    if not sys.stdin.isatty():
        input_read = "".join(list(_read_stdin()))

        # YES => Data from stdin
        if input_read:
            if dependency_or_banner == "banner":
                dependencies.append(["stdin", input_read])
            else:
                dependencies.extend(input_read.splitlines())

    # Data from file?
    if patton_config.data_from_file:
        f = op.abspath(op.join(op.abspath(os.getcwd()),
                               patton_config.data_from_file))

        # YES => dependencies from file
        with open(f, "r") as f:
            if dependency_or_banner == "banner":
                dependencies.append(["file", f.read()])
            else:
                dependencies.extend(f.read().splitlines())

    # NO data from any other source => Continuous check selected?
    if not dependencies and not patton_config.follow_checking:
        # NO data source found => Error! We need some data!
        raise PCException("You need to specify andy dependency "
                          "from any kind of source: stdin, "
                          "file of cli")

    return dependencies
0.000402
def get_base_branch():
    # type: () -> str
    """ Return the base branch for the current branch.

    This function will first try to guess the base branch and if it can't it
    will let the user choose the branch from the list of all local branches.

    Returns:
        str: The name of the branch the current branch is based on.
    """
    base_branch = git.guess_base_branch()

    if base_branch is None:
        log.info("Can't guess the base branch, you have to pick one yourself:")
        base_branch = choose_branch()

    return base_branch
0.001795
def format(self, *args, **kwargs):
    """Return a formatted version, using substitutions from args and kwargs.

    The substitutions are identified by braces ('{' and '}').
    """
    return self.__class__(super(ColorStr, self).format(*args, **kwargs),
                          keep_tags=True)
0.013937
def frange(stop, start=None, step=1.0):
    """A :func:`range` clone for float-based ranges.

    >>> frange(5)
    [0.0, 1.0, 2.0, 3.0, 4.0]
    >>> frange(6, step=1.25)
    [0.0, 1.25, 2.5, 3.75, 5.0]
    >>> frange(100.5, 101.5, 0.25)
    [100.5, 100.75, 101.0, 101.25]
    >>> frange(5, 0)
    []
    >>> frange(5, 0, step=-1.25)
    [5.0, 3.75, 2.5, 1.25]
    """
    if not step:
        raise ValueError('step must be non-zero')
    if start is None:
        start, stop = 0.0, stop * 1.0
    else:
        # swap when all args are used
        stop, start = start * 1.0, stop * 1.0
    count = int(math.ceil((stop - start) / step))
    ret = [None] * count
    if not ret:
        return ret
    ret[0] = start
    for i in xrange(1, count):
        ret[i] = ret[i - 1] + step
    return ret
0.001252
def _load_json_config(self):
    """Load the configuration file in JSON format

    :rtype: dict

    """
    try:
        return json.loads(self._read_config())
    except ValueError as error:
        raise ValueError(
            'Could not read configuration file: {}'.format(error))
0.006289
def _updateType(self):
    """Make sure that the class behaves like the data structure that it
    is, so that we don't get a ListFile trying to represent a dict."""
    data = self._data()
    # Change type if needed
    if isinstance(data, dict) and isinstance(self, ListFile):
        self.__class__ = DictFile
    elif isinstance(data, list) and isinstance(self, DictFile):
        self.__class__ = ListFile
0.004515