def probability_gt(self, x):
    """
    Returns the probability of a random variable being greater
    than the given value.
    """
    if self.mean is None:
        return
    p = normdist(x=x, mu=self.mean, sigma=self.standard_deviation)
    return 1 - p
def _recv(self):
    """Take all available bytes from socket, return list of any responses from parser"""
    recvd = []
    self._lock.acquire()
    if not self._can_send_recv():
        log.warning('%s cannot recv: socket not connected', self)
        self._lock.release()
        return ()
    while len(recvd) < self.config['sock_chunk_buffer_count']:
        try:
            data = self._sock.recv(self.config['sock_chunk_bytes'])
            # We expect socket.recv to raise an exception if there are no
            # bytes available to read from the socket in non-blocking mode.
            # but if the socket is disconnected, we will get empty data
            # without an exception raised
            if not data:
                log.error('%s: socket disconnected', self)
                self._lock.release()
                self.close(error=Errors.KafkaConnectionError('socket disconnected'))
                return []
            else:
                recvd.append(data)
        except SSLWantReadError:
            break
        except ConnectionError as e:
            if six.PY2 and e.errno == errno.EWOULDBLOCK:
                break
            log.exception('%s: Error receiving network data'
                          ' closing socket', self)
            self._lock.release()
            self.close(error=Errors.KafkaConnectionError(e))
            return []
        except BlockingIOError:
            if six.PY3:
                break
            self._lock.release()
            raise

    recvd_data = b''.join(recvd)
    if self._sensors:
        self._sensors.bytes_received.record(len(recvd_data))

    try:
        responses = self._protocol.receive_bytes(recvd_data)
    except Errors.KafkaProtocolError as e:
        self._lock.release()
        self.close(e)
        return []
    else:
        self._lock.release()
        return responses
def cmp_pkgrevno(package, revno, pkgcache=None):
    """Compare supplied revno with the revno of the installed package.

    * 1 => Installed revno is greater than supplied arg
    * 0 => Installed revno is the same as supplied arg
    * -1 => Installed revno is less than supplied arg

    This function imports YumBase function if the pkgcache argument
    is None.
    """
    if not pkgcache:
        y = yum.YumBase()
        packages = y.doPackageLists()
        pkgcache = {i.Name: i.version for i in packages['installed']}
    pkg = pkgcache[package]
    if pkg > revno:
        return 1
    if pkg < revno:
        return -1
    return 0
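# A minimal usage sketch for the function above, assuming a pre-built pkgcache
# dict (the package names and version strings below are made up). Note the
# comparison is a plain string comparison, so revnos compare lexicographically.
installed = {"bash": "4.4", "curl": "7.61"}

print(cmp_pkgrevno("bash", "4.2", pkgcache=installed))   # 1: installed revno is newer
print(cmp_pkgrevno("bash", "4.4", pkgcache=installed))   # 0: same revno
print(cmp_pkgrevno("curl", "7.99", pkgcache=installed))  # -1: installed revno is older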
def _restore(self, builder):
    """
    The restore extension.

    :param builder: The query builder
    :type builder: orator.orm.builder.Builder
    """
    builder.with_trashed()

    return builder.update({builder.get_model().get_deleted_at_column(): None})
def parse_coach_bsites_inf(infile):
    """Parse the Bsites.inf output file of COACH and return a list of rank-ordered binding site predictions

    Bsites.inf contains the summary of COACH clustering results after all other prediction algorithms have finished
    For each site (cluster), there are three lines:

    - Line 1: site number, c-score of coach prediction, cluster size
    - Line 2: algorithm, PDB ID, ligand ID, center of binding site (cartesian coordinates),
      c-score of the algorithm's prediction, binding residues from single template
    - Line 3: Statistics of ligands in the cluster

    C-score information:

    - "In our training data, a prediction with C-score>0.35 has average false positive and false negative rates below
      0.16 and 0.13, respectively." (https://zhanglab.ccmb.med.umich.edu/COACH/COACH.pdf)

    Args:
        infile (str): Path to Bsites.inf

    Returns:
        list: Ranked list of dictionaries, keys defined below

            - ``site_num``: cluster which is the consensus binding site
            - ``c_score``: confidence score of the cluster prediction
            - ``cluster_size``: number of predictions within this cluster
            - ``algorithm``: main? algorithm used to make the prediction
            - ``pdb_template_id``: PDB ID of the template used to make the prediction
            - ``pdb_template_chain``: chain of the PDB which has the ligand
            - ``pdb_ligand``: predicted ligand to bind
            - ``binding_location_coords``: centroid of the predicted ligand position in the homology model
            - ``c_score_method``: confidence score for the main algorithm
            - ``binding_residues``: predicted residues to bind the ligand
            - ``ligand_cluster_counts``: number of predictions per ligand

    """
    bsites_results = []

    with open(infile) as pp:
        lines = list(filter(None, (line.rstrip() for line in pp)))

    for i in range(len(lines) // 3):
        bsites_site_dict = {}

        line1 = lines[i * 3].split('\t')
        line2 = lines[i * 3 + 1].split('\t')
        line3 = lines[i * 3 + 2]

        bsites_site_dict['site_num'] = line1[0]
        bsites_site_dict['c_score'] = float(line1[1])
        bsites_site_dict['cluster_size'] = line1[2]

        bsites_site_dict['algorithm'] = line2[0]
        bsites_site_dict['pdb_template_id'] = line2[1][:4]
        bsites_site_dict['pdb_template_chain'] = line2[1][4]
        bsites_site_dict['pdb_ligand'] = line2[2]
        bsites_site_dict['binding_location_coords'] = tuple(float(x) for x in line2[3].split())

        # TODO: what's the difference between this c-score and the cluster's c-score?
        # how is the cluster's c-score computed? it's not the average c-score of all methods
        # also why are some COFACTOR c-scores >1?
        # 160411 - seems like the COFACTOR "BS-score" is being reported here, not its c-score...
        tmp_split = line2[4].split(' :')
        bsites_site_dict['c_score_method'] = tmp_split[0]
        bsites_site_dict['binding_residues'] = tmp_split[1]

        bsites_site_dict['ligand_cluster_counts'] = line3

        bsites_results.append(bsites_site_dict)

    return bsites_results
def get_link(self, rel):
    """
    Return link for specified resource
    """
    if rel in self.links:
        return self.links[rel]
    raise ResourceNotFound('Resource requested: %r is not available '
                           'on this element.' % rel)
def multiply(self, a, b):
    """
    :type A: List[List[int]]
    :type B: List[List[int]]
    :rtype: List[List[int]]
    """
    if a is None or b is None:
        return None
    m, n, l = len(a), len(a[0]), len(b[0])
    if len(b) != n:
        raise Exception("A's column number must be equal to B's row number.")
    c = [[0 for _ in range(l)] for _ in range(m)]
    for i, row in enumerate(a):
        for k, eleA in enumerate(row):
            if eleA:
                for j, eleB in enumerate(b[k]):
                    if eleB:
                        c[i][j] += eleA * eleB
    return c
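# A quick usage sketch for the sparse-aware matrix multiply above. The method
# never touches `self`, so a placeholder (None) stands in for it; the matrices
# below are made up.
a = [[1, 0, 0],
     [-1, 0, 3]]
b = [[7, 0, 0],
     [0, 0, 0],
     [0, 0, 1]]

print(multiply(None, a, b))  # [[7, 0, 0], [-7, 0, 3]]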
async def generate_waifu_insult(self, avatar):
    """Generate a waifu insult image.

    This function is a coroutine.

    Parameters:
        avatar: str - http/s url pointing to an image, has to have proper headers and be a direct link to an image

    Return Type: image data"""
    if not isinstance(avatar, str):
        raise TypeError("type of 'avatar' must be str.")
    async with aiohttp.ClientSession() as session:
        async with session.post("https://api.weeb.sh/auto-image/waifu-insult",
                                headers=self.__headers,
                                data={"avatar": avatar}) as resp:
            if resp.status == 200:
                return await resp.read()
            else:
                raise Exception((await resp.json())['message'])
def folderitem(self, obj, item, index):
    """Service triggered each time an item is iterated in folderitems.

    The use of this service prevents the extra-loops in child objects.

    :obj: the instance of the class to be foldered
    :item: dict containing the properties of the object to be used by
        the template
    :index: current index of the item
    """
    uid = api.get_uid(obj)
    url = api.get_url(obj)
    title = api.get_title(obj)

    # get the category
    if self.show_categories_enabled():
        category = obj.getCategoryTitle()
        if category not in self.categories:
            self.categories.append(category)
        item["category"] = category

    ref_results = self.get_reference_results()
    ref_result = ref_results.get(uid)

    item["Title"] = title
    item["replace"]["Title"] = get_link(url, value=title)
    item["result"] = ref_result.get("result")
    item["min"] = ref_result.get("min")
    item["max"] = ref_result.get("max")

    # Icons
    after_icons = ""
    if obj.getAccredited():
        after_icons += get_image(
            "accredited.png", title=_("Accredited"))
    if obj.getAttachmentOption() == "r":
        after_icons += get_image(
            "attach_reqd.png", title=_("Attachment required"))
    if obj.getAttachmentOption() == "n":
        after_icons += get_image(
            "attach_no.png", title=_("Attachment not permitted"))
    if after_icons:
        item["after"]["Title"] = after_icons

    return item
def make_slice_key(cls, start_string, size_string):
    """
    Converts the given start and size query parts to a slice key.

    :return: slice key
    :rtype: slice
    """
    try:
        start = int(start_string)
    except ValueError:
        raise ValueError('Query parameter "start" must be a number.')
    if start < 0:
        raise ValueError('Query parameter "start" must be zero or '
                         'a positive number.')
    try:
        size = int(size_string)
    except ValueError:
        raise ValueError('Query parameter "size" must be a number.')
    if size < 1:
        raise ValueError('Query parameter "size" must be a positive '
                         'number.')
    return slice(start, start + size)
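# A usage sketch showing how the query parts map to a slice. The classmethod's
# `cls` argument is unused by the body, so None stands in for it here.
key = make_slice_key(None, "20", "10")
print(key)                    # slice(20, 30, None)
print(list(range(100))[key])  # [20, 21, ..., 29]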
def ack(self, msg):
    """Process the message and determine what to do with it.
    """
    self.log.info("receiverId <%s> Received: <%s> " % (self.receiverId, msg['body']))
    #return super(MyStomp, self).ack(msg)
    return stomper.NO_REPONSE_NEEDED
def rec_update(self, other, **third):
    """Recursively update the dictionary with the contents of other and
    third like dict.update() does - but don't overwrite sub-dictionaries.

    Example:
    >>> d = RecursiveDictionary({'foo': {'bar': 42}})
    >>> d.rec_update({'foo': {'baz': 36}})
    >>> d
    {'foo': {'baz': 36, 'bar': 42}}
    """
    try:
        iterator = other.iteritems()
    except AttributeError:
        iterator = other

    self.iter_rec_update(iterator)
    self.iter_rec_update(third.iteritems())
def get_content(self, key):
    """
    Gets given content from the cache.

    Usage::

        >>> cache = Cache()
        >>> cache.add_content(John="Doe", Luke="Skywalker")
        True
        >>> cache.get_content("Luke")
        'Skywalker'

    :param key: Content to retrieve.
    :type key: object
    :return: Content.
    :rtype: object
    """
    LOGGER.debug("> Retrieving '{0}' content from the cache.".format(self.__class__.__name__, key))

    return self.get(key)
def ParseFileSystemsStruct(struct_class, fs_count, data):
    """Take the struct type and parse it into a list of structs."""
    results = []
    cstr = lambda x: x.split(b"\x00", 1)[0]
    for count in range(0, fs_count):
        struct_size = struct_class.GetSize()
        s_data = data[count * struct_size:(count + 1) * struct_size]
        s = struct_class(s_data)
        s.f_fstypename = cstr(s.f_fstypename)
        s.f_mntonname = cstr(s.f_mntonname)
        s.f_mntfromname = cstr(s.f_mntfromname)
        results.append(s)
    return results
def delmod_cli(argv, alter_logger=True):
    """Command-line access to ``delmod`` functionality.

    The ``delmod`` task deletes "on-the-fly" model information from a
    Measurement Set. It is so easy to implement that a standalone
    function is essentially unnecessary. Just write::

        from pwkit.environments.casa import util
        cb = util.tools.calibrater()
        cb.open('dataset.ms', addcorr=False, addmodel=False)
        cb.delmod(otf=True, scr=False)
        cb.close()

    If you want to delete the scratch columns, use :func:`delcal`. If you
    want to clear the scratch columns, use :func:`clearcal`.
    """
    check_usage(delmod_doc, argv, usageifnoargs=True)
    if alter_logger:
        util.logger()

    cb = util.tools.calibrater()

    for mspath in argv[1:]:
        cb.open(b(mspath), addcorr=False, addmodel=False)
        cb.delmod(otf=True, scr=False)
        cb.close()
def finalize_sv(samples, config):
    """Combine results from multiple sv callers into a single ordered 'sv' key.
    """
    by_bam = collections.OrderedDict()
    for x in samples:
        batch = dd.get_batch(x) or [dd.get_sample_name(x)]
        try:
            by_bam[x["align_bam"], tuple(batch)].append(x)
        except KeyError:
            by_bam[x["align_bam"], tuple(batch)] = [x]
    by_batch = collections.OrderedDict()
    lead_batches = {}
    for grouped_calls in by_bam.values():
        def orig_svcaller_order(x):
            orig_callers = tz.get_in(["config", "algorithm", "svcaller_orig"], x)
            cur_caller = tz.get_in(["config", "algorithm", "svcaller"], x)
            return orig_callers.index(cur_caller)
        sorted_svcalls = sorted([x for x in grouped_calls if "sv" in x],
                                key=orig_svcaller_order)
        final = grouped_calls[0]
        if len(sorted_svcalls) > 0:
            final["sv"] = reduce(operator.add, [x["sv"] for x in sorted_svcalls])
        final["config"]["algorithm"]["svcaller"] = final["config"]["algorithm"].pop("svcaller_orig")
        batch = dd.get_batch(final) or dd.get_sample_name(final)
        batches = batch if isinstance(batch, (list, tuple)) else [batch]
        if len(batches) > 1:
            lead_batches[(dd.get_sample_name(final), dd.get_phenotype(final) == "germline")] = batches[0]
        for batch in batches:
            try:
                by_batch[batch].append(final)
            except KeyError:
                by_batch[batch] = [final]
    out = []
    for batch, items in by_batch.items():
        if any("svplots" in dd.get_tools_on(d) for d in items):
            items = plot.by_regions(items)
        for data in items:
            if lead_batches.get((dd.get_sample_name(data), dd.get_phenotype(data) == "germline")) in [batch, None]:
                out.append([data])
    return out
def _preprocess_data(self, X, Y=None, idxs=None, train=False):
    """
    Preprocess the data:
    1. Convert sparse matrix to dense matrix.
    2. Select subset of the input if idxs exists.

    :param X: The input data of the model.
    :type X: pair with candidates and corresponding features
    :param Y: The labels of input data.
    :type Y: list or numpy.array
    :param idxs: The selected indices of input data.
    :type idxs: list or numpy.array
    :param train: Indicator of training set.
    :type train: bool
    :return: Preprocessed data.
    :rtype: list of features
    """
    C, F = X

    if issparse(F):
        F = np.array(F.todense(), dtype=np.float32)

    if Y is not None:
        Y = np.array(Y).astype(np.float32)

    if idxs is None:
        if Y is not None:
            return F, Y
        else:
            return F
    if Y is not None:
        return F[idxs], Y[idxs]
    else:
        return F[idxs]
def get_other_keys(self, key, including_current=False):
    """Returns list of other keys that are mapped to the same value as specified key.

    @param key - key for which other keys should be returned.
    @param including_current if set to True - key will also appear on this list."""
    other_keys = []
    if key in self:
        other_keys.extend(self.__dict__[str(type(key))][key])
        if not including_current:
            other_keys.remove(key)
    return other_keys
def before(self, value):
    """
    Sets the operator type to Query.Op.Before and sets the value to
    the amount that this query should be lower than. This is functionally
    the same as doing the lessThan operation, but is useful for visual
    queries for things like dates.

    :param      value | <variant>

    :return     <Query>

    :usage      |>>> from orb import Query as Q
                |>>> query = Q('dateStart').before(date.today())
                |>>> print query
                |dateStart before 2011-10-10
    """
    newq = self.copy()
    newq.setOp(Query.Op.Before)
    newq.setValue(value)
    return newq
def kfolds(n, k, sz, p_testset=None, seed=7238):
    """
    return train, valid [,test]
    testset if p_testset
    :param n:
    :param k:
    :param sz:
    :param p_testset:
    :param seed:
    :return:
    """
    trains, tests = split_rand(sz, p_testset, seed)
    ntrain = len(trains)

    # np.random.seed(seed)
    with np_seed(seed):
        np.random.shuffle(trains)

    if n == k:
        # no split
        train, valid = trains, trains
    else:
        foldsz = ntrain // k
        itrain = np.arange(ntrain) // foldsz != n
        train = trains[itrain]
        valid = trains[~itrain]

    if not p_testset:
        return train, valid
    else:
        return train, valid, tests
def _encode_value(self, value):
    """ Encodes the value such that it can be stored into MongoDB.

    Any primitive types are stored directly into MongoDB, while non-primitive types
    are pickled and stored as GridFS objects. The id pointing to a GridFS object
    replaces the original value.

    Args:
        value (object): The object that should be encoded for storing in MongoDB.

    Returns:
        object: The encoded value ready to be stored in MongoDB.
    """
    if isinstance(value, (int, float, str, bool, datetime)):
        return value
    elif isinstance(value, list):
        return [self._encode_value(item) for item in value]
    elif isinstance(value, dict):
        result = {}
        for key, item in value.items():
            result[key] = self._encode_value(item)
        return result
    else:
        return self._gridfs.put(Binary(pickle.dumps(value)),
                                workflow_id=self._workflow_id)
def _callFunc(session, funcName, password, args):
    """Call custom cjdns admin function"""
    txid = _randomString()
    sock = session.socket
    sock.send(bytearray('d1:q6:cookie4:txid10:%se' % txid, 'utf-8'))
    msg = _getMessage(session, txid)
    cookie = msg['cookie']

    txid = _randomString()
    tohash = (password + cookie).encode('utf-8')
    req = {
        'q': funcName,
        'hash': hashlib.sha256(tohash).hexdigest(),
        'cookie': cookie,
        'args': args,
        'txid': txid
    }

    if password:
        req['aq'] = req['q']
        req['q'] = 'auth'
        reqBenc = bencode(req).encode('utf-8')
        req['hash'] = hashlib.sha256(reqBenc).hexdigest()

    reqBenc = bencode(req)
    sock.send(bytearray(reqBenc, 'utf-8'))
    return _getMessage(session, txid)
def _run(self):
    """Run the worker function with some custom exception handling."""
    try:
        # Run the worker
        self.worker()
    except SystemExit as ex:
        # sys.exit() was called
        if isinstance(ex.code, int):
            if ex.code is not None and ex.code != 0:
                # A custom exit code was specified
                self._shutdown(
                    'Exiting with non-zero exit code {exitcode}'.format(
                        exitcode=ex.code),
                    ex.code)
        else:
            # A message was passed to sys.exit()
            self._shutdown(
                'Exiting with message: {msg}'.format(msg=ex.code), 1)
    except Exception as ex:
        if self.detach:
            self._shutdown('Dying due to unhandled {cls}: {msg}'.format(
                cls=ex.__class__.__name__, msg=str(ex)), 127)
        else:
            # We're not detached so just raise the exception
            raise

    self._shutdown('Shutting down normally')
def stitch(network, donor, P_network, P_donor, method='nearest',
           len_max=sp.inf, len_min=0, label_suffix=''):
    r'''
    Stitches a second network to the current network.

    Parameters
    ----------
    network : OpenPNM Network Object
        The Network to which the donor Network will be attached

    donor : OpenPNM Network Object
        The Network to stitch on to the current Network

    P_network : array_like
        The pores on the current Network

    P_donor : array_like
        The pores on the donor Network

    label_suffix : string or None
        Some text to append to each label in the donor Network before
        inserting them into the recipient.  The default is to append no
        text, but a common option would be to append the donor Network's
        name.  To insert none of the donor labels, use None.

    len_max : float
        Set a length limit on length of new throats

    method : string (default = 'nearest')
        The method to use when making pore to pore connections.  Options are:

        - 'delaunay' : Use a Delaunay tessellation
        - 'nearest' : Connects each pore on the receptor network to its nearest
          pore on the donor network

    Notes
    -----
    Before stitching it is necessary to translate the pore coordinates of
    one of the Networks so that it is positioned correctly relative to the
    other.

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[5, 5, 5])
    >>> pn2 = op.network.Cubic(shape=[5, 5, 5])
    >>> [pn.Np, pn.Nt]
    [125, 300]
    >>> [pn2.Np, pn2.Nt]
    [125, 300]
    >>> pn2['pore.coords'][:, 2] += 5.0
    >>> op.topotools.stitch(network=pn, donor=pn2, P_network=pn.pores('top'),
    ...                     P_donor=pn2.pores('bottom'), method='nearest',
    ...                     len_max=1.0)
    >>> [pn.Np, pn.Nt]
    [250, 625]

    '''
    # Ensure Networks have no associated objects yet
    if (len(network.project) > 1) or (len(donor.project) > 1):
        raise Exception('Cannot stitch a Network with active objects')
    network['throat.stitched'] = False
    # Get the initial number of pores and throats
    N_init = {}
    N_init['pore'] = network.Np
    N_init['throat'] = network.Nt
    if method == 'nearest':
        P1 = P_network
        P2 = P_donor + N_init['pore']  # Increment pores on donor
        C1 = network['pore.coords'][P_network]
        C2 = donor['pore.coords'][P_donor]
        D = sp.spatial.distance.cdist(C1, C2)
        [P1_ind, P2_ind] = sp.where(D <= len_max)
        conns = sp.vstack((P1[P1_ind], P2[P2_ind])).T
    else:
        raise Exception('<{}> method not supported'.format(method))

    # Enter donor's pores into the Network
    extend(network=network, pore_coords=donor['pore.coords'])

    # Enter donor's throats into the Network
    extend(network=network, throat_conns=donor['throat.conns'] + N_init['pore'])

    # Trim throats that are longer than given len_max
    C1 = network['pore.coords'][conns[:, 0]]
    C2 = network['pore.coords'][conns[:, 1]]
    L = sp.sum((C1 - C2)**2, axis=1)**0.5
    conns = conns[L <= len_max]

    # Add donor labels to recipient network
    if label_suffix is not None:
        if label_suffix != '':
            label_suffix = '_'+label_suffix
        for label in donor.labels():
            element = label.split('.')[0]
            locations = sp.where(network._get_indices(element) >= N_init[element])[0]
            if label + label_suffix not in network.keys():
                network[label + label_suffix] = False
            network[label+label_suffix][locations] = donor[label]

    # Add the new stitch throats to the Network
    extend(network=network, throat_conns=conns, labels='stitched')

    # Remove donor from Workspace, if present
    # This check allows for the reuse of a donor Network multiple times
    for sim in list(ws.values()):
        if donor in sim:
            del ws[sim.name]
def children_sum(self, children, node):
    """Calculate children's total sum"""
    return sum([self.value(value, node) for value in children])
async def base_combine(source, switch=False, ordered=False, task_limit=None):
    """Base operator for managing an asynchronous sequence of sequences.

    The sequences are awaited concurrently, although it's possible to limit
    the amount of running sequences using the `task_limit` argument.

    The ``switch`` argument enables the switch mechanism, which causes the
    previous subsequence to be discarded when a new one is created.

    The items can either be generated in order or as soon as they're received,
    depending on the ``ordered`` argument.
    """

    # Task limit
    if task_limit is not None and not task_limit > 0:
        raise ValueError('The task limit must be None or greater than 0')

    # Safe context
    async with StreamerManager() as manager:
        main_streamer = await manager.enter_and_create_task(source)

        # Loop over events
        while manager.tasks:

            # Extract streamer groups
            substreamers = manager.streamers[1:]
            mainstreamers = [main_streamer] if main_streamer in manager.tasks else []

            # Switch - use the main streamer then the substreamer
            if switch:
                filters = mainstreamers + substreamers
            # Concat - use the first substreamer then the main streamer
            elif ordered:
                filters = substreamers[:1] + mainstreamers
            # Flat - use the substreamers then the main streamer
            else:
                filters = substreamers + mainstreamers

            # Wait for next event
            streamer, task = await manager.wait_single_event(filters)

            # Get result
            try:
                result = task.result()

            # End of stream
            except StopAsyncIteration:

                # Main streamer is finished
                if streamer is main_streamer:
                    main_streamer = None

                # A substreamer is finished
                else:
                    await manager.clean_streamer(streamer)

                    # Re-schedule the main streamer if necessary
                    if main_streamer is not None and main_streamer not in manager.tasks:
                        manager.create_task(main_streamer)

            # Process result
            else:

                # Switch mechanism
                if switch and streamer is main_streamer:
                    await manager.clean_streamers(substreamers)

                # Setup a new source
                if streamer is main_streamer:
                    await manager.enter_and_create_task(result)

                    # Re-schedule the main streamer if task limit allows it
                    if task_limit is None or task_limit > len(manager.tasks):
                        manager.create_task(streamer)

                # Yield the result
                else:
                    yield result

                    # Re-schedule the streamer
                    manager.create_task(streamer)
def view_list(self):
    '''return a list of polygon indexes lists for the waypoints'''
    done = set()
    ret = []
    while len(done) != self.count():
        p = self.view_indexes(done)
        if len(p) > 0:
            ret.append(p)
    return ret
def cli(ctx, path, renku_home, use_external_storage):
    """Check common Renku commands used in various situations."""
    ctx.obj = LocalClient(
        path=path,
        renku_home=renku_home,
        use_external_storage=use_external_storage,
    )
def parse_cookie(cookie: str) -> Dict[str, str]:
    """Parse a ``Cookie`` HTTP header into a dict of name/value pairs.

    This function attempts to mimic browser cookie parsing behavior;
    it specifically does not follow any of the cookie-related RFCs
    (because browsers don't either).

    The algorithm used is identical to that used by Django version 1.9.10.

    .. versionadded:: 4.4.2
    """
    cookiedict = {}
    for chunk in cookie.split(str(";")):
        if str("=") in chunk:
            key, val = chunk.split(str("="), 1)
        else:
            # Assume an empty name per
            # https://bugzilla.mozilla.org/show_bug.cgi?id=169091
            key, val = str(""), chunk
        key, val = key.strip(), val.strip()
        if key or val:
            # unquote using Python's algorithm.
            cookiedict[key] = _unquote_cookie(val)
    return cookiedict
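# A small usage sketch, assuming the snippet's dependencies (typing.Dict and the
# private _unquote_cookie helper it calls) are available in the module; the
# header value below is made up.
header = "session=abc123; theme=dark; =orphan"
print(parse_cookie(header))
# {'session': 'abc123', 'theme': 'dark', '': 'orphan'}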
def _footer(trigger, data, content):
    """
    footer of the note
    :param trigger: trigger object
    :param data: data to be used
    :param content: add the footer of the note to the content
    :return: content string
    """
    # footer of the note
    footer = EvernoteMgr.set_note_footer(data, trigger)
    content += footer

    return content
def network_lpf(network, snapshots=None, skip_pre=False):
    """
    Linear power flow for generic network.

    Parameters
    ----------
    snapshots : list-like|single snapshot
        A subset or an element of network.snapshots on which to run
        the power flow, defaults to network.snapshots
    skip_pre : bool, default False
        Skip the preliminary steps of computing topology, calculating
        dependent values and finding bus controls.

    Returns
    -------
    None
    """
    _network_prepare_and_run_pf(network, snapshots, skip_pre, linear=True)
def sequence_molecular_weight(seq):
    """Returns the molecular weight of the polypeptide sequence.

    Notes
    -----
    Units = Daltons

    Parameters
    ----------
    seq : str
        Sequence of amino acids.
    """
    if 'X' in seq:
        warnings.warn(_nc_warning_str, NoncanonicalWarning)
    return sum(
        [residue_mwt[aa] * n for aa, n in Counter(seq).items()]) + water_mass
def overlapped_convolution(bin_template, bin_image, tollerance=0.5, splits=(4, 2)):
    """
    As each of these images holds only binary values, and RFFT2 works on
    float64 greyscale values, we can make the convolution more efficient by
    breaking the image up into :splits: sections. Each one of these sections
    then has its greyscale value adjusted and then stacked.

    We then apply the convolution to this 'stack' of images, and adjust the
    resultant position matches.
    """
    th, tw = bin_template.shape
    ih, iw = bin_image.shape
    hs, ws = splits
    h = ih // hs
    w = iw // ws

    count = numpy.count_nonzero(bin_template)
    assert count > 0
    assert h >= th
    assert w >= tw

    yoffset = [(i * h, ((i + 1) * h) + (th - 1)) for i in range(hs)]
    xoffset = [(i * w, ((i + 1) * w) + (tw - 1)) for i in range(ws)]

    # image_stacks is Origin (x,y), array, z (height in stack)
    image_stacks = [((x1, y1), bin_image[y1:y2, x1:x2], float((count + 1) ** (num)))
                    for num, (x1, x2, y1, y2) in enumerate((x1, x2, y1, y2)
                                                           for (x1, x2) in xoffset
                                                           for (y1, y2) in yoffset)]

    pad_h = max(i.shape[0] for _, i, _ in image_stacks)
    pad_w = max(i.shape[1] for _, i, _ in image_stacks)

    # rfft metrics must be an even size - why ... maths?
    pad_w += pad_w % 2
    pad_h += pad_h % 2

    overlapped_image = sum_2d_images(pad_bin_image_to_shape(i, (pad_h, pad_w)) * num
                                     for _, i, num in image_stacks)

    #print "Overlap splits %r, Image Size (%d,%d), #Overlapped Size (%d,%d)" % (splits,iw,ih,pad_w,pad_h)

    # Calculate the convolution of the FFT's of the overlapped image & template
    convolution_freqs = (rfft2(overlapped_image) *
                         rfft2(bin_template[::-1, ::-1], overlapped_image.shape))

    # Reverse the FFT to find the result overlapped image
    convolution_image = irfft2(convolution_freqs)

    # At this point, the maximum point in convolution_image should be the
    # bottom right (why?) of the area of greatest match
    results = set()
    for (x, y), _, num in image_stacks[::-1]:
        test = convolution_image / num
        filtered = ((test >= (count - tollerance)) &
                    (test <= (count + tollerance)))
        match_points = numpy.transpose(numpy.nonzero(filtered))  # bottom right
        for (fy, fx) in match_points:
            if fx < (tw - 1) or fy < (th - 1):
                continue
            results.add((x + fx - (tw - 1), y + fy - (th - 1)))
        convolution_image %= num
    return list(results)
def sort_descendants(self, attr="name"):
    """
    This function sorts the branches of a given tree by considering
    node names. After the tree is sorted, nodes are labeled using
    ascending numbers. This can be used to ensure that nodes in a tree
    with the same node names are always labeled in the same way. Note
    that if duplicated names are present, extra criteria should be added
    to sort nodes.

    Unique id is stored as a node._nid attribute
    """
    node2content = self.get_cached_content(store_attr=attr, container_type=list)

    for n in self.traverse():
        if not n.is_leaf():
            n.children.sort(key=lambda x: str(sorted(node2content[x])))
def find_project_dir():
    """Runs up the stack to find the location of manage.py
    which will be considered a project base path.

    :rtype: str|unicode
    """
    frame = inspect.currentframe()

    while True:
        frame = frame.f_back
        fname = frame.f_globals['__file__']
        if os.path.basename(fname) == 'manage.py':
            break

    return os.path.dirname(fname)
def _str_to_windows(self, input_str, window_length, curse_forward):
    """
    Divide an input string to a list of substrings based on
    window_length and curse_forward values
    :param input_str: str
    :param window_length: int
    :param curse_forward: int
    :return: list [str]
    """
    windows = []
    i = 0
    len_input = len(input_str)
    while i < len_input:
        window_text = input_str[i:i + window_length]
        if self.sanitize_input:
            windows.append({
                'sanitized': self._sanitize(window_text),
                'text': window_text
            })
        else:
            windows.append({
                'text': window_text
            })
        i = i + curse_forward
    return windows
def parse_cfgstr_name_options(cfgstr):
    r"""
    Args:
        cfgstr (str):

    Returns:
        tuple: (cfgname, cfgopt_strs, subx)

    CommandLine:
        python -m utool.util_gridsearch --test-parse_cfgstr_name_options

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_gridsearch import *  # NOQA
        >>> import utool as ut
        >>> cfgstr = 'default' + NAMEVARSEP + 'myvar1=myval1,myvar2=myval2'
        >>> (cfgname, cfgopt_strs, subx) = parse_cfgstr_name_options(cfgstr)
        >>> result = ('(cfgname, cfg_optstrs, subx) = %s' % (ut.repr2((cfgname, cfgopt_strs, subx)),))
        >>> print(result)
        (cfgname, cfg_optstrs, subx) = ('default', 'myvar1=myval1,myvar2=myval2', None)

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_gridsearch import *  # NOQA
        >>> import utool as ut
        >>> cfgstr = 'default[0:1]' + NAMEVARSEP + 'myvar1=myval1,myvar2=myval2'
        >>> (cfgname, cfgopt_strs, subx) = parse_cfgstr_name_options(cfgstr)
        >>> result = ('(cfgname, cfg_optstrs, subx) = %s' % (ut.repr2((cfgname, cfgopt_strs, subx)),))
        >>> print(result)
        (cfgname, cfg_optstrs, subx) = ('default', 'myvar1=myval1,myvar2=myval2', slice(0, 1, None))

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_gridsearch import *  # NOQA
        >>> import utool as ut
        >>> cfgstr = 'default[0]' + NAMEVARSEP + 'myvar1=myval1,myvar2=myval2'
        >>> (cfgname, cfgopt_strs, subx) = parse_cfgstr_name_options(cfgstr)
        >>> result = ('(cfgname, cfg_optstrs, subx) = %s' % (ut.repr2((cfgname, cfgopt_strs, subx)),))
        >>> print(result)
        (cfgname, cfg_optstrs, subx) = ('default', 'myvar1=myval1,myvar2=myval2', [0])
    """
    import utool as ut
    cfgname_regex = ut.named_field('cfgname', r'[^\[:]*')  # name is optional
    subx_regex = r'\[' + ut.named_field('subx', r'[^\]]*') + r'\]'
    cfgopt_regex = re.escape(NAMEVARSEP) + ut.named_field('cfgopt', '.*')
    regex_str = cfgname_regex + ('(%s)?' % (subx_regex,)) + ('(%s)?' % (cfgopt_regex,))
    match = re.match(regex_str, cfgstr)
    assert match is not None, 'parsing of cfgstr failed'
    groupdict = match.groupdict()
    cfgname = groupdict['cfgname']
    if cfgname == '':
        cfgname = 'default'
    cfgopt_strs = groupdict.get('cfgopt', None)
    subx_str = groupdict.get('subx', None)
    if cfgopt_strs is None:
        cfgopt_strs = ''
    subx = ut.fuzzy_subset(subx_str)
    return cfgname, cfgopt_strs, subx
def i18n_system_locale():
    """ Return the system locale

    :return: the system locale (as a string)
    """
    log.debug('i18n_system_locale() called')

    lc, encoding = locale.getlocale()
    log.debug('locale.getlocale() = (lc="{lc}", encoding="{encoding}).'.format(lc=lc, encoding=encoding))
    if lc is None:
        lc, encoding = locale.getdefaultlocale()
        log.debug('locale.getdefaultlocale() = (lc="{lc}", encoding="{encoding}).'.format(lc=lc, encoding=encoding))

    return lc
def makeMarkovApproxToNormalByMonteCarlo(x_grid, mu, sigma, N_draws=10000):
    '''
    Creates an approximation to a normal distribution with mean mu and standard
    deviation sigma, by Monte Carlo.
    Returns a stochastic vector called p_vec, corresponding to values in x_grid.
    If a RV is distributed x~N(mu,sigma), then the expectation of a continuous
    function f() is E[f(x)] = numpy.dot(p_vec,f(x_grid)).

    Parameters
    ----------
    x_grid: numpy.array
        A sorted 1D array of floats representing discrete values that a normally
        distributed RV could take on.
    mu: float
        Mean of the normal distribution to be approximated.
    sigma: float
        Standard deviation of the normal distribution to be approximated.
    N_draws: int
        Number of draws to use in Monte Carlo.

    Returns
    -------
    p_vec: numpy.array
        A stochastic vector with probability weights for each x in x_grid.
    '''
    # Take random draws from the desired normal distribution
    random_draws = np.random.normal(loc=mu, scale=sigma, size=N_draws)

    # Compute the distance between the draws and points in x_grid
    distance = np.abs(x_grid[:, np.newaxis] - random_draws[np.newaxis, :])

    # Find the indices of the points in x_grid that are closest to the draws
    distance_minimizing_index = np.argmin(distance, axis=0)

    # For each point in x_grid, the approximate probability of that point is the number
    # of Monte Carlo draws that are closest to that point
    p_vec = np.zeros_like(x_grid)
    for p_index, p in enumerate(p_vec):
        p_vec[p_index] = np.sum(distance_minimizing_index == p_index) / N_draws

    # Check for obvious errors, and return p_vec
    assert (np.all(p_vec >= 0.)) and (np.all(p_vec <= 1.)) and np.isclose(np.sum(p_vec), 1.)
    return p_vec
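# A minimal usage sketch; the grid and distribution parameters below are made up.
import numpy as np

x_grid = np.linspace(-3.0, 3.0, 21)
p_vec = makeMarkovApproxToNormalByMonteCarlo(x_grid, mu=0.0, sigma=1.0, N_draws=50000)

print(p_vec.sum())            # ~1.0: the weights form a stochastic vector
print(np.dot(p_vec, x_grid))  # ~0.0: the approximated mean of N(0, 1)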
def _fill_table_entry(self, row, col):
    """
    Fill an entry of the observation table.

    Args:
        row (str): The row of the observation table
        col (str): The column of the observation table
    Returns:
        None
    """
    prefix = self._membership_query(row)
    full_output = self._membership_query(row + col)
    length = len(commonprefix([prefix, full_output]))
    self.observation_table[row, col] = full_output[length:]
def translify(in_string, strict=True):
    """
    Translify russian text

    @param in_string: input string
    @type in_string: C{unicode}

    @param strict: raise error if transliteration is incomplete.
        (True by default)
    @type strict: C{bool}

    @return: transliterated string
    @rtype: C{str}

    @raise ValueError: when string doesn't transliterate completely.
        Raised only if strict=True
    """
    translit = in_string
    for symb_in, symb_out in TRANSTABLE:
        translit = translit.replace(symb_in, symb_out)

    if strict and any(ord(symb) > 128 for symb in translit):
        raise ValueError("Unicode string doesn't transliterate completely, " +
                         "is it russian?")

    return translit
def set(self, key, val):
    """
    Sets a header field with the given value, removing previous values.

    Usage::

        headers = HTTPHeaderDict(foo='bar')
        headers.set('Foo', 'baz')
        headers['foo']
        > 'baz'
    """
    key_lower = key.lower()
    new_vals = key, val
    # Keep the common case aka no item present as fast as possible
    vals = self._container.setdefault(key_lower, new_vals)
    if new_vals is not vals:
        self._container[key_lower] = [vals[0], vals[1], val]
def get_widget_label_for(self, fieldname, default=None):
    """Lookup the widget of the field and return the label
    """
    widget = self.get_widget_for(fieldname)
    if widget is None:
        return default
    return widget.label
def relateObjectLocs(obj, entities, selectF):
    """calculate the minimum distance to reach any iterable of entities with a loc"""
    #if obj in entities: return 0 # is already one of the entities
    try:
        obj = obj.loc  # get object's location, if it has one
    except AttributeError:
        pass  # assume obj is already a MapPoint
    try:
        func = obj.direct2dDistance  # assume obj is a MapPoint
    except AttributeError:
        raise ValueError("object %s (%s) does not possess and is not a %s" % (obj, type(obj), MapPoint))
    try:
        return selectF([(func(b.loc), b) for b in entities])
    except AttributeError:
        return selectF([(func(b), b) for b in entities])
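# A rough usage sketch with a made-up stand-in for a MapPoint-like object;
# only the .loc attribute and .direct2dDistance method used above are stubbed.
class _Point:
    def __init__(self, x, y):
        self.x, self.y = x, y
        self.loc = self

    def direct2dDistance(self, other):
        return ((self.x - other.x) ** 2 + (self.y - other.y) ** 2) ** 0.5

    def __repr__(self):
        return "_Point(%s, %s)" % (self.x, self.y)

base = _Point(0, 0)
targets = [_Point(3, 4), _Point(1, 1)]
print(relateObjectLocs(base, targets, min))  # (1.414..., _Point(1, 1))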
def Brokaw(T, ys, mus, MWs, molecular_diameters, Stockmayers):
    r'''Calculates viscosity of a gas mixture according to
    mixing rules in [1]_.

    .. math::
        \eta_{mix} = \sum_{i=1}^n \frac{y_i \eta_i}{\sum_{j=1}^n y_j \phi_{ij}}

        \phi_{ij} = \left( \frac{\eta_i}{\eta_j} \right)^{0.5} S_{ij} A_{ij}

        A_{ij} = m_{ij} M_{ij}^{-0.5} \left[1 +
        \frac{M_{ij} - M_{ij}^{0.45}}
        {2(1+M_{ij}) + \frac{(1 + M_{ij}^{0.45}) m_{ij}^{-0.5}}{1 + m_{ij}}}\right]

        m_{ij} = \left[ \frac{4}{(1+M_{ij}^{-1})(1+M_{ij})}\right]^{0.25}

        M_{ij} = \frac{M_i}{M_j}

        S_{ij} = \frac{1 + (T_i^* T_j^*)^{0.5} + (\delta_i \delta_j/4)}
        {[1+T_i^* + (\delta_i^2/4)]^{0.5}[1+T_j^*+(\delta_j^2/4)]^{0.5}}

        T^* = kT/\epsilon

    Parameters
    ----------
    T : float
        Temperature of fluid, [K]
    ys : float
        Mole fractions of gas components
    mus : float
        Gas viscosities of all components, [Pa*S]
    MWs : float
        Molecular weights of all components, [g/mol]
    molecular_diameters : float
        L-J molecular diameter of all components, [angstroms]
    Stockmayers : float
        L-J Stockmayer energy parameters of all components, []

    Returns
    -------
    mug : float
        Viscosity of gas mixture, [Pa*S]

    Notes
    -----
    This equation is entirely dimensionless; all dimensions cancel.
    The original source has not been reviewed.

    This is DIPPR Procedure 8D: Method for the Viscosity of Nonhydrocarbon
    Vapor Mixtures at Low Pressure (Polar and Nonpolar)

    Examples
    --------
    >>> Brokaw(308.2, [0.05, 0.95], [1.34E-5, 9.5029E-6], [64.06, 46.07], [0.42, 0.19], [347, 432])
    9.699085099801568e-06

    References
    ----------
    .. [1] Brokaw, R. S. "Predicting Transport Properties of Dilute Gases."
       Industrial & Engineering Chemistry Process Design and Development
       8, no. 2 (April 1, 1969): 240-53. doi:10.1021/i260030a015.
    .. [2] Brokaw, R. S. Viscosity of Gas Mixtures, NASA-TN-D-4496, 1968.
    .. [3] Danner, Ronald P, and Design Institute for Physical Property Data.
       Manual for Predicting Chemical Process Design Data. New York, N.Y, 1982.
    '''
    cmps = range(len(ys))
    MDs = molecular_diameters
    if not none_and_length_check([ys, mus, MWs, molecular_diameters, Stockmayers]):  # check same-length inputs
        raise Exception('Function inputs are incorrect format')

    Tsts = [T/Stockmayer_i for Stockmayer_i in Stockmayers]

    Sij = [[0 for i in cmps] for j in cmps]
    Mij = [[0 for i in cmps] for j in cmps]
    mij = [[0 for i in cmps] for j in cmps]
    Aij = [[0 for i in cmps] for j in cmps]
    phiij = [[0 for i in cmps] for j in cmps]

    for i in cmps:
        for j in cmps:
            Sij[i][j] = (1 + (Tsts[i]*Tsts[j])**0.5 + (MDs[i]*MDs[j])/4.)/(1 + Tsts[i] + (MDs[i]**2/4.))**0.5/(1 + Tsts[j] + (MDs[j]**2/4.))**0.5
            if MDs[i] <= 0.1 and MDs[j] <= 0.1:
                Sij[i][j] = 1
            Mij[i][j] = MWs[i]/MWs[j]
            mij[i][j] = (4./(1 + Mij[i][j]**-1)/(1 + Mij[i][j]))**0.25
            Aij[i][j] = mij[i][j]*Mij[i][j]**-0.5*(1 + (Mij[i][j] - Mij[i][j]**0.45)/(2*(1 + Mij[i][j]) + (1 + Mij[i][j]**0.45)*mij[i][j]**-0.5/(1 + mij[i][j])))
            phiij[i][j] = (mus[i]/mus[j])**0.5*Sij[i][j]*Aij[i][j]

    return sum([ys[i]*mus[i]/sum([ys[j]*phiij[i][j] for j in cmps]) for i in cmps])
def _shrink(v, gamma):
    """Soft-shrinkage of an array with parameter gamma.

    Parameters
    ----------
    v : array
        Array containing the values to be applied to the shrinkage operator
    gamma : float
        Shrinkage parameter.

    Returns
    -------
    v : array
        The same input array after the shrinkage operator was applied.
    """
    pos = v > gamma
    neg = v < -gamma
    v[pos] -= gamma
    v[neg] += gamma
    v[np.logical_and(~pos, ~neg)] = .0
    return v
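# A short usage sketch; the input values below are made up. Note the operator
# modifies the passed array in place and also returns it.
import numpy as np

v = np.array([-2.0, -0.5, 0.0, 0.3, 1.5])
print(_shrink(v, 1.0))  # [-1.  0.  0.  0.  0.5]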
def list_logs(self):
    '''return a list of logs. We return any file that ends in .log
    '''
    results = []
    for image in self._bucket.list_blobs():
        if image.name.endswith('log'):
            results.append(image)
    if len(results) == 0:
        bot.info("No containers found, based on extension .log")
    return results
def get_merge_requests(self):
    "http://doc.gitlab.com/ce/api/merge_requests.html"
    g = self.gitlab
    merges = self.get(g['url'] + "/projects/" + g['repo'] + "/merge_requests",
                      {'private_token': g['token'], 'state': 'all'},
                      cache=False)
    return dict([(str(merge['id']), merge) for merge in merges])
def _get_biallelic_variant(self, variant, info, _check_alleles=True):
    """Creates a bi-allelic variant."""
    info = info.iloc[0, :]
    assert not info.multiallelic

    # Seeking and parsing the file
    self._impute2_file.seek(info.seek)
    genotypes = self._parse_impute2_line(self._impute2_file.readline())

    variant_alleles = variant._encode_alleles([
        genotypes.reference, genotypes.coded,
    ])
    if (_check_alleles and variant_alleles != variant.alleles):
        # Variant with requested alleles is unavailable.
        logging.variant_not_found(variant)
        return []

    return [genotypes]
def model_returns_t_alpha_beta(data, bmark, samples=2000, progressbar=True):
    """
    Run Bayesian alpha-beta-model with T distributed returns.

    This model estimates intercept (alpha) and slope (beta) of two
    return sets. Usually, these will be algorithm returns and
    benchmark returns (e.g. S&P500). The data is assumed to be T
    distributed and thus is robust to outliers and takes tail events
    into account.  If a pandas.DataFrame is passed as a benchmark, then
    multiple linear regression is used to estimate alpha and beta.

    Parameters
    ----------
    returns : pandas.Series
        Series of simple returns of an algorithm or stock.
    bmark : pandas.DataFrame
        DataFrame of benchmark returns (e.g., S&P500) or risk factors (e.g.,
        Fama-French SMB, HML, and UMD).
        If bmark has more recent returns than returns_train, these dates
        will be treated as missing values and predictions will be
        generated for them taking market correlations into account.
    samples : int (optional)
        Number of posterior samples to draw.

    Returns
    -------
    model : pymc.Model object
        PyMC3 model containing all random variables.
    trace : pymc3.sampling.BaseTrace object
        A PyMC3 trace object that contains samples for each parameter
        of the posterior.
    """
    data_bmark = pd.concat([data, bmark], axis=1).dropna()

    with pm.Model() as model:
        sigma = pm.HalfCauchy(
            'sigma',
            beta=1)
        nu = pm.Exponential('nu_minus_two', 1. / 10.)

        # alpha and beta
        X = data_bmark.iloc[:, 1]
        y = data_bmark.iloc[:, 0]

        alpha_reg = pm.Normal('alpha', mu=0, sd=.1)
        beta_reg = pm.Normal('beta', mu=0, sd=1)

        mu_reg = alpha_reg + beta_reg * X
        pm.StudentT('returns',
                    nu=nu + 2,
                    mu=mu_reg,
                    sd=sigma,
                    observed=y)
        trace = pm.sample(samples, progressbar=progressbar)

    return model, trace
def next_event_indexer(all_dates, data_query_cutoff, all_sids, event_dates, event_timestamps, event_sids): """ Construct an index array that, when applied to an array of values, produces a 2D array containing the values associated with the next event for each sid at each moment in time. Locations where no next event was known will be filled with -1. Parameters ---------- all_dates : ndarray[datetime64[ns], ndim=1] Row labels for the target output. data_query_cutoff : pd.DatetimeIndex The boundaries for the given trading sessions in ``all_dates``. all_sids : ndarray[int, ndim=1] Column labels for the target output. event_dates : ndarray[datetime64[ns], ndim=1] Dates on which each input events occurred/will occur. ``event_dates`` must be in sorted order, and may not contain any NaT values. event_timestamps : ndarray[datetime64[ns], ndim=1] Dates on which we learned about each input event. event_sids : ndarray[int, ndim=1] Sids assocated with each input event. Returns ------- indexer : ndarray[int, ndim=2] An array of shape (len(all_dates), len(all_sids)) of indices into ``event_{dates,timestamps,sids}``. """ validate_event_metadata(event_dates, event_timestamps, event_sids) out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64) sid_ixs = all_sids.searchsorted(event_sids) # side='right' here ensures that we include the event date itself # if it's in all_dates. dt_ixs = all_dates.searchsorted(event_dates, side='right') ts_ixs = data_query_cutoff.searchsorted(event_timestamps, side='right') # Walk backward through the events, writing the index of the event into # slots ranging from the event's timestamp to its asof. This depends for # correctness on the fact that event_dates is sorted in ascending order, # because we need to overwrite later events with earlier ones if their # eligible windows overlap. for i in range(len(event_sids) - 1, -1, -1): start_ix = ts_ixs[i] end_ix = dt_ixs[i] out[start_ix:end_ix, sid_ixs[i]] = i return out
Construct an index array that, when applied to an array of values, produces a 2D array containing the values associated with the next event for each sid at each moment in time. Locations where no next event was known will be filled with -1. Parameters ---------- all_dates : ndarray[datetime64[ns], ndim=1] Row labels for the target output. data_query_cutoff : pd.DatetimeIndex The boundaries for the given trading sessions in ``all_dates``. all_sids : ndarray[int, ndim=1] Column labels for the target output. event_dates : ndarray[datetime64[ns], ndim=1] Dates on which each input event occurred/will occur. ``event_dates`` must be in sorted order, and may not contain any NaT values. event_timestamps : ndarray[datetime64[ns], ndim=1] Dates on which we learned about each input event. event_sids : ndarray[int, ndim=1] Sids associated with each input event. Returns ------- indexer : ndarray[int, ndim=2] An array of shape (len(all_dates), len(all_sids)) of indices into ``event_{dates,timestamps,sids}``.
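A small, hypothetical helper sketch showing how the returned indexer is typically applied: fancy-index an array of per-event values and mask the -1 slots where no next event is known (the helper name is illustrative, not part of the original module):

import numpy as np

def lookup_next_event_values(indexer, event_values, missing=np.nan):
    # indexer: (n_dates, n_sids) output of next_event_indexer
    # event_values: one value per input event
    out = np.asarray(event_values, dtype=float)[indexer]
    out[indexer == -1] = missing  # overwrite slots with no known next event
    return out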
def phaseshift_isc(data, pairwise=False, summary_statistic='median', n_shifts=1000, tolerate_nans=True, random_state=None): """Phase randomization for one-sample ISC test For each voxel or ROI, compute the actual ISC and p-values from a null distribution of ISCs where response time series are phase randomized prior to computing ISC. If pairwise, apply phase randomization to each subject and compute pairwise ISCs. If leave-one-out approach is used (pairwise=False), only apply phase randomization to the left-out subject in each iteration of the leave-one-out procedure. Input data should be a list where each item is a time-points by voxels ndarray for a given subject. Multiple input ndarrays must be the same shape. If a single ndarray is supplied, the last dimension is assumed to correspond to subjects. When using leave-one-out approach, NaNs are ignored when computing mean time series of N-1 subjects (default: tolerate_nans=True). Alternatively, you may supply a float between 0 and 1 indicating a threshold proportion of N subjects with non-NaN values required when computing the average time series for a given voxel. For example, if tolerate_nans=.8, ISCs will be computed for any voxel where >= 80% of subjects have non-NaN values, while voxels with < 80% non-NaN values will be assigned NaNs. If set to False, NaNs are not tolerated and voxels with one or more NaNs among the N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False will not affect the pairwise approach; however, if a threshold float is provided, voxels that do not reach this threshold will be excluded. Note that accommodating NaNs may be notably slower than setting tolerate_nans to False. Returns the observed ISC and p-values (two-tailed test), as well as the null distribution of ISCs computed on phase-randomized data. The implementation is based on the work in [Lerner2011]_ and [Simony2016]_. .. [Lerner2011] "Topographic mapping of a hierarchy of temporal receptive windows using a narrated story.", Y. Lerner, C. J. Honey, L. J. Silbert, U. Hasson, 2011, Journal of Neuroscience, 31, 2906-2915. 
https://doi.org/10.1523/jneurosci.3684-10.2011 Parameters ---------- data : list or ndarray (n_TRs x n_voxels x n_subjects) fMRI data for which to compute ISFC pairwise : bool, default: False Whether to use pairwise (True) or leave-one-out (False) approach summary_statistic : str, default: 'median' Summary statistic, either 'median' (default) or 'mean' n_shifts : int, default: 1000 Number of randomly shifted samples tolerate_nans : bool or float, default: True Accommodate NaNs (when averaging in leave-one-out approach) random_state = int, None, or np.random.RandomState, default: None Initial random seed Returns ------- observed : float, observed ISC (without time-shifting) Actual ISCs p : float, p-value p-value based on time-shifting randomization test distribution : ndarray, time-shifts by voxels (optional) Time-shifted null distribution if return_bootstrap=True """ # Check response time series input format data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data) # Get actual observed ISC observed = isc(data, pairwise=pairwise, summary_statistic=summary_statistic, tolerate_nans=tolerate_nans) # Iterate through randomized shifts to create null distribution distribution = [] for i in np.arange(n_shifts): # Random seed to be deterministically re-randomized at each iteration if isinstance(random_state, np.random.RandomState): prng = random_state else: prng = np.random.RandomState(random_state) # Get shifted version of data shifted_data = phase_randomize(data, random_state=prng) # In pairwise approach, apply all shifts then compute pairwise ISCs if pairwise: # Compute null ISC on shifted data for pairwise approach shifted_isc = isc(shifted_data, pairwise=True, summary_statistic=summary_statistic, tolerate_nans=tolerate_nans) # In leave-one-out, apply shift only to each left-out participant elif not pairwise: # Roll subject axis of phase-randomized data shifted_data = np.rollaxis(shifted_data, 2, 0) shifted_isc = [] for s, shifted_subject in enumerate(shifted_data): # ISC of shifted left-out subject vs mean of N-1 subjects nonshifted_mean = np.mean(np.delete(data, s, axis=2), axis=2) loo_isc = isc(np.dstack((shifted_subject, nonshifted_mean)), pairwise=False, summary_statistic=None, tolerate_nans=tolerate_nans) shifted_isc.append(loo_isc) # Get summary statistics across left-out subjects shifted_isc = compute_summary_statistic( np.dstack(shifted_isc), summary_statistic=summary_statistic, axis=2) distribution.append(shifted_isc) # Update random state for next iteration random_state = np.random.RandomState(prng.randint(0, MAX_RANDOM_SEED)) # Convert distribution to numpy array distribution = np.vstack(distribution) # Get p-value for actual median from shifted distribution p = p_from_null(observed, distribution, side='two-sided', exact=False, axis=0) return observed, p, distribution
Phase randomization for one-sample ISC test For each voxel or ROI, compute the actual ISC and p-values from a null distribution of ISCs where response time series are phase randomized prior to computing ISC. If pairwise, apply phase randomization to each subject and compute pairwise ISCs. If leave-one-out approach is used (pairwise=False), only apply phase randomization to the left-out subject in each iteration of the leave-one-out procedure. Input data should be a list where each item is a time-points by voxels ndarray for a given subject. Multiple input ndarrays must be the same shape. If a single ndarray is supplied, the last dimension is assumed to correspond to subjects. When using leave-one-out approach, NaNs are ignored when computing mean time series of N-1 subjects (default: tolerate_nans=True). Alternatively, you may supply a float between 0 and 1 indicating a threshold proportion of N subjects with non-NaN values required when computing the average time series for a given voxel. For example, if tolerate_nans=.8, ISCs will be computed for any voxel where >= 80% of subjects have non-NaN values, while voxels with < 80% non-NaN values will be assigned NaNs. If set to False, NaNs are not tolerated and voxels with one or more NaNs among the N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False will not affect the pairwise approach; however, if a threshold float is provided, voxels that do not reach this threshold will be excluded. Note that accommodating NaNs may be notably slower than setting tolerate_nans to False. Returns the observed ISC and p-values (two-tailed test), as well as the null distribution of ISCs computed on phase-randomized data. The implementation is based on the work in [Lerner2011]_ and [Simony2016]_. .. [Lerner2011] "Topographic mapping of a hierarchy of temporal receptive windows using a narrated story.", Y. Lerner, C. J. Honey, L. J. Silbert, U. Hasson, 2011, Journal of Neuroscience, 31, 2906-2915. https://doi.org/10.1523/jneurosci.3684-10.2011 Parameters ---------- data : list or ndarray (n_TRs x n_voxels x n_subjects) fMRI data for which to compute ISFC pairwise : bool, default: False Whether to use pairwise (True) or leave-one-out (False) approach summary_statistic : str, default: 'median' Summary statistic, either 'median' (default) or 'mean' n_shifts : int, default: 1000 Number of randomly shifted samples tolerate_nans : bool or float, default: True Accommodate NaNs (when averaging in leave-one-out approach) random_state = int, None, or np.random.RandomState, default: None Initial random seed Returns ------- observed : float, observed ISC (without time-shifting) Actual ISCs p : float, p-value p-value based on time-shifting randomization test distribution : ndarray, time-shifts by voxels (optional) Time-shifted null distribution if return_bootstrap=True
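A minimal sketch with random data, assuming phaseshift_isc is importable (in BrainIAK it lives in brainiak.isc); shapes and parameter values are illustrative only:

import numpy as np

# 300 TRs, 10 voxels, 8 subjects of synthetic data.
data = np.random.randn(300, 10, 8)
observed, p, distribution = phaseshift_isc(data, pairwise=False,
                                            summary_statistic='median',
                                            n_shifts=200, random_state=42)
print(observed.shape, p.shape, distribution.shape)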
def _set_ipv6_gateway_address(self, v, load=False): """ Setter method for ipv6_gateway_address, mapped from YANG variable /interface_vlan/interface/ve/ipv6/ipv6_anycast_gateway/ipv6_gateway_address (list) If this variable is read-only (config: false) in the source YANG file, then _set_ipv6_gateway_address is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ipv6_gateway_address() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("ipv6_gw_addr",ipv6_gateway_address.ipv6_gateway_address, yang_name="ipv6-gateway-address", rest_name="gateway-address", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ipv6-gw-addr', extensions={u'tailf-common': {u'info': u'Set IPv6 gateway Address/Prefix', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'gateway-address', u'cli-no-match-completion': None, u'callpoint': u'AnycastGatewayGlobalVeIpv6GatewayAddress'}}), is_container='list', yang_name="ipv6-gateway-address", rest_name="gateway-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set IPv6 gateway Address/Prefix', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'gateway-address', u'cli-no-match-completion': None, u'callpoint': u'AnycastGatewayGlobalVeIpv6GatewayAddress'}}, namespace='urn:brocade.com:mgmt:brocade-anycast-gateway', defining_module='brocade-anycast-gateway', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """ipv6_gateway_address must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("ipv6_gw_addr",ipv6_gateway_address.ipv6_gateway_address, yang_name="ipv6-gateway-address", rest_name="gateway-address", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ipv6-gw-addr', extensions={u'tailf-common': {u'info': u'Set IPv6 gateway Address/Prefix', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'gateway-address', u'cli-no-match-completion': None, u'callpoint': u'AnycastGatewayGlobalVeIpv6GatewayAddress'}}), is_container='list', yang_name="ipv6-gateway-address", rest_name="gateway-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set IPv6 gateway Address/Prefix', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'gateway-address', u'cli-no-match-completion': None, u'callpoint': u'AnycastGatewayGlobalVeIpv6GatewayAddress'}}, namespace='urn:brocade.com:mgmt:brocade-anycast-gateway', defining_module='brocade-anycast-gateway', yang_type='list', is_config=True)""", }) self.__ipv6_gateway_address = t if hasattr(self, '_set'): self._set()
Setter method for ipv6_gateway_address, mapped from YANG variable /interface_vlan/interface/ve/ipv6/ipv6_anycast_gateway/ipv6_gateway_address (list) If this variable is read-only (config: false) in the source YANG file, then _set_ipv6_gateway_address is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ipv6_gateway_address() directly.
def close(self, signalnum=None, frame=None): """Closes all currently open Tail objects""" self._running = False self._log_debug("Closing all tail objects") self._active = False for fid in self._tails: self._tails[fid].close() for n in range(0,self._number_of_consumer_processes): if self._proc[n] is not None and self._proc[n].is_alive(): self._logger.debug("Terminate Process: " + str(n)) self._proc[n].terminate() self._proc[n].join()
Closes all currently open Tail objects
def ListRecursivelyViaWalking(top): """Walks a directory tree, yielding (dir_path, file_paths) tuples. For each of `top` and its subdirectories, yields a tuple containing the path to the directory and the path to each of the contained files. Note that unlike os.Walk()/tf.io.gfile.walk()/ListRecursivelyViaGlobbing, this does not list subdirectories. The file paths are all absolute. If the directory does not exist, this yields nothing. Walking may be incredibly slow on certain file systems. Args: top: A path to a directory. Yields: A (dir_path, file_paths) tuple for each directory/subdirectory. """ for dir_path, _, filenames in tf.io.gfile.walk(top, topdown=True): yield (dir_path, (os.path.join(dir_path, filename) for filename in filenames))
Walks a directory tree, yielding (dir_path, file_paths) tuples. For each of `top` and its subdirectories, yields a tuple containing the path to the directory and the path to each of the contained files. Note that unlike os.Walk()/tf.io.gfile.walk()/ListRecursivelyViaGlobbing, this does not list subdirectories. The file paths are all absolute. If the directory does not exist, this yields nothing. Walking may be incredibly slow on certain file systems. Args: top: A path to a directory. Yields: A (dir_path, file_paths) tuple for each directory/subdirectory.
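A brief usage sketch (the directory path is illustrative):

for dir_path, file_paths in ListRecursivelyViaWalking('/tmp/tensorboard_logs'):
    for path in file_paths:
        print(path)  # absolute path of each file directly under dir_path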
def is_valid_ipv6(ip_str): """ Check the validity of an IPv6 address """ try: socket.inet_pton(socket.AF_INET6, ip_str) except socket.error: return False return True
Check the validity of an IPv6 address
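A few quick checks illustrating the behaviour:

import socket

assert is_valid_ipv6('::1')
assert is_valid_ipv6('2001:db8::8a2e:370:7334')
assert not is_valid_ipv6('2001:db8::zzzz')
assert not is_valid_ipv6('192.168.0.1')  # IPv4, so inet_pton(AF_INET6, ...) fails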
def install(self): """Confirm add-on install.""" with self.selenium.context(self.selenium.CONTEXT_CHROME): self.find_primary_button().click()
Confirm add-on install.
def parse_hstring(hs): """ Parse a single item from the telescope server into name, value, comment. """ # split the string on = and /, also stripping whitespace and annoying quotes name, value, comment = yield_three( [val.strip().strip("'") for val in filter(None, re.split("[=/]+", hs))] ) # if comment has a slash in it, put it back together try: len(comment) except TypeError: pass else: comment = '/'.join(comment) return name, value, comment
Parse a single item from the telescope server into name, value, comment.
def updateAltHistory(self): '''Updates the altitude history plot.''' self.altHist.append(self.relAlt) self.timeHist.append(self.relAltTime) # Delete entries older than x seconds histLim = 10 currentTime = time.time() point = 0 for i in range(0,len(self.timeHist)): if (self.timeHist[i] > (currentTime - 10.0)): break # Remove old entries self.altHist = self.altHist[i:] self.timeHist = self.timeHist[i:] # Transform Data x = [] y = [] tmin = min(self.timeHist) tmax = max(self.timeHist) x1 = self.leftPos+(self.vertSize/10.0) y1 = -0.25 altMin = 0 altMax = max(self.altHist) # Keep alt max for whole mission if altMax > self.altMax: self.altMax = altMax else: altMax = self.altMax if tmax != tmin: mx = 0.5/(tmax-tmin) else: mx = 0.0 if altMax != altMin: my = 0.5/(altMax-altMin) else: my = 0.0 for t in self.timeHist: x.append(mx*(t-tmin)+x1) for alt in self.altHist: val = my*(alt-altMin)+y1 # Crop extreme noise if val < -0.25: val = -0.25 elif val > 0.25: val = 0.25 y.append(val) # Display Plot self.altHistRect.set_x(self.leftPos+(self.vertSize/10.0)) self.altPlot.set_data(x,y) self.altMarker.set_data(self.leftPos+(self.vertSize/10.0)+0.5,val) self.altText2.set_position((self.leftPos+(4*self.vertSize/10.0)+0.5,val)) self.altText2.set_size(self.fontSize) self.altText2.set_text('%.f m' % self.relAlt)
Updates the altitude history plot.
def make_plus_fields(obj): """ Add a '+' to the key of non-standard fields. dispatch to recursive _make_plus_helper based on _type field """ fields = standard_fields.get(obj['_type'], dict()) return _make_plus_helper(obj, fields)
Add a '+' to the key of non-standard fields. dispatch to recursive _make_plus_helper based on _type field
def build_fred(self): '''Build a flat recurrent encoder-decoder dialogue model''' encoder = Encoder(data=self.dataset, config=self.model_config) decoder = Decoder(data=self.dataset, config=self.model_config, encoder=encoder) return EncoderDecoder(config=self.model_config, encoder=encoder, decoder=decoder, num_gpus=self.num_gpus)
Build a flat recurrent encoder-decoder dialogue model
def GetNetworkAddressWithTime(self): """ Get a network address object. Returns: NetworkAddressWithTime: if we have a connection to a node. None: otherwise. """ if self.port is not None and self.host is not None and self.Version is not None: return NetworkAddressWithTime(self.host, self.port, self.Version.Services) return None
Get a network address object. Returns: NetworkAddressWithTime: if we have a connection to a node. None: otherwise.
def infer(self, sensations, stats=None, objname=None): """ Attempt to recognize the object given a list of sensations. You may use :meth:`getCurrentClassification` to extract the current object classification from the network :param sensations: Array of sensations, where each sensation is composed of displacement vector and feature SDR for each column. For example: [[[1,1,1],[101,205,523, ..., 1021]],...] Note: Each column must have the same number of sensations as the other columns. :type sensations: list[tuple[list[int], list[int]]] :param stats: Dictionary holding statistics information. See '_updateInferenceStats' for information on the statistics collected :type stats: defaultdict[str, list] :param objname: Name of the inferred object, if known :type objname: str or None """ self.setLearning(False) prevLoc = [None] * self.numColumns numFeatures = len(sensations[0]) for sensation in xrange(numFeatures): for col in xrange(self.numColumns): assert numFeatures == len(sensations[col]) location, feature = sensations[col][sensation] # Compute displacement from previous location location = np.array(location) displacement = [0] * self.dimensions if prevLoc[col] is not None: displacement = location - prevLoc[col] prevLoc[col] = location self.motorInput[col].addDataToQueue(displacement) self.sensorInput[col].addDataToQueue(feature, False, 0) self.network.run(1) if stats is not None: self._updateInferenceStats(stats=stats, objectName=objname)
Attempt to recognize the object given a list of sensations. You may use :meth:`getCurrentClassification` to extract the current object classification from the network :param sensations: Array of sensations, where each sensation is composed of displacement vector and feature SDR for each column. For example: [[[1,1,1],[101,205,523, ..., 1021]],...] Note: Each column must have the same number of sensations as the other columns. :type sensations: list[tuple[list[int], list[int]]] :param stats: Dictionary holding statistics information. See '_updateInferenceStats' for information on the statistics collected :type stats: defaultdict[str, list] :param objname: Name of the inferred object, if known :type objname: str or None
def get_channel_id(self): """Fetches id :return: id of youtube channel """ soup = BeautifulSoup( self.get_channel_page(), "lxml" ) # parser for source page channel_id = soup.find_all( "span", { "class": "channel-header-subscription-button-container" } ) # get all good spans channel_id = channel_id[0].find_all("button")[ 0] # get button in first span channel_id = channel_id["data-channel-external-id"] # get id return channel_id
Fetches id :return: id of youtube channel
def _render_extended_error_message_list(self, extended_error): """Parse the ExtendedError object and retruns the message. Build a list of decoded messages from the extended_error using the message registries. An ExtendedError JSON object is a response from the with its own schema. This function knows how to parse the ExtendedError object and, using any loaded message registries, render an array of plain language strings that represent the response. """ messages = [] if isinstance(extended_error, dict): if ('Type' in extended_error and extended_error['Type'].startswith('ExtendedError.')): for msg in extended_error['Messages']: message_id = msg['MessageID'] x = message_id.split('.') registry = x[0] msgkey = x[len(x) - 1] # if the correct message registry is loaded, # do string resolution if (registry in self.message_registries and msgkey in self.message_registries[registry]['Messages']): rmsgs = self.message_registries[registry]['Messages'] msg_dict = rmsgs[msgkey] msg_str = message_id + ': ' + msg_dict['Message'] for argn in range(0, msg_dict['NumberOfArgs']): subst = '%' + str(argn+1) m = str(msg['MessageArgs'][argn]) msg_str = msg_str.replace(subst, m) if ('Resolution' in msg_dict and msg_dict['Resolution'] != 'None'): msg_str += ' ' + msg_dict['Resolution'] messages.append(msg_str) else: # no message registry, simply return the msg object # in string form messages.append(str(message_id)) return messages
Parse the ExtendedError object and return the messages. Build a list of decoded messages from the extended_error using the message registries. An ExtendedError JSON object is a response with its own schema. This function knows how to parse the ExtendedError object and, using any loaded message registries, render an array of plain language strings that represent the response.
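A hypothetical input sketch: with no matching message registry loaded, the raw MessageIDs are returned as plain strings (the MessageID and the client object below are illustrative):

extended_error = {
    'Type': 'ExtendedError.1.0.0',
    'Messages': [{'MessageID': 'Example.1.0.ResetRequired', 'MessageArgs': []}],
}
# Assuming `client` is an instance whose message_registries lack the 'Example' registry:
# client._render_extended_error_message_list(extended_error)
# -> ['Example.1.0.ResetRequired']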
def __print_step_by_console(self, step): """ Print the step to the console when console output is enabled :param step: step text """ step_list = step.split(u'\n') for s in step_list: self.logger.by_console(u' %s' % repr(s).replace("u'", "").replace("'", ""))
Print the step to the console when console output is enabled :param step: step text
def hexdump(logger, s, width=16, skip=True, hexii=False, begin=0, highlight=None): r""" Return a hexdump-dump of a string. Arguments: logger(FastLogger): Logger object s(str): The data to hexdump. width(int): The number of characters per line skip(bool): Set to True, if repeated lines should be replaced by a "*" hexii(bool): Set to True, if a hexii-dump should be returned instead of a hexdump. begin(int): Offset of the first byte to print in the left column highlight(iterable): Byte values to highlight. Returns: A hexdump-dump in the form of a string. Examples: >>> print hexdump("abc") 00000000 61 62 63 │abc│ 00000003 >>> print hexdump('A'*32) 00000000 41 41 41 41 41 41 41 41 41 41 41 41 41 41 41 41 │AAAA│AAAA│AAAA│AAAA│ * 00000020 >>> print hexdump('A'*32, width=8) 00000000 41 41 41 41 41 41 41 41 │AAAA│AAAA│ * 00000020 >>> print hexdump(list(map(chr, range(256)))) 00000000 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f │····│····│····│····│ 00000010 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f │····│····│····│····│ 00000020 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f │ !"#│$%&'│()*+│,-./│ 00000030 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f │0123│4567│89:;│<=>?│ 00000040 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f │@ABC│DEFG│HIJK│LMNO│ 00000050 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f │PQRS│TUVW│XYZ[│\]^_│ 00000060 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f │`abc│defg│hijk│lmno│ 00000070 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f │pqrs│tuvw│xyz{│|}~·│ 00000080 80 81 82 83 84 85 86 87 88 89 8a 8b 8c 8d 8e 8f │····│····│····│····│ 00000090 90 91 92 93 94 95 96 97 98 99 9a 9b 9c 9d 9e 9f │····│····│····│····│ 000000a0 a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aa ab ac ad ae af │····│····│····│····│ 000000b0 b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 ba bb bc bd be bf │····│····│····│····│ 000000c0 c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf │····│····│····│····│ 000000d0 d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 da db dc dd de df │····│····│····│····│ 000000e0 e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 ea eb ec ed ee ef │····│····│····│····│ 000000f0 f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 fa fb fc fd fe ff │····│····│····│····│ 00000100 >>> print hexdump(list(map(chr, range(256))), hexii=True) 00000000 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f │ 00000010 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f │ 00000020 20 .! ." .# .$ .% .& .' .( .) .* .+ ., .- .. ./ │ 00000030 .0 .1 .2 .3 .4 .5 .6 .7 .8 .9 .: .; .< .= .> .? │ 00000040 .@ .A .B .C .D .E .F .G .H .I .J .K .L .M .N .O │ 00000050 .P .Q .R .S .T .U .V .W .X .Y .Z .[ .\ .] 
.^ ._ │ 00000060 .` .a .b .c .d .e .f .g .h .i .j .k .l .m .n .o │ 00000070 .p .q .r .s .t .u .v .w .x .y .z .{ .| .} .~ 7f │ 00000080 80 81 82 83 84 85 86 87 88 89 8a 8b 8c 8d 8e 8f │ 00000090 90 91 92 93 94 95 96 97 98 99 9a 9b 9c 9d 9e 9f │ 000000a0 a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aa ab ac ad ae af │ 000000b0 b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 ba bb bc bd be bf │ 000000c0 c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf │ 000000d0 d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 da db dc dd de df │ 000000e0 e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 ea eb ec ed ee ef │ 000000f0 f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 fa fb fc fd fe ## │ 00000100 >>> print hexdump('X' * 64) 00000000 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 │XXXX│XXXX│XXXX│XXXX│ * 00000040 >>> print hexdump('X' * 64, skip=False) 00000000 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 │XXXX│XXXX│XXXX│XXXX│ 00000010 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 │XXXX│XXXX│XXXX│XXXX│ 00000020 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 │XXXX│XXXX│XXXX│XXXX│ 00000030 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 │XXXX│XXXX│XXXX│XXXX│ 00000040 >>> print hexdump(fit({0x10: 'X'*0x20, 0x50-1: '\xff'*20}, length=0xc0) + '\x00'*32, cyclic=1, hexii=1) 00000000 .a .a .a .a .b .a .a .a .c .a .a .a .d .a .a .a │ 00000010 .X .X .X .X .X .X .X .X .X .X .X .X .X .X .X .X │ * 00000030 .m .a .a .a .n .a .a .a .o .a .a .a .p .a .a .a │ 00000040 .q .a .a .a .r .a .a .a .s .a .a .a .t .a .a ## │ 00000050 ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## │ 00000060 ## ## ## .a .z .a .a .b .b .a .a .b .c .a .a .b │ 00000070 .d .a .a .b .e .a .a .b .f .a .a .b .g .a .a .b │ * 000000c0 │ * 000000e0 >>> print hexdump('A'*16, width=9) 00000000 41 41 41 41 41 41 41 41 41 │AAAA│AAAA│A│ 00000009 41 41 41 41 41 41 41 │AAAA│AAA│ 00000010 >>> print hexdump('A'*16, width=10) 00000000 41 41 41 41 41 41 41 41 41 41 │AAAA│AAAA│AA│ 0000000a 41 41 41 41 41 41 │AAAA│AA│ 00000010 >>> print hexdump('A'*16, width=11) 00000000 41 41 41 41 41 41 41 41 41 41 41 │AAAA│AAAA│AAA│ 0000000b 41 41 41 41 41 │AAAA│A│ 00000010 >>> print hexdump('A'*16, width=12) 00000000 41 41 41 41 41 41 41 41 41 41 41 41 │AAAA│AAAA│AAAA│ 0000000c 41 41 41 41 │AAAA││ 00000010 >>> print hexdump('A'*16, width=13) 00000000 41 41 41 41 41 41 41 41 41 41 41 41 41 │AAAA│AAAA│AAAA│A│ 0000000d 41 41 41 │AAA│ 00000010 >>> print hexdump('A'*16, width=14) 00000000 41 41 41 41 41 41 41 41 41 41 41 41 41 41 │AAAA│AAAA│AAAA│AA│ 0000000e 41 41 │AA│ 00000010 >>> print hexdump('A'*16, width=15) 00000000 41 41 41 41 41 41 41 41 41 41 41 41 41 41 41 │AAAA│AAAA│AAAA│AAA│ 0000000f 41 │A│ 00000010 """ s = _flat(s) return '\n'.join(hexdump_iter(logger, StringIO(s), width, skip, hexii, begin, highlight))
r""" Return a hexdump-dump of a string. Arguments: logger(FastLogger): Logger object s(str): The data to hexdump. width(int): The number of characters per line skip(bool): Set to True, if repeated lines should be replaced by a "*" hexii(bool): Set to True, if a hexii-dump should be returned instead of a hexdump. begin(int): Offset of the first byte to print in the left column highlight(iterable): Byte values to highlight. Returns: A hexdump-dump in the form of a string. Examples: >>> print hexdump("abc") 00000000 61 62 63 │abc│ 00000003 >>> print hexdump('A'*32) 00000000 41 41 41 41 41 41 41 41 41 41 41 41 41 41 41 41 │AAAA│AAAA│AAAA│AAAA│ * 00000020 >>> print hexdump('A'*32, width=8) 00000000 41 41 41 41 41 41 41 41 │AAAA│AAAA│ * 00000020 >>> print hexdump(list(map(chr, range(256)))) 00000000 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f │····│····│····│····│ 00000010 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f │····│····│····│····│ 00000020 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f │ !"#│$%&'│()*+│,-./│ 00000030 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f │0123│4567│89:;│<=>?│ 00000040 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f │@ABC│DEFG│HIJK│LMNO│ 00000050 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f │PQRS│TUVW│XYZ[│\]^_│ 00000060 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f │`abc│defg│hijk│lmno│ 00000070 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f │pqrs│tuvw│xyz{│|}~·│ 00000080 80 81 82 83 84 85 86 87 88 89 8a 8b 8c 8d 8e 8f │····│····│····│····│ 00000090 90 91 92 93 94 95 96 97 98 99 9a 9b 9c 9d 9e 9f │····│····│····│····│ 000000a0 a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aa ab ac ad ae af │····│····│····│····│ 000000b0 b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 ba bb bc bd be bf │····│····│····│····│ 000000c0 c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf │····│····│····│····│ 000000d0 d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 da db dc dd de df │····│····│····│····│ 000000e0 e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 ea eb ec ed ee ef │····│····│····│····│ 000000f0 f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 fa fb fc fd fe ff │····│····│····│····│ 00000100 >>> print hexdump(list(map(chr, range(256))), hexii=True) 00000000 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f │ 00000010 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f │ 00000020 20 .! ." .# .$ .% .& .' .( .) .* .+ ., .- .. ./ │ 00000030 .0 .1 .2 .3 .4 .5 .6 .7 .8 .9 .: .; .< .= .> .? │ 00000040 .@ .A .B .C .D .E .F .G .H .I .J .K .L .M .N .O │ 00000050 .P .Q .R .S .T .U .V .W .X .Y .Z .[ .\ .] 
.^ ._ │ 00000060 .` .a .b .c .d .e .f .g .h .i .j .k .l .m .n .o │ 00000070 .p .q .r .s .t .u .v .w .x .y .z .{ .| .} .~ 7f │ 00000080 80 81 82 83 84 85 86 87 88 89 8a 8b 8c 8d 8e 8f │ 00000090 90 91 92 93 94 95 96 97 98 99 9a 9b 9c 9d 9e 9f │ 000000a0 a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aa ab ac ad ae af │ 000000b0 b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 ba bb bc bd be bf │ 000000c0 c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf │ 000000d0 d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 da db dc dd de df │ 000000e0 e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 ea eb ec ed ee ef │ 000000f0 f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 fa fb fc fd fe ## │ 00000100 >>> print hexdump('X' * 64) 00000000 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 │XXXX│XXXX│XXXX│XXXX│ * 00000040 >>> print hexdump('X' * 64, skip=False) 00000000 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 │XXXX│XXXX│XXXX│XXXX│ 00000010 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 │XXXX│XXXX│XXXX│XXXX│ 00000020 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 │XXXX│XXXX│XXXX│XXXX│ 00000030 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 │XXXX│XXXX│XXXX│XXXX│ 00000040 >>> print hexdump(fit({0x10: 'X'*0x20, 0x50-1: '\xff'*20}, length=0xc0) + '\x00'*32, cyclic=1, hexii=1) 00000000 .a .a .a .a .b .a .a .a .c .a .a .a .d .a .a .a │ 00000010 .X .X .X .X .X .X .X .X .X .X .X .X .X .X .X .X │ * 00000030 .m .a .a .a .n .a .a .a .o .a .a .a .p .a .a .a │ 00000040 .q .a .a .a .r .a .a .a .s .a .a .a .t .a .a ## │ 00000050 ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## │ 00000060 ## ## ## .a .z .a .a .b .b .a .a .b .c .a .a .b │ 00000070 .d .a .a .b .e .a .a .b .f .a .a .b .g .a .a .b │ * 000000c0 │ * 000000e0 >>> print hexdump('A'*16, width=9) 00000000 41 41 41 41 41 41 41 41 41 │AAAA│AAAA│A│ 00000009 41 41 41 41 41 41 41 │AAAA│AAA│ 00000010 >>> print hexdump('A'*16, width=10) 00000000 41 41 41 41 41 41 41 41 41 41 │AAAA│AAAA│AA│ 0000000a 41 41 41 41 41 41 │AAAA│AA│ 00000010 >>> print hexdump('A'*16, width=11) 00000000 41 41 41 41 41 41 41 41 41 41 41 │AAAA│AAAA│AAA│ 0000000b 41 41 41 41 41 │AAAA│A│ 00000010 >>> print hexdump('A'*16, width=12) 00000000 41 41 41 41 41 41 41 41 41 41 41 41 │AAAA│AAAA│AAAA│ 0000000c 41 41 41 41 │AAAA││ 00000010 >>> print hexdump('A'*16, width=13) 00000000 41 41 41 41 41 41 41 41 41 41 41 41 41 │AAAA│AAAA│AAAA│A│ 0000000d 41 41 41 │AAA│ 00000010 >>> print hexdump('A'*16, width=14) 00000000 41 41 41 41 41 41 41 41 41 41 41 41 41 41 │AAAA│AAAA│AAAA│AA│ 0000000e 41 41 │AA│ 00000010 >>> print hexdump('A'*16, width=15) 00000000 41 41 41 41 41 41 41 41 41 41 41 41 41 41 41 │AAAA│AAAA│AAAA│AAA│ 0000000f 41 │A│ 00000010
def set_dm(self, num): """ Make GUI changes based on data model num. Get info from WD in appropriate format. """ #enable or disable self.btn1a if self.data_model_num == 3: self.btn1a.Enable() else: self.btn1a.Disable() # # set pmag_gui_dialogs global pmag_gui_dialogs if self.data_model_num == 2: pmag_gui_dialogs = pgd2 wx.CallAfter(self.get_wd_data2) elif self.data_model_num == 3: pmag_gui_dialogs = pgd3 wx.CallAfter(self.get_wd_data) # do / re-do menubar menubar = pmag_gui_menu.MagICMenu(self, data_model_num=self.data_model_num) self.SetMenuBar(menubar) self.menubar = menubar
Make GUI changes based on data model num. Get info from WD in appropriate format.
def resubmit(self, indices_or_msg_ids=None, subheader=None, block=None): """Resubmit one or more tasks. in-flight tasks may not be resubmitted. Parameters ---------- indices_or_msg_ids : integer history index, str msg_id, or list of either The indices or msg_ids of indices to be retrieved block : bool Whether to wait for the result to be done Returns ------- AsyncHubResult A subclass of AsyncResult that retrieves results from the Hub """ block = self.block if block is None else block if indices_or_msg_ids is None: indices_or_msg_ids = -1 if not isinstance(indices_or_msg_ids, (list,tuple)): indices_or_msg_ids = [indices_or_msg_ids] theids = [] for id in indices_or_msg_ids: if isinstance(id, int): id = self.history[id] if not isinstance(id, basestring): raise TypeError("indices must be str or int, not %r"%id) theids.append(id) content = dict(msg_ids = theids) self.session.send(self._query_socket, 'resubmit_request', content) zmq.select([self._query_socket], [], []) idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK) if self.debug: pprint(msg) content = msg['content'] if content['status'] != 'ok': raise self._unwrap_exception(content) mapping = content['resubmitted'] new_ids = [ mapping[msg_id] for msg_id in theids ] ar = AsyncHubResult(self, msg_ids=new_ids) if block: ar.wait() return ar
Resubmit one or more tasks. In-flight tasks may not be resubmitted. Parameters ---------- indices_or_msg_ids : integer history index, str msg_id, or list of either The history indices or msg_ids of the tasks to be resubmitted block : bool Whether to wait for the result to be done Returns ------- AsyncHubResult A subclass of AsyncResult that retrieves results from the Hub
def connect_async(self, connection_id, connection_string, callback): """Asynchronously connect to a device Args: connection_id (int): A unique identifier that will refer to this connection connection_string (string): A DeviceAdapter specific string that can be used to connect to a device using this DeviceAdapter. callback (callable): A function that will be called when the connection attempt finishes as callback(connection_id, adapter_id, success: bool, failure_reason: string or None) """ if callback is not None: callback(connection_id, self.id, False, "connect command is not supported in device adapter")
Asynchronously connect to a device Args: connection_id (int): A unique identifier that will refer to this connection connection_string (string): A DeviceAdapter specific string that can be used to connect to a device using this DeviceAdapter. callback (callable): A function that will be called when the connection attempt finishes as callback(connection_id, adapter_id, success: bool, failure_reason: string or None)
def strlen(self, name): """ Return the number of bytes stored in the value of the key :param name: str the name of the redis key :return: Future() """ with self.pipe as pipe: return pipe.strlen(self.redis_key(name))
Return the number of bytes stored in the value of the key :param name: str the name of the redis key :return: Future()
def _uneven_transform_deriv_shape(systematic_utilities, alt_IDs, rows_to_alts, shape_params, output_array=None, *args, **kwargs): """ Parameters ---------- systematic_utilities : 1D ndarray. All elements should be ints, floats, or longs. Should contain the systematic utilities of each observation per available alternative. Note that this vector is formed by the dot product of the design matrix with the vector of utility coefficients. alt_IDs : 1D ndarray. All elements should be ints. There should be one row per obervation per available alternative for the given observation. Elements denote the alternative corresponding to the given row of the design matrix. rows_to_alts : 2D scipy sparse matrix. There should be one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. All elements should be zeros or ones. shape_params : None or 1D ndarray. If an array, each element should be an int, float, or long. There should be one value per shape parameter of the model being used. output_array : 2D scipy sparse array. The array should have shape `(systematic_utilities.shape[0], shape_params.shape[0])`. It's data is to be replaced with the correct derivatives of the transformation vector with respect to the vector of shape parameters. This argument is NOT optional. Returns ------- output_array : 2D scipy sparse array. The shape of the returned array is `(systematic_utilities.shape[0], shape_params.shape[0])`. The returned array specifies the derivative of the transformed utilities with respect to the shape parameters. All elements are ints, floats, or longs. """ # Convert the shape parameters back into their 'natural parametrization' natural_shapes = np.exp(shape_params) natural_shapes[np.isposinf(natural_shapes)] = max_comp_value # Figure out what shape values correspond to each row of the # systematic utilities long_shapes = rows_to_alts.dot(natural_shapes) # Get the exponentiated (utilities times the shape parameter) exp_shape_utilities = np.exp(long_shapes * systematic_utilities) # Calculate the derivative of h_ij with respect to shape_j. derivs = (systematic_utilities / (1.0 + exp_shape_utilities)) # Guard against overflow. Only for cases of systematic_utilities becomming # huge. It is unlikely this safeguard will be needed. derivs[np.isposinf(systematic_utilities)] = 0 # Guard against underflow from v --> -inf. huge_index = np.isneginf(systematic_utilities) derivs[huge_index] = -max_comp_value # Return the matrix of dh_dshapes. Note the matrix should be of dimension # (systematic_utilities.shape[0], shape_params.shape[0]) # Note that the "* long_shapes" accounts for the fact that the derivative # of the natural shape parameters with resepect to the actual shape # parameters being estimated is simply # exp(actual shape parameters) = natural shape parameters. The # multiplication comes from the chain rule. output_array.data = derivs * long_shapes return output_array
Parameters ---------- systematic_utilities : 1D ndarray. All elements should be ints, floats, or longs. Should contain the systematic utilities of each observation per available alternative. Note that this vector is formed by the dot product of the design matrix with the vector of utility coefficients. alt_IDs : 1D ndarray. All elements should be ints. There should be one row per obervation per available alternative for the given observation. Elements denote the alternative corresponding to the given row of the design matrix. rows_to_alts : 2D scipy sparse matrix. There should be one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. All elements should be zeros or ones. shape_params : None or 1D ndarray. If an array, each element should be an int, float, or long. There should be one value per shape parameter of the model being used. output_array : 2D scipy sparse array. The array should have shape `(systematic_utilities.shape[0], shape_params.shape[0])`. It's data is to be replaced with the correct derivatives of the transformation vector with respect to the vector of shape parameters. This argument is NOT optional. Returns ------- output_array : 2D scipy sparse array. The shape of the returned array is `(systematic_utilities.shape[0], shape_params.shape[0])`. The returned array specifies the derivative of the transformed utilities with respect to the shape parameters. All elements are ints, floats, or longs.
def mfpt(T, target, origin=None, tau=1, mu=None): r"""Mean first passage times (from a set of starting states - optional) to a set of target states. Parameters ---------- T : ndarray or scipy.sparse matrix, shape=(n,n) Transition matrix. target : int or list of int Target states for mfpt calculation. origin : int or list of int (optional) Set of starting states. tau : int (optional) The time-lag (in elementary time steps of the microstate trajectory) at which the given transition matrix was constructed. mu : (n,) ndarray (optional) The stationary distribution of the transition matrix T. Returns ------- m_t : ndarray, shape=(n,) or shape(1,) Mean first passage time or vector of mean first passage times. Notes ----- The mean first passage time :math:`\mathbf{E}_x[T_Y]` is the expected hitting time of one state :math:`y` in :math:`Y` when starting in state :math:`x`. For a fixed target state :math:`y` it is given by .. math :: \mathbb{E}_x[T_y] = \left \{ \begin{array}{cc} 0 & x=y \\ 1+\sum_{z} T_{x,z} \mathbb{E}_z[T_y] & x \neq y \end{array} \right. For a set of target states :math:`Y` it is given by .. math :: \mathbb{E}_x[T_Y] = \left \{ \begin{array}{cc} 0 & x \in Y \\ 1+\sum_{z} T_{x,z} \mathbb{E}_z[T_Y] & x \notin Y \end{array} \right. The mean first passage time between sets, :math:`\mathbf{E}_X[T_Y]`, is given by .. math :: \mathbb{E}_X[T_Y] = \sum_{x \in X} \frac{\mu_x \mathbb{E}_x[T_Y]}{\sum_{z \in X} \mu_z} References ---------- .. [1] Hoel, P G and S C Port and C J Stone. 1972. Introduction to Stochastic Processes. Examples -------- >>> import numpy as np >>> from msmtools.analysis import mfpt >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]]) >>> m_t = mfpt(T, 0) >>> m_t array([ 0., 12., 22.]) """ # check inputs T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric') target = _types.ensure_int_vector(target) origin = _types.ensure_int_vector_or_None(origin) # go if _issparse(T): if origin is None: t_tau = sparse.mean_first_passage_time.mfpt(T, target) else: t_tau = sparse.mean_first_passage_time.mfpt_between_sets(T, target, origin, mu=mu) else: if origin is None: t_tau = dense.mean_first_passage_time.mfpt(T, target) else: t_tau = dense.mean_first_passage_time.mfpt_between_sets(T, target, origin, mu=mu) # scale answer by lag time used. return tau * t_tau
r"""Mean first passage times (from a set of starting states - optional) to a set of target states. Parameters ---------- T : ndarray or scipy.sparse matrix, shape=(n,n) Transition matrix. target : int or list of int Target states for mfpt calculation. origin : int or list of int (optional) Set of starting states. tau : int (optional) The time-lag (in elementary time steps of the microstate trajectory) at which the given transition matrix was constructed. mu : (n,) ndarray (optional) The stationary distribution of the transition matrix T. Returns ------- m_t : ndarray, shape=(n,) or shape(1,) Mean first passage time or vector of mean first passage times. Notes ----- The mean first passage time :math:`\mathbf{E}_x[T_Y]` is the expected hitting time of one state :math:`y` in :math:`Y` when starting in state :math:`x`. For a fixed target state :math:`y` it is given by .. math :: \mathbb{E}_x[T_y] = \left \{ \begin{array}{cc} 0 & x=y \\ 1+\sum_{z} T_{x,z} \mathbb{E}_z[T_y] & x \neq y \end{array} \right. For a set of target states :math:`Y` it is given by .. math :: \mathbb{E}_x[T_Y] = \left \{ \begin{array}{cc} 0 & x \in Y \\ 1+\sum_{z} T_{x,z} \mathbb{E}_z[T_Y] & x \notin Y \end{array} \right. The mean first passage time between sets, :math:`\mathbf{E}_X[T_Y]`, is given by .. math :: \mathbb{E}_X[T_Y] = \sum_{x \in X} \frac{\mu_x \mathbb{E}_x[T_Y]}{\sum_{z \in X} \mu_z} References ---------- .. [1] Hoel, P G and S C Port and C J Stone. 1972. Introduction to Stochastic Processes. Examples -------- >>> import numpy as np >>> from msmtools.analysis import mfpt >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]]) >>> m_t = mfpt(T, 0) >>> m_t array([ 0., 12., 22.])
def destroy_venv(env_path, venvscache=None): """Destroy a venv.""" # remove the venv itself in disk logger.debug("Destroying virtualenv at: %s", env_path) shutil.rmtree(env_path, ignore_errors=True) # remove venv from cache if venvscache is not None: venvscache.remove(env_path)
Destroy a venv.
def create(self, validated_data): """ This is a standard method called indirectly by calling 'save' on the serializer. This method expects the 'parent_field' and 'parent_instance' to be included in the Serializer context. """ if self.context.get('parent_field') \ and self.context.get('parent_instance'): validated_data.update({ self.context.get('parent_field'): self.context.get('parent_instance')}) instance = self.Meta.model(**validated_data) instance.full_clean() instance.save() return instance
This is a standard method called indirectly by calling 'save' on the serializer. This method expects the 'parent_field' and 'parent_instance' to be included in the Serializer context.
def _encrypt(self): """Use your key thing to encrypt things.""" from M2Crypto import BIO, SMIME, X509 # Iterate through the fields and pull out the ones that have a value. plaintext = 'cert_id=%s\n' % self.cert_id for name, field in self.fields.items(): value = None if name in self.initial: value = self.initial[name] elif field.initial is not None: value = field.initial if value is not None: plaintext += u'%s=%s\n' % (name, value) plaintext = plaintext.encode('utf-8') # Begin crypto weirdness. s = SMIME.SMIME() s.load_key_bio(BIO.openfile(self.private_cert), BIO.openfile(self.public_cert)) p7 = s.sign(BIO.MemoryBuffer(plaintext), flags=SMIME.PKCS7_BINARY) x509 = X509.load_cert_bio(BIO.openfile(self.paypal_cert)) sk = X509.X509_Stack() sk.push(x509) s.set_x509_stack(sk) s.set_cipher(SMIME.Cipher('des_ede3_cbc')) tmp = BIO.MemoryBuffer() p7.write_der(tmp) p7 = s.encrypt(tmp, flags=SMIME.PKCS7_BINARY) out = BIO.MemoryBuffer() p7.write(out) return out.read().decode()
Sign the form fields with the merchant's key pair and encrypt the result with the PayPal certificate (S/MIME).
def from_boto_instance(cls, instance): """ Loads a ``HostEntry`` from a boto instance. :param instance: A boto instance object. :type instance: :py:class:`boto.ec2.instanceInstance` :rtype: :py:class:`HostEntry` """ return cls( name=instance.tags.get('Name'), private_ip=instance.private_ip_address, public_ip=instance.ip_address, instance_type=instance.instance_type, instance_id=instance.id, hostname=instance.dns_name, stack_id=instance.tags.get('aws:cloudformation:stack-id'), stack_name=instance.tags.get('aws:cloudformation:stack-name'), logical_id=instance.tags.get('aws:cloudformation:logical-id'), security_groups=[g.name for g in instance.groups], launch_time=instance.launch_time, ami_id=instance.image_id, tags={k.lower(): v for k, v in six.iteritems(instance.tags)} )
Loads a ``HostEntry`` from a boto instance. :param instance: A boto instance object. :type instance: :py:class:`boto.ec2.instance.Instance` :rtype: :py:class:`HostEntry`
def clear_decimal_value(self, label): """stub""" if label not in self.my_osid_object_form._my_map['decimalValues']: raise NotFound() del self.my_osid_object_form._my_map['decimalValues'][label]
stub
def setup(self, data_manager): """ Hook to setup this service with a specific DataManager. Will recursively setup sub-services. """ self._data_manager = data_manager if self._data_manager: self._dal = self._data_manager.get_dal() else: self._dal = None for key, service in self._services.items(): service.setup(self._data_manager)
Hook to setup this service with a specific DataManager. Will recursively setup sub-services.
def _has_actions(self, event): """Check if a notification type has any enabled actions.""" event_actions = self._aconfig.get(event) return event_actions is None or bool(event_actions)
Check if a notification type has any enabled actions.
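A standalone sketch of the same predicate, illustrating the three cases (the config dict is hypothetical): an event with no entry in the action config is treated as enabled, an explicitly empty list disables it:

def _has_actions(aconfig, event):
    # Same logic as the method above, written as a free function for illustration.
    event_actions = aconfig.get(event)
    return event_actions is None or bool(event_actions)

config = {'push': ['email'], 'ping': []}
assert _has_actions(config, 'push') is True    # non-empty action list
assert _has_actions(config, 'ping') is False   # explicitly empty list
assert _has_actions(config, 'merge') is True   # no entry -> None -> enabled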
def find_any_reports(self, usage_page = 0, usage_id = 0): """Find any report type referencing HID usage control/data item. Results are returned in a dictionary mapping report_type to usage lists. """ items = [ (HidP_Input, self.find_input_reports(usage_page, usage_id)), (HidP_Output, self.find_output_reports(usage_page, usage_id)), (HidP_Feature, self.find_feature_reports(usage_page, usage_id)), ] return dict([(t, r) for t, r in items if r])
Find any report type referencing HID usage control/data item. Results are returned in a dictionary mapping report_type to usage lists.
def errReceived(self, data): """ :api:`twisted.internet.protocol.ProcessProtocol <ProcessProtocol>` API """ if self.stderr: self.stderr.write(data) if self.kill_on_stderr: self.transport.loseConnection() raise RuntimeError( "Received stderr output from slave Tor process: " + data.decode('utf8') )
:api:`twisted.internet.protocol.ProcessProtocol <ProcessProtocol>` API
def rgb_to_xy(self, red, green, blue): """Converts red, green and blue integer values to approximate CIE 1931 x and y coordinates. """ point = self.color.get_xy_point_from_rgb(red, green, blue) return (point.x, point.y)
Converts red, green and blue integer values to approximate CIE 1931 x and y coordinates.
def is_tensor_final(self, tensor_name): """Whether a tensor is a final output of the computation. Args: tensor_name: a string, name of a tensor in the graph. Returns: a boolean indicating whether the tensor was a final output. """ tensor = self._name_to_tensor(tensor_name) return tensor in self._final_tensors
Whether a tensor is a final output of the computation. Args: tensor_name: a string, name of a tensor in the graph. Returns: a boolean indicating whether the tensor was a final output.
def pct_decode(s): """ Return the percent-decoded version of string s. >>> pct_decode('%43%6F%75%63%6F%75%2C%20%6A%65%20%73%75%69%73%20%63%6F%6E%76%69%76%69%61%6C') 'Coucou, je suis convivial' >>> pct_decode('') '' >>> pct_decode('%2525') '%25' """ if s is None: return None elif not isinstance(s, unicode): s = str(s) else: s = s.encode('utf8') return PERCENT_CODE_SUB(lambda mo: chr(int(mo.group(0)[1:], 16)), s)
Return the percent-decoded version of string s. >>> pct_decode('%43%6F%75%63%6F%75%2C%20%6A%65%20%73%75%69%73%20%63%6F%6E%76%69%76%69%61%6C') 'Coucou, je suis convivial' >>> pct_decode('') '' >>> pct_decode('%2525') '%25'
def rulefor(self, addr): """Return the rule object for an address from our deps graph.""" return self.rule.subgraph.node[self.rule.makeaddress(addr)][ 'target_obj']
Return the rule object for an address from our deps graph.
def _init_datastores(): """ Initialize all datastores. """ global _DATASTORES array = settings.DATASTORES for config in array: cls = _lookup(config['ENGINE']) ds = _get_datastore(cls, DataStore, config) _DATASTORES.append(ds) legacy_settings = getattr(settings, 'MACHINE_CATEGORY_DATASTORES', None) if legacy_settings is not None: warnings.warn( "MACHINE_CATEGORY_DATASTORES is deprecated, " "please change to use DATASTORES", ) for name in ['ldap']: array = settings.MACHINE_CATEGORY_DATASTORES.get(name, []) for config in array: cls = _lookup(config['ENGINE']) ds = _get_datastore(cls, DataStore, config) _DATASTORES.append(ds)
Initialize all datastores.
def clip_by_global_norm_per_ctx(self, max_norm=1.0, param_names=None): """Clips gradient norm. The norm is computed over all gradients together, as if they were concatenated into a single vector. Gradients are modified in-place. The method is first used in `[ICML2013] On the difficulty of training recurrent neural networks` Note that the gradients are concatenated per context in this implementation. Examples -------- An example of using clip_by_global_norm_per_ctx to clip the gradient before updating the parameters:: >>> #Get the gradient via back-propagation >>> net.forward_backward(data_batch=data_batch) >>> norm_val = net.clip_by_global_norm_per_ctx(max_norm=2.0, param_names=['w0']) >>> net.update() """ assert self.binded and self.params_initialized and self.optimizer_initialized num_ctx = len(self._exec_group.grad_arrays[0]) grad_array_per_ctx = [[] for i in range(num_ctx)] assert(param_names is not None) for param_name in param_names: param_idx = self._exec_group.param_names.index(param_name) grad_val = self._exec_group.grad_arrays[param_idx] assert(len(grad_val) == num_ctx) for i in range(num_ctx): grad_array_per_ctx[i].append(grad_val[i]) norm_vals = [] for i in range(num_ctx): norm_vals.append(mx.gluon.utils.clip_global_norm(grad_array_per_ctx[i], max_norm)) return norm_vals
Clips gradient norm. The norm is computed over all gradients together, as if they were concatenated into a single vector. Gradients are modified in-place. The method is first used in `[ICML2013] On the difficulty of training recurrent neural networks` Note that the gradients are concatenated per context in this implementation. Examples -------- An example of using clip_by_global_norm_per_ctx to clip the gradient before updating the parameters:: >>> #Get the gradient via back-propagation >>> net.forward_backward(data_batch=data_batch) >>> norm_val = net.clip_by_global_norm_per_ctx(max_norm=2.0, param_names=['w0']) >>> net.update()
def check_format(self, sm_format): """ Return ``True`` if the given sync map format is allowed, and ``False`` otherwise. :param sm_format: the sync map format to be checked :type sm_format: Unicode string :rtype: bool """ if sm_format not in SyncMapFormat.ALLOWED_VALUES: self.print_error(u"Sync map format '%s' is not allowed" % (sm_format)) self.print_info(u"Allowed formats:") self.print_generic(u" ".join(SyncMapFormat.ALLOWED_VALUES)) return False return True
Return ``True`` if the given sync map format is allowed, and ``False`` otherwise. :param sm_format: the sync map format to be checked :type sm_format: Unicode string :rtype: bool
def format_price(price, currency='$'): """ Format the price to have the appropriate currency and digits. :param price: The price amount. :param currency: The currency for the price. :return: A formatted price string, e.g. '$10', '$10.52'. """ if int(price) == price: return '{}{}'.format(currency, int(price)) return '{}{:0.2f}'.format(currency, price)
Format the price to have the appropriate currency and digits.

:param price: The price amount.
:param currency: The currency for the price.
:return: A formatted price string, i.e. '$10', '$10.52'.
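A few illustrative calls, derived from the code above rather than the original docstring:

format_price(10)                 # '$10'   (whole amounts drop the decimals)
format_price(10.523)             # '$10.52'
format_price(7.5, currency='€')  # '€7.50'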
def canonical_chrom_sorted(in_chroms):
    """
    Sort a list of chromosomes in the order 1..22, X, Y, M/MT

    :param list in_chroms: Input chromosomes
    :return: Sorted chromosomes
    :rtype: list[str]
    """
    if len(in_chroms) == 0:
        return []
    chr_prefix = False
    mt = False
    if in_chroms[0].startswith('chr'):
        in_chroms = [x.lstrip('chr') for x in in_chroms]
        chr_prefix = True
    if 'MT' in in_chroms:
        in_chroms[in_chroms.index('MT')] = 'M'
        mt = True
    in_chroms = sorted(in_chroms, key=lambda c: int(c) if c not in ('X', 'Y', 'M') else c)
    try:
        m_index = in_chroms.index('M')
    except ValueError:
        pass
    else:
        in_chroms.pop(m_index)
        in_chroms.append('M')
    # At this point it should be nicely sorted
    if mt:
        in_chroms[in_chroms.index('M')] = 'MT'
    if chr_prefix:
        in_chroms = [''.join(['chr', x]) for x in in_chroms]
    return in_chroms
Sort a list of chromosomes in the order 1..22, X, Y, M/MT

:param list in_chroms: Input chromosomes
:return: Sorted chromosomes
:rtype: list[str]
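The sort key above compares ints with strings, which Python 2 tolerates but Python 3 rejects with a TypeError. A hedged, Python 3-safe sketch of the same 1..22, X, Y, M/MT ordering (an illustration, not the library's code):

def chrom_sort_key(chrom):
    # Strip an optional 'chr' prefix, then map the non-numeric names past 22.
    c = chrom[3:] if chrom.startswith('chr') else chrom
    special = {'X': 23, 'Y': 24, 'M': 25, 'MT': 25}
    return special[c] if c in special else int(c)

sorted(['chr10', 'chrX', 'chr2', 'chrMT', 'chr1'], key=chrom_sort_key)
# -> ['chr1', 'chr2', 'chr10', 'chrX', 'chrMT']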
def _context_menu_make(self, pos):
    """Reimplement the IPython context menu"""
    menu = super(ShellWidget, self)._context_menu_make(pos)
    return self.ipyclient.add_actions_to_context_menu(menu)
Reimplement the IPython context menu
def ID_colored_tube(color):
    """Look up the inner diameter of Ismatec 3-stop tubing given its color code.

    :param color: Color of the 3-stop tubing
    :type color: string
    :returns: Inner diameter of the 3-stop tubing (mm)
    :rtype: float

    :Examples:

    >>> from aguaclara.research.peristaltic_pump import ID_colored_tube
    >>> from aguaclara.core.units import unit_registry as u
    >>> ID_colored_tube("yellow-blue")
    <Quantity(1.52, 'millimeter')>
    >>> ID_colored_tube("orange-yellow")
    <Quantity(0.51, 'millimeter')>
    >>> ID_colored_tube("purple-white")
    <Quantity(2.79, 'millimeter')>
    """
    tubing_data_path = os.path.join(os.path.dirname(__file__), "data", "3_stop_tubing.txt")
    df = pd.read_csv(tubing_data_path, delimiter='\t')
    idx = df["Color"] == color
    return df[idx]['Diameter (mm)'].values[0] * u.mm
Look up the inner diameter of Ismatec 3-stop tubing given its color code.

:param color: Color of the 3-stop tubing
:type color: string
:returns: Inner diameter of the 3-stop tubing (mm)
:rtype: float

:Examples:

>>> from aguaclara.research.peristaltic_pump import ID_colored_tube
>>> from aguaclara.core.units import unit_registry as u
>>> ID_colored_tube("yellow-blue")
<Quantity(1.52, 'millimeter')>
>>> ID_colored_tube("orange-yellow")
<Quantity(0.51, 'millimeter')>
>>> ID_colored_tube("purple-white")
<Quantity(2.79, 'millimeter')>
async def set_topic_channel(self, channel):
    """Set the topic channel for this server"""
    data = datatools.get_data()
    data["discord"]["servers"][self.server_id][_data.modulename]["topic_id"] = channel.id
    datatools.write_data(data)

    self.topicchannel = channel
    await self.set_topic(self.topic)

    await client.send_typing(channel)
    embed = ui_embed.topic_update(channel, self.topicchannel)
    await embed.send()
Set the topic channel for this server
def import_locations(self, data):
    """Parse `GNU miscfiles`_ cities data files.

    ``import_locations()`` returns a list containing :class:`City` objects.

    It expects data files in the same format that `GNU miscfiles`_ provides,
    that is::

        ID         : 1
        Type       : City
        Population : 210700
        Size       :
        Name       : Aberdeen
        Country    : UK
        Region     : Scotland
        Location   : Earth
        Longitude  : -2.083
        Latitude   : 57.150
        Elevation  :
        Date       : 19961206
        Entered-By : [email protected]
        //
        ID         : 2
        Type       : City
        Population : 1950000
        Size       :
        Name       : Abidjan
        Country    : Ivory Coast
        Region     :
        Location   : Earth
        Longitude  : -3.867
        Latitude   : 5.333
        Elevation  :
        Date       : 19961206
        Entered-By : [email protected]

    When processed by ``import_locations()`` will return ``list`` object in
    the following style::

        [City(1, "City", 210700, None, "Aberdeen", "UK", "Scotland", "Earth",
              -2.083, 57.15, None, (1996, 12, 6, 0, 0, 0, 4, 341, -1),
              "[email protected]"),
         City(2, "City", 1950000, None, "Abidjan", "Ivory Coast", "", "Earth",
              -3.867, 5.333, None, (1996, 12, 6, 0, 0, 0, 4, 341, -1),
              "[email protected]")])

    Args:
        data (iter): :abbr:`NOAA (National Oceanographic and Atmospheric
            Administration)` station data to read

    Returns:
        list: Places as ``City`` objects

    Raises:
        TypeError: Invalid value for data

    .. _GNU miscfiles: http://directory.fsf.org/project/miscfiles/
    """
    self._data = data
    if hasattr(data, 'read'):
        data = data.read().split('//\n')
    elif isinstance(data, list):
        pass
    elif isinstance(data, basestring):
        data = open(data).read().split('//\n')
    else:
        raise TypeError('Unable to handle data of type %r' % type(data))

    keys = ('identifier', 'ptype', 'population', 'size', 'name', 'country',
            'region', 'location', 'longitude', 'latitude', 'altitude',
            'date', 'entered')
    for record in data:
        # We truncate after splitting because the v1.4.2 datafile contains
        # a broken separator between 229 and 230 that would otherwise break
        # the import
        data = [i.split(':')[1].strip() for i in record.splitlines()[:13]]
        entries = dict(zip(keys, data))

        # Entry for Utrecht has the incorrect value of 0.000 for elevation.
        if entries['altitude'] == '0.000':
            logging.debug("Ignoring `0.000' value for elevation in %r "
                          'entry' % record)
            entries['altitude'] = ''
        for i in ('identifier', 'population', 'size', 'altitude'):
            entries[i] = int(entries[i]) if entries[i] else None
        for i in ('longitude', 'latitude'):
            entries[i] = float(entries[i]) if entries[i] else None
        entries['date'] = time.strptime(entries['date'], '%Y%m%d')
        self.append(City(**entries))
Parse `GNU miscfiles`_ cities data files.

``import_locations()`` returns a list containing :class:`City` objects.

It expects data files in the same format that `GNU miscfiles`_ provides,
that is::

    ID         : 1
    Type       : City
    Population : 210700
    Size       :
    Name       : Aberdeen
    Country    : UK
    Region     : Scotland
    Location   : Earth
    Longitude  : -2.083
    Latitude   : 57.150
    Elevation  :
    Date       : 19961206
    Entered-By : [email protected]
    //
    ID         : 2
    Type       : City
    Population : 1950000
    Size       :
    Name       : Abidjan
    Country    : Ivory Coast
    Region     :
    Location   : Earth
    Longitude  : -3.867
    Latitude   : 5.333
    Elevation  :
    Date       : 19961206
    Entered-By : [email protected]

When processed by ``import_locations()`` will return ``list`` object in
the following style::

    [City(1, "City", 210700, None, "Aberdeen", "UK", "Scotland", "Earth",
          -2.083, 57.15, None, (1996, 12, 6, 0, 0, 0, 4, 341, -1),
          "[email protected]"),
     City(2, "City", 1950000, None, "Abidjan", "Ivory Coast", "", "Earth",
          -3.867, 5.333, None, (1996, 12, 6, 0, 0, 0, 4, 341, -1),
          "[email protected]")])

Args:
    data (iter): :abbr:`NOAA (National Oceanographic and Atmospheric
        Administration)` station data to read

Returns:
    list: Places as ``City`` objects

Raises:
    TypeError: Invalid value for data

.. _GNU miscfiles: http://directory.fsf.org/project/miscfiles/
def section(title, bar=OVERLINE, strm=sys.stdout):
    """Helper function for testing demo routines
    """
    width = utils.term.width
    printy(bold(title.center(width)))
    printy(bold((bar * width)[:width]))
Helper function for testing demo routines
def GetFileObject(self, data_stream_name=''):
    """Retrieves the file-like object.

    Args:
        data_stream_name (Optional[str]): name of the data stream, where an empty
            string represents the default data stream.

    Returns:
        FileIO: a file-like object or None if not available.
    """
    if data_stream_name:
        return None
    return resolver.Resolver.OpenFileObject(
        self.path_spec, resolver_context=self._resolver_context)
Retrieves the file-like object.

Args:
    data_stream_name (Optional[str]): name of the data stream, where an empty
        string represents the default data stream.

Returns:
    FileIO: a file-like object or None if not available.
def build_signature(self, user_api_key, user_secret, request):
    """Return the signature for the request."""
    path = request.get_full_path()
    sent_signature = request.META.get(
        self.header_canonical('Authorization'))
    signature_headers = self.get_headers_from_signature(sent_signature)
    unsigned = self.build_dict_to_sign(request, signature_headers)

    # Sign string and compare.
    signer = HeaderSigner(
        key_id=user_api_key, secret=user_secret,
        headers=signature_headers, algorithm=self.ALGORITHM)
    signed = signer.sign(unsigned, method=request.method, path=path)
    return signed['authorization']
Return the signature for the request.
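The method only rebuilds the signature; the caller still has to compare the result with the Authorization header the client actually sent. A minimal, hedged sketch of that comparison step (not from the original source):

import hmac

def signatures_match(sent_authorization, recomputed_authorization):
    # Constant-time comparison avoids leaking where the two strings diverge.
    return hmac.compare_digest(sent_authorization, recomputed_authorization)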
def join(input_files, output_file):
    '''
    Join geojsons into one. The spatial reference system of the output file
    is the same as the one of the last file in the list.

    Args:
        input_files (list): List of file name strings.
        output_file (str): Output file name.
    '''
    # get feature collections
    final_features = []
    for file in input_files:
        with open(file) as f:
            feat_collection = geojson.load(f)
            final_features += feat_collection['features']

    feat_collection['features'] = final_features

    # write to output file
    with open(output_file, 'w') as f:
        geojson.dump(feat_collection, f)
Join geojsons into one. The spatial reference system of the output file
is the same as the one of the last file in the list.

Args:
    input_files (list): List of file name strings.
    output_file (str): Output file name.
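An illustrative round trip with the geojson package; the file names here are made up for the example:

import geojson

feature = geojson.Feature(geometry=geojson.Point((-122.42, 37.77)))
for name in ('a.geojson', 'b.geojson'):
    with open(name, 'w') as f:
        geojson.dump(geojson.FeatureCollection([feature]), f)

join(['a.geojson', 'b.geojson'], 'merged.geojson')  # merged file contains both features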
def process(self, salt_data, token, opts):
    '''
    Process events and publish data
    '''
    log.debug('In process %s', threading.current_thread())
    log.debug(salt_data['tag'])
    log.debug(salt_data)
    parts = salt_data['tag'].split('/')
    if len(parts) < 2:
        return

    # TBD: Simplify these conditional expressions
    if parts[1] == 'job':
        log.debug('In job part 1')
        if parts[3] == 'new':
            log.debug('In new job')
            self.process_new_job_event(salt_data)
            # if salt_data['data']['fun'] == 'grains.items':
            #     self.minions = {}
        elif parts[3] == 'ret':
            log.debug('In ret')
            self.process_ret_job_event(salt_data)
            if salt_data['data']['fun'] == 'grains.items':
                self.process_minion_update(salt_data)
    elif parts[1] == 'key':
        log.debug('In key')
        self.process_key_event(salt_data)
    elif parts[1] == 'presence':
        self.process_presence_events(salt_data, token, opts)
Process events and publish data
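For reference, this is how a typical Salt job-return tag decomposes under the split above (the tag itself is made up for the example):

parts = 'salt/job/20190505123456789012/ret/web01'.split('/')
# parts == ['salt', 'job', '20190505123456789012', 'ret', 'web01']
# parts[1] == 'job' and parts[3] == 'ret', so process_ret_job_event() handles it.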