Dataset columns: code (string, lengths 75 to 104k) and docstring (string, lengths 1 to 46.9k).
def to_html(self, suppress_newlines=False, in_div_flag=False):  # pylint: disable=W0221
    """Render a MessageElement as html.

    :param suppress_newlines: Whether to suppress any newlines in the
        output. If this option is enabled, the entire html output will be
        rendered on a single line.
    :type suppress_newlines: bool

    :param in_div_flag: Whether the message should be placed into an outer
        div element.
    :type in_div_flag: bool

    :returns: HTML representation of the message.
    :rtype: str
    """
    if in_div_flag or self.in_div_flag:
        message = '<div %s>' % self.html_attributes()
    else:
        message = ''

    last_was_text = False
    for m in self.message:
        if last_was_text and not isinstance(m, Text):
            message += '\n'
        message += m.to_html()
        if isinstance(m, Text):
            last_was_text = True
        else:
            message += '\n'
            last_was_text = False

    # close the div whenever one was opened above
    if in_div_flag or self.in_div_flag:
        message += '</div>'
    if suppress_newlines:
        return message.replace('\n', '')
    return message
def xyz2lonlat(x, y, z):
    """Convert cartesian to lon lat."""
    lon = xu.rad2deg(xu.arctan2(y, x))
    lat = xu.rad2deg(xu.arctan2(z, xu.sqrt(x**2 + y**2)))
    return lon, lat
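A quick usage sketch; it assumes `xu` is numpy or a numpy-compatible wrapper (an assumption, since the import is not shown above):

import numpy as xu  # assumption: the module aliases numpy (or a drop-in) as `xu`

# Unit vector on the equator at 90°E:
lon, lat = xyz2lonlat(0.0, 1.0, 0.0)
print(lon, lat)  # 90.0 0.0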
def authenticate(self,
                 username=None, password=None,
                 actions=None, response=None,
                 authorization=None):
    # pylint: disable=too-many-arguments,too-many-locals
    """
    Authenticate to the registry using a username and password,
    an authorization header or otherwise as the anonymous user.

    :param username: User name to authenticate as.
    :type username: str

    :param password: User's password.
    :type password: str

    :param actions: If you know which types of operation you need to make
        on the registry, specify them here. Valid actions are ``pull``,
        ``push`` and ``*``.
    :type actions: list

    :param response: When the ``auth`` function you passed to
        :class:`DXFBase`'s constructor is called, it is passed a HTTP
        response object. Pass it back to :meth:`authenticate` to have it
        automatically detect which actions are required.
    :type response: requests.Response

    :param authorization: ``Authorization`` header value.
    :type authorization: str

    :rtype: str
    :returns: Authentication token, if the registry supports bearer tokens.
        Otherwise ``None``, and HTTP Basic auth is used (if the registry
        requires authentication).
    """
    if response is None:
        with warnings.catch_warnings():
            _ignore_warnings(self)
            response = self._sessions[0].get(self._base_url, verify=self._tlsverify)

    if response.ok:
        return None

    # pylint: disable=no-member
    if response.status_code != requests.codes.unauthorized:
        raise exceptions.DXFUnexpectedStatusCodeError(response.status_code,
                                                      requests.codes.unauthorized)

    if self._insecure:
        raise exceptions.DXFAuthInsecureError()

    parsed = www_authenticate.parse(response.headers['www-authenticate'])

    if username is not None and password is not None:
        headers = {
            'Authorization': 'Basic ' + base64.b64encode(
                _to_bytes_2and3(username + ':' + password)).decode('utf-8')
        }
    elif authorization is not None:
        headers = {
            'Authorization': authorization
        }
    else:
        headers = {}

    if 'bearer' in parsed:
        info = parsed['bearer']
        if actions and self._repo:
            scope = 'repository:' + self._repo + ':' + ','.join(actions)
        elif 'scope' in info:
            scope = info['scope']
        else:
            scope = ''
        url_parts = list(urlparse.urlparse(info['realm']))
        query = urlparse.parse_qs(url_parts[4])
        query.update({
            'service': info['service'],
            'scope': scope
        })
        url_parts[4] = urlencode(query, True)
        url_parts[0] = 'https'
        if self._auth_host:
            url_parts[1] = self._auth_host
        auth_url = urlparse.urlunparse(url_parts)
        with warnings.catch_warnings():
            _ignore_warnings(self)
            r = self._sessions[0].get(auth_url, headers=headers, verify=self._tlsverify)
        _raise_for_status(r)
        rjson = r.json()
        # some registries return 'access_token', others 'token'
        self.token = rjson['access_token'] if 'access_token' in rjson else rjson['token']
        return self._token

    self._headers = headers
    return None
def dataReceived(self, data):
    """
    Takes "data" which we assume is json encoded.

    If data has a subject_id attribute, we pass that to the dispatcher
    as the subject_id so it will get carried through into any return
    communications and be identifiable to the client.

    Falls back to just passing the message along...
    """
    try:
        address = self.guid
        data = json.loads(data)
        threads.deferToThread(send_signal, self.dispatcher, data)
        if 'hx_subscribe' in data:
            return self.dispatcher.subscribe(self.transport, data)
        if 'address' in data:
            address = data['address']
        else:
            address = self.guid
        self.dispatcher.send(address, data)
    except Exception as e:
        # report the failure back to the client, then re-raise
        self.dispatcher.send(
            self.guid,
            {'message': data, 'error': str(e)}
        )
        raise
def stem(self, word, alternate_vowels=False):
    """Return Snowball German stem.

    Parameters
    ----------
    word : str
        The word to stem
    alternate_vowels : bool
        Composes ae as ä, oe as ö, and ue as ü before running the
        algorithm

    Returns
    -------
    str
        Word stem

    Examples
    --------
    >>> stmr = SnowballGerman()
    >>> stmr.stem('lesen')
    'les'
    >>> stmr.stem('graues')
    'grau'
    >>> stmr.stem('buchstabieren')
    'buchstabi'

    """
    # lowercase, normalize, and compose
    word = normalize('NFC', word.lower())
    word = word.replace('ß', 'ss')

    if len(word) > 2:
        for i in range(2, len(word)):
            if word[i] in self._vowels and word[i - 2] in self._vowels:
                if word[i - 1] == 'u':
                    word = word[: i - 1] + 'U' + word[i:]
                elif word[i - 1] == 'y':
                    word = word[: i - 1] + 'Y' + word[i:]

    if alternate_vowels:
        word = word.replace('ae', 'ä')
        word = word.replace('oe', 'ö')
        word = word.replace('que', 'Q')  # shield 'que' before mapping 'ue'
        word = word.replace('ue', 'ü')
        word = word.replace('Q', 'que')

    r1_start = max(3, self._sb_r1(word))
    r2_start = self._sb_r2(word)

    # Step 1
    niss_flag = False
    if word[-3:] == 'ern':
        if len(word[r1_start:]) >= 3:
            word = word[:-3]
    elif word[-2:] == 'em':
        if len(word[r1_start:]) >= 2:
            word = word[:-2]
    elif word[-2:] == 'er':
        if len(word[r1_start:]) >= 2:
            word = word[:-2]
    elif word[-2:] == 'en':
        if len(word[r1_start:]) >= 2:
            word = word[:-2]
            niss_flag = True
    elif word[-2:] == 'es':
        if len(word[r1_start:]) >= 2:
            word = word[:-2]
            niss_flag = True
    elif word[-1:] == 'e':
        if len(word[r1_start:]) >= 1:
            word = word[:-1]
            niss_flag = True
    elif word[-1:] == 's':
        if (
            len(word[r1_start:]) >= 1
            and len(word) >= 2
            and word[-2] in self._s_endings
        ):
            word = word[:-1]

    if niss_flag and word[-4:] == 'niss':
        word = word[:-1]

    # Step 2
    if word[-3:] == 'est':
        if len(word[r1_start:]) >= 3:
            word = word[:-3]
    elif word[-2:] == 'en':
        if len(word[r1_start:]) >= 2:
            word = word[:-2]
    elif word[-2:] == 'er':
        if len(word[r1_start:]) >= 2:
            word = word[:-2]
    elif word[-2:] == 'st':
        if (
            len(word[r1_start:]) >= 2
            and len(word) >= 6
            and word[-3] in self._st_endings
        ):
            word = word[:-2]

    # Step 3
    if word[-4:] == 'isch':
        if len(word[r2_start:]) >= 4 and word[-5] != 'e':
            word = word[:-4]
    elif word[-4:] in {'lich', 'heit'}:
        if len(word[r2_start:]) >= 4:
            word = word[:-4]
            if word[-2:] in {'er', 'en'} and len(word[r1_start:]) >= 2:
                word = word[:-2]
    elif word[-4:] == 'keit':
        if len(word[r2_start:]) >= 4:
            word = word[:-4]
            if word[-4:] == 'lich' and len(word[r2_start:]) >= 4:
                word = word[:-4]
            elif word[-2:] == 'ig' and len(word[r2_start:]) >= 2:
                word = word[:-2]
    elif word[-3:] in {'end', 'ung'}:
        if len(word[r2_start:]) >= 3:
            word = word[:-3]
            if (
                word[-2:] == 'ig'
                and len(word[r2_start:]) >= 2
                and word[-3] != 'e'
            ):
                word = word[:-2]
    elif word[-2:] in {'ig', 'ik'}:
        if len(word[r2_start:]) >= 2 and word[-3] != 'e':
            word = word[:-2]

    # Change 'Y' and 'U' back to lowercase if survived stemming
    for i in range(0, len(word)):
        if word[i] == 'Y':
            word = word[:i] + 'y' + word[i + 1:]
        elif word[i] == 'U':
            word = word[:i] + 'u' + word[i + 1:]

    # Remove umlauts
    _umlauts = dict(zip((ord(_) for _ in 'äöü'), 'aou'))
    word = word.translate(_umlauts)

    return word
def set_shape(self, id, new_shape):
    """Copies the turtle data from the old shape buffer to the new"""
    old_shape = self.id_to_shape[id]
    old_buffer = self.get_buffer(old_shape)
    model, color = old_buffer.get(id)
    new_data = self._create_turtle(id, new_shape, model, color)
    old_buffer.remove(id)
    self.id_to_shape[id] = new_shape
    return new_data
def _Initialize(self, http, url):
    """Initialize this download by setting self.http and self.url.

    We want the user to be able to override self.http by having set
    the value in the constructor; in that case, we ignore the provided
    http.

    Args:
      http: An httplib2.Http instance or None.
      url: The url for this transfer.

    Returns:
      None. Initializes self.
    """
    self.EnsureUninitialized()
    if self.http is None:
        self.__http = http or http_wrapper.GetHttp()
    self.__url = url
def offer(self, item):
    """Offer to the buffer.

    It is a non-blocking operation, and when the buffer is full,
    it raises the Queue.Full exception.
    """
    try:
        # non-blocking
        self._buffer.put(item, block=False)
        if self._consumer_callback is not None:
            self._consumer_callback()
        return True
    except Queue.Full:
        Log.debug("%s: Full in offer()" % str(self))
        raise Queue.Full
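A hedged usage sketch: callers treat Queue.Full as back-pressure. `buf` and `item` are hypothetical, and `Queue` is the Python 2 module name (it is `queue` on Python 3):

import Queue  # Python 2 naming, matching the snippet above

try:
    buf.offer(item)  # hypothetical buffer instance and payload
except Queue.Full:
    pass  # non-blocking by design: drop, buffer elsewhere, or retry later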
def _clean_up_gene_id(geneid, sp, curie_map):
    """
    A series of identifier rewriting to conform with
    standard gene identifiers.
    :param geneid:
    :param sp:
    :param curie_map:
    :return:
    """
    # special case for MGI
    geneid = re.sub(r'MGI:MGI:', 'MGI:', geneid)

    # rewrite Ensembl --> ENSEMBL
    geneid = re.sub(r'Ensembl', 'ENSEMBL', geneid)

    # rewrite Gene:CELE --> WormBase
    # these are old-school cosmid identifier
    geneid = re.sub(r'Gene:CELE', 'WormBase:', geneid)
    if sp == 'CAEEL':
        if re.match(r'(Gene|ENSEMBLGenome):\w+\.\d+', geneid):
            geneid = re.sub(
                r'(?:Gene|ENSEMBLGenome):(\w+\.\d+)',
                r'WormBase:\1', geneid)

    if sp == 'DROME':
        if re.match(r'(ENSEMBLGenome):\w+\.\d+', geneid):
            geneid = re.sub(
                r'(?:ENSEMBLGenome):(\w+\.\d+)', r'FlyBase:\1', geneid)

    # rewrite GeneID --> NCBIGene
    geneid = re.sub(r'GeneID', 'NCBIGene', geneid)

    # rewrite Gene:Dmel --> FlyBase
    geneid = re.sub(r'Gene:Dmel_', 'FlyBase:', geneid)
    # rewrite Gene:CG --> FlyBase:CG
    geneid = re.sub(r'Gene:CG', 'FlyBase:CG', geneid)

    # rewrite ENSEMBLGenome:FBgn --> FlyBase:FBgn
    geneid = re.sub(r'ENSEMBLGenome:FBgn', 'FlyBase:FBgn', geneid)

    # rewrite Gene:<ensembl ids> --> ENSEMBL:<id>
    geneid = re.sub(r'Gene:ENS', 'ENSEMBL:ENS', geneid)

    # rewrite Gene:<Xenbase ids> --> Xenbase:<id>
    geneid = re.sub(r'Gene:Xenbase:', 'Xenbase:', geneid)

    # TODO this would be much better done as
    # if foo not in self.curie_map:
    # if re.match(r'(Gene|ENSEMBLGenome):', geneid) or \
    #         re.match(r'Gene_ORFName', geneid) or \
    #         re.match(r'Gene_Name', geneid):
    #     LOG.warning(
    #         "Found an identifier I don't know how to fix (species %s): %s",
    #         sp, geneid)

    pfxlcl = re.split(r':', geneid)
    pfx = pfxlcl[0]
    if pfx is None or pfx not in curie_map:
        # LOG.warning("No curie prefix for (species %s): %s", sp, geneid)
        geneid = None
    return geneid
def do_mumble(self, args):
    """Mumbles what you tell me to."""
    repetitions = args.repeat or 1
    for i in range(min(repetitions, self.maxrepeats)):
        output = []
        if random.random() < .33:
            output.append(random.choice(self.MUMBLE_FIRST))
        for word in args.words:
            if random.random() < .40:
                output.append(random.choice(self.MUMBLES))
            output.append(word)
        if random.random() < .25:
            output.append(random.choice(self.MUMBLE_LAST))
        self.poutput(' '.join(output))
def MergeData(self, merge_data, raw_data=None):
    """Merges data read from a config file into the current config."""
    self.FlushCache()
    if raw_data is None:
        raw_data = self.raw_data

    for k, v in iteritems(merge_data):
        # A context clause.
        if isinstance(v, dict) and k not in self.type_infos:
            if k not in self.valid_contexts:
                raise InvalidContextError("Invalid context specified: %s" % k)

            context_data = raw_data.setdefault(k, collections.OrderedDict())
            self.MergeData(v, context_data)

        else:
            # Find the descriptor for this field.
            descriptor = self.type_infos.get(k)
            if descriptor is None:
                msg = ("Missing config definition for %s. This option is likely "
                       "deprecated or renamed. Check the release notes." % k)
                if flags.FLAGS.disallow_missing_config_definitions:
                    raise MissingConfigDefinitionError(msg)

            if isinstance(v, string_types):
                v = v.strip()

            # If we are already initialized and someone tries to modify a
            # constant value (e.g. via Set()), break loudly.
            if self.initialized and k in self.constants:
                raise ConstModificationError(
                    "Attempting to modify constant value %s" % k)

            raw_data[k] = v
def printDuplicatedTPEDandTFAM(tped, tfam, samples, oldSamples, prefix):
    """Print the TPED and TFAM of the duplicated samples.

    :param tped: the ``tped`` containing duplicated samples.
    :param tfam: the ``tfam`` containing duplicated samples.
    :param samples: the updated position of the samples in the tped
                    containing only duplicated samples.
    :param oldSamples: the original duplicated sample positions.
    :param prefix: the prefix of all the files.

    :type tped: :py:class:`numpy.array`
    :type tfam: :py:class:`numpy.array`
    :type samples: dict
    :type oldSamples: dict
    :type prefix: str

    The ``tped`` and ``tfam`` files are written in
    ``prefix.duplicated_samples.tped`` and
    ``prefix.duplicated_samples.tfam``, respectively.
    """
    # Print the TPED
    outputTPED = None
    try:
        outputTPED = open(prefix + ".duplicated_samples.tped", "w")
    except IOError:
        msg = "%(prefix)s.duplicated_samples.tped: can't write " \
              "file" % locals()
        raise ProgramError(msg)

    for row in tped:
        print >>outputTPED, "\t".join(row)
    outputTPED.close()

    # Print the TFAM
    nbSamples = len(tped[0][4:])
    newTFAM = [0 for i in xrange(nbSamples)]
    # loop variable renamed so it no longer shadows the `samples` parameter
    for sample, indexes in samples.iteritems():
        oldIndexes = oldSamples[sample]
        for i, index in enumerate(indexes):
            oldIndex = oldIndexes[i]
            newTFAM[index] = tfam[oldIndex]

    outputTFAM = None
    try:
        outputTFAM = open(prefix + ".duplicated_samples.tfam", "w")
    except IOError:
        msg = "%(prefix)s.duplicated_samples.tfam: can't write " \
              "file" % locals()
        raise ProgramError(msg)

    for row in newTFAM:
        print >>outputTFAM, "\t".join(row)
    outputTFAM.close()
def increment(self, delta=1, text=None):
    """Redraw the progress bar, incrementing the value by delta
    (default=1) and optionally changing the text. Returns the
    ProgressBar's new value. See also .update()."""
    return self.update(value=min(self.max, self.value + delta), text=text)
def _proc_member(self, tarfile):
    """Choose the right processing method depending on
       the type and call it.
    """
    if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
        return self._proc_gnulong(tarfile)
    elif self.type == GNUTYPE_SPARSE:
        return self._proc_sparse(tarfile)
    elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
        return self._proc_pax(tarfile)
    else:
        return self._proc_builtin(tarfile)
def attach_tracker(self, stanza, tracker=None):
    """
    Return a new tracker or modify one to track the stanza.

    :param stanza: Stanza to track.
    :type stanza: :class:`aioxmpp.Message`
    :param tracker: Existing tracker to attach to.
    :type tracker: :class:`.tracking.MessageTracker`
    :raises ValueError: if the stanza is of type
        :attr:`~aioxmpp.MessageType.ERROR`
    :raises ValueError: if the stanza contains a delivery receipt
    :return: The message tracker for the stanza.
    :rtype: :class:`.tracking.MessageTracker`

    The `stanza` gets a :xep:`184` receipt request attached and internal
    handlers are set up to update the `tracker` state once a confirmation
    is received.

    .. warning::

        See the :ref:`api-tracking-memory`.
    """
    if stanza.xep0184_received is not None:
        raise ValueError(
            "requesting delivery receipts for delivery receipts is not "
            "allowed"
        )
    if stanza.type_ == aioxmpp.MessageType.ERROR:
        raise ValueError(
            "requesting delivery receipts for errors is not supported"
        )

    if tracker is None:
        tracker = aioxmpp.tracking.MessageTracker()

    stanza.xep0184_request_receipt = True
    stanza.autoset_id()
    self._bare_jid_maps[stanza.to, stanza.id_] = tracker
    return tracker
def remove(self, elem):
    """Removes an item from the list. Similar to list.remove()."""
    self._values.remove(elem)
    self._message_listener.Modified()
def _lstrip_word(word, prefix):
    '''
    Return a copy of the string after the specified prefix was removed
    from the beginning of the string
    '''
    if six.text_type(word).startswith(prefix):
        return six.text_type(word)[len(prefix):]
    return word
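For illustration, two calls showing the whole-prefix behavior (unlike str.lstrip, which strips a character set):

_lstrip_word('linux-image-4.15', 'linux-')  # -> 'image-4.15'
_lstrip_word('image-4.15', 'linux-')        # -> 'image-4.15' (unchanged)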
def stat(filename, retry_params=None, _account_id=None):
    """Get GCSFileStat of a Google Cloud storage file.

    Args:
      filename: A Google Cloud Storage filename of form '/bucket/filename'.
      retry_params: An api_utils.RetryParams for this call to GCS. If None,
        the default one is used.
      _account_id: Internal-use only.

    Returns:
      a GCSFileStat object containing info about this file.

    Raises:
      errors.AuthorizationError: if authorization failed.
      errors.NotFoundError: if an object that's expected to exist doesn't.
    """
    common.validate_file_path(filename)
    api = storage_api._get_storage_api(retry_params=retry_params,
                                       account_id=_account_id)
    status, headers, content = api.head_object(
        api_utils._quote_filename(filename))
    errors.check_status(status, [200], filename, resp_headers=headers,
                        body=content)
    file_stat = common.GCSFileStat(
        filename=filename,
        st_size=common.get_stored_content_length(headers),
        st_ctime=common.http_time_to_posix(headers.get('last-modified')),
        etag=headers.get('etag'),
        content_type=headers.get('content-type'),
        metadata=common.get_metadata(headers))

    return file_stat
def get_heat_capacity(self, temperature, structure, n, u, cutoff=1e2):
    """
    Gets the directional heat capacity for a higher order tensor
    expansion as a function of direction and polarization.

    Args:
        temperature (float): Temperature in kelvin
        structure (float): Structure to be used in directional heat
            capacity determination
        n (3x1 array-like): direction for Cv determination
        u (3x1 array-like): polarization direction, note that no
            attempt for verification of eigenvectors is made
        cutoff (float): cutoff for scale of kt / (hbar * omega)
            if lower than this value, returns 0
    """
    k = 1.38065e-23
    kt = k * temperature
    hbar_w = 1.05457e-34 * self.omega(structure, n, u)
    if hbar_w > kt * cutoff:
        return 0.0
    c = k * (hbar_w / kt) ** 2
    c *= np.exp(hbar_w / kt) / (np.exp(hbar_w / kt) - 1) ** 2
    return c * 6.022e23
def nodeprep(string, allow_unassigned=False):
    """
    Process the given `string` using the Nodeprep (`RFC 6122`_) profile.
    In the error cases defined in `RFC 3454`_ (stringprep), a
    :class:`ValueError` is raised.
    """
    chars = list(string)
    _nodeprep_do_mapping(chars)
    do_normalization(chars)
    check_prohibited_output(
        chars,
        (
            stringprep.in_table_c11,
            stringprep.in_table_c12,
            stringprep.in_table_c21,
            stringprep.in_table_c22,
            stringprep.in_table_c3,
            stringprep.in_table_c4,
            stringprep.in_table_c5,
            stringprep.in_table_c6,
            stringprep.in_table_c7,
            stringprep.in_table_c8,
            stringprep.in_table_c9,
            lambda x: x in _nodeprep_prohibited
        ))
    check_bidi(chars)
    if not allow_unassigned:
        check_unassigned(
            chars,
            (
                stringprep.in_table_a1,
            )
        )
    return "".join(chars)
def parse(self):
    """Parse and return an nbt literal from the token stream."""
    token_type = self.current_token.type.lower()
    handler = getattr(self, f'parse_{token_type}', None)
    if handler is None:
        raise self.error(f'Invalid literal {self.current_token.value!r}')
    return handler()
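Because dispatch is getattr-based, supporting an extra literal type needs only a matching parse_<type> method; a hypothetical sketch (token type and return value are illustrative, not necessarily the library's actual API):

def parse_float(self):  # would handle tokens whose .type is 'FLOAT'
    return float(self.current_token.value)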
def get_queue_info(self, instance, cursor):
    """Collects metrics for all queues on the connected database."""
    cursor.execute(self.QUEUE_INFO_STATEMENT)
    for queue_name, ticker_lag, ev_per_sec in cursor:
        yield queue_name, {
            'ticker_lag': ticker_lag,
            'ev_per_sec': ev_per_sec,
        }
def _check_valid_translation(self, translation):
    """Checks that the translation vector is valid."""
    if not isinstance(translation, np.ndarray) or \
            not np.issubdtype(translation.dtype, np.number):
        raise ValueError('Translation must be specified as numeric numpy array')

    t = translation.squeeze()
    if len(t.shape) != 1 or t.shape[0] != 3:
        raise ValueError('Translation must be specified as a 3-vector, '
                         '3x1 ndarray, or 1x3 ndarray')
def merge_requests(self, **kwargs):
    """List the merge requests related to this milestone.

    Args:
        all (bool): If True, return all the items, without pagination
        per_page (int): Number of items to retrieve per request
        page (int): ID of the page to return (starts with page 1)
        as_list (bool): If set to False and no pagination option is
            defined, return a generator instead of a list
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabListError: If the list could not be retrieved

    Returns:
        RESTObjectList: The list of merge requests
    """
    path = '%s/%s/merge_requests' % (self.manager.path, self.get_id())
    data_list = self.manager.gitlab.http_list(path, as_list=False, **kwargs)
    manager = ProjectMergeRequestManager(self.manager.gitlab,
                                         parent=self.manager._parent)
    # FIXME(gpocentek): the computed manager path is not correct
    return RESTObjectList(manager, ProjectMergeRequest, data_list)
def retrieve(self, cursor):
    """ Retrieve items from query """
    assert isinstance(cursor, dict), "expected cursor type 'dict'"
    # look for record in query
    query = self.get_query()
    assert isinstance(query, peewee.Query)
    return query.get(**cursor)
def listurl_get(self, q, **kwargs):
    '''taobao.taobaoke.listurl.get: Taobaoke (Taobao affiliate) keyword
    search URL. Returns the Taobaoke search URL for the given keyword.'''
    request = TOPRequest('taobao.taobaoke.listurl.get')
    request['q'] = q
    for k, v in kwargs.iteritems():
        if k not in ('nick', 'outer_code', 'pid') and v is None:
            continue
        request[k] = v
    self.create(self.execute(request), fields=['taobaoke_item'],
                models={'taobaoke_item': TaobaokeItem})
    return self.taobaoke_item
def request(self, location, fragment_enc=False):
    """
    Given a URL this method will add a fragment, a query part or extend
    a query part if it already exists with the information in this
    instance.

    :param location: A URL
    :param fragment_enc: Whether the information should be placed in a
        fragment (True) or in a query part (False)
    :return: The extended URL
    """
    _l = as_unicode(location)
    _qp = as_unicode(self.to_urlencoded())
    if fragment_enc:
        return "%s#%s" % (_l, _qp)
    else:
        if "?" in location:
            return "%s&%s" % (_l, _qp)
        else:
            return "%s?%s" % (_l, _qp)
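A usage sketch with a hypothetical instance `msg` whose parameters urlencode to 'state=xyz' (all values illustrative):

msg.request('https://rp.example.com/cb')
# -> 'https://rp.example.com/cb?state=xyz'

msg.request('https://rp.example.com/cb?foo=bar')
# -> 'https://rp.example.com/cb?foo=bar&state=xyz'

msg.request('https://rp.example.com/cb', fragment_enc=True)
# -> 'https://rp.example.com/cb#state=xyz'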
def fetch(self):
    """
    Fetch a VariableInstance

    :returns: Fetched VariableInstance
    :rtype: twilio.rest.serverless.v1.service.environment.variable.VariableInstance
    """
    params = values.of({})

    payload = self._version.fetch(
        'GET',
        self._uri,
        params=params,
    )

    return VariableInstance(
        self._version,
        payload,
        service_sid=self._solution['service_sid'],
        environment_sid=self._solution['environment_sid'],
        sid=self._solution['sid'],
    )
def size(config, accounts=(), day=None, group=None, human=True, region=None):
    """size of exported records for a given day."""
    config = validate.callback(config)
    destination = config.get('destination')
    client = boto3.Session().client('s3')
    day = parse(day)

    def export_size(client, account):
        paginator = client.get_paginator('list_objects_v2')
        count = 0
        size = 0
        session = get_session(account['role'], region)
        account_id = session.client('sts').get_caller_identity()['Account']
        prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id
        prefix = "%s/%s/%s" % (prefix, group, day.strftime("%Y/%m/%d"))
        account['account_id'] = account_id
        for page in paginator.paginate(
                Bucket=destination['bucket'], Prefix=prefix):
            for k in page.get('Contents', ()):
                size += k['Size']
                count += 1
        return (count, size)

    total_size = 0
    accounts_report = []
    logging.getLogger('botocore').setLevel(logging.ERROR)
    with ThreadPoolExecutor(max_workers=16) as w:
        futures = {}
        for account in config.get('accounts'):
            if accounts and account['name'] not in accounts:
                continue
            futures[w.submit(export_size, client, account)] = account

        for f in as_completed(futures):
            account = futures[f]
            count, size = f.result()
            account.pop('role')
            account.pop('groups')
            total_size += size
            if human:
                account['size'] = GetHumanSize(size)
            else:
                account['size'] = size
            account['count'] = count
            accounts_report.append(account)

    accounts_report.sort(key=operator.itemgetter('count'), reverse=True)
    print(tabulate(accounts_report, headers='keys'))
    log.info("total size:%s", GetHumanSize(total_size))
def request(self, url, method='GET', params=None, data=None,
            expected_response_code=200):
    """Make a http request to API."""
    url = "{0}/{1}".format(self._baseurl, url)

    if params is None:
        params = {}

    auth = {
        'u': self._username,
        'p': self._password
    }
    params.update(auth)

    if data is not None and not isinstance(data, str):
        data = json.dumps(data)

    retry = True
    _try = 0
    # Try to send the request more than once by default (see #103)
    while retry:
        try:
            response = session.request(
                method=method,
                url=url,
                params=params,
                data=data,
                headers=self._headers,
                verify=self._verify_ssl,
                timeout=self._timeout
            )
            break
        except (requests.exceptions.ConnectionError,
                requests.exceptions.Timeout):
            _try += 1
            if self._retries != 0:
                retry = _try < self._retries
    else:
        # loop exhausted its retries without a successful break
        raise requests.exceptions.ConnectionError

    if response.status_code == expected_response_code:
        return response
    else:
        raise InfluxDBClientError(response.content, response.status_code)
def get_parent_tag(mention):
    """Return the HTML tag of the Mention's parent.

    These may be tags such as 'p', 'h2', 'table', 'div', etc.
    If a candidate is passed in, only the tag of its first Mention is
    returned.

    :param mention: The Mention to evaluate
    :rtype: string
    """
    span = _to_span(mention)
    i = _get_node(span.sentence)
    return str(i.getparent().tag) if i.getparent() is not None else None
def _item_list(profile=None, **connection_args):
    '''
    Template for writing list functions
    Return a list of available items (keystone items-list)

    CLI Example:

    .. code-block:: bash

        salt '*' keystone.item_list
    '''
    kstone = auth(profile, **connection_args)
    ret = []
    for item in kstone.items.list():
        ret.append(item.__dict__)
        # ret[item.name] = {
        #     'id': item.id,
        #     'name': item.name,
        # }
    return ret
def endswith(self, name: str) -> List[str]:
    """Return a list of all keywords ending with the given string.

    >>> from hydpy.core.devicetools import Keywords
    >>> keywords = Keywords('first_keyword', 'second_keyword',
    ...                     'keyword_3', 'keyword_4',
    ...                     'keyboard')
    >>> keywords.endswith('keyword')
    ['first_keyword', 'second_keyword']
    """
    return sorted(keyword for keyword in self if keyword.endswith(name))
def parse_clubs(self, clubs_page):
    """Parses the DOM and returns character clubs attributes.

    :type clubs_page: :class:`bs4.BeautifulSoup`
    :param clubs_page: MAL character clubs page's DOM

    :rtype: dict
    :return: character clubs attributes.
    """
    character_info = self.parse_sidebar(clubs_page)
    second_col = clubs_page.find(u'div', {'id': 'content'}) \
        .find(u'table').find(u'tr').find_all(u'td', recursive=False)[1]

    try:
        clubs_header = second_col.find(u'div', text=u'Related Clubs')
        character_info[u'clubs'] = []
        if clubs_header:
            curr_elt = clubs_header.nextSibling
            while curr_elt is not None:
                if curr_elt.name == u'div':
                    link = curr_elt.find(u'a')
                    club_id = int(re.match(
                        r'/clubs\.php\?cid=(?P<id>[0-9]+)',
                        link.get(u'href')).group(u'id'))
                    num_members = int(re.match(
                        r'(?P<num>[0-9]+) members',
                        curr_elt.find(u'small').text).group(u'num'))
                    character_info[u'clubs'].append(
                        self.session.club(club_id).set(
                            {'name': link.text, 'num_members': num_members}))
                curr_elt = curr_elt.nextSibling
    except:
        if not self.session.suppress_parse_exceptions:
            raise

    return character_info
def deserialize(self, node: SchemaNode,
                cstruct: Union[str, ColanderNullType]) -> Optional[Pendulum]:
    """
    Deserializes string representation to Python object.
    """
    if not cstruct:
        return colander.null
    try:
        result = coerce_to_pendulum(cstruct, assume_local=self.use_local_tz)
    except (ValueError, ParserError) as e:
        raise Invalid(node, "Invalid date/time: value={!r}, error="
                            "{!r}".format(cstruct, e))
    return result
def content_recommendations(access_token, content_item_id):
    '''
    Name: content_recommendations
    Parameters: access_token, content_item_id
    Return: dictionary
    '''
    headers = {'Authorization': 'Bearer ' + str(access_token)}
    recommendations_url = \
        construct_content_recommendations_url(enrichment_url, content_item_id)
    request = requests.get(recommendations_url, headers=headers)
    if request.status_code == 200:
        recommendations = request.json()
        return recommendations
    return {'status': request.status_code, "message": request.text}
def restore(self, value, context=None):
    """
    Restores the value from a table cache for usage.

    :param      value   | <variant>
                context | <orb.Context> || None
    """
    value = super(DatetimeWithTimezoneColumn, self).restore(value, context)

    # datetime.date has no now(); use datetime.datetime so the
    # timezone handling below applies
    if value in ('today', 'now'):
        value = datetime.datetime.now()

    if isinstance(value, datetime.datetime):
        tz = pytz.timezone(context.timezone)

        if tz is not None:
            if value.tzinfo is None:
                base_tz = pytz.timezone(orb.system.settings().server_timezone)

                # the machine timezone and preferred timezone match,
                # so create off utc time
                if base_tz == tz:
                    value = tz.fromutc(value)

                # convert the server timezone to a preferred timezone
                else:
                    value = base_tz.fromutc(value).astimezone(tz)
            else:
                value = value.astimezone(tz)
        else:
            log.warning('No local timezone defined')

    return value
def v_unique_name_children(ctx, stmt):
    """Make sure that each child of stmt has a unique name"""

    def sort_pos(p1, p2):
        if p1.line < p2.line:
            return (p1, p2)
        else:
            return (p2, p1)

    dict = {}
    chs = stmt.i_children

    def check(c):
        key = (c.i_module.i_modulename, c.arg)
        if key in dict:
            dup = dict[key]
            (minpos, maxpos) = sort_pos(c.pos, dup.pos)
            pos = chk_uses_pos(c, maxpos)
            err_add(ctx.errors, pos, 'DUPLICATE_CHILD_NAME',
                    (stmt.arg, stmt.pos, c.arg, minpos))
        else:
            dict[key] = c
        # also check all data nodes in the cases
        if c.keyword == 'choice':
            for case in c.i_children:
                for cc in case.i_children:
                    check(cc)

    for c in chs:
        check(c)
def print_partlist(input, timeout=20, showgui=False):
    '''print partlist text delivered by eagle

    :param input: .sch or .brd file name
    :param timeout: int
    :param showgui: Bool, True -> do not hide eagle GUI
    :rtype: None
    '''
    print(raw_partlist(input=input, timeout=timeout, showgui=showgui))
def c_join(self, other, psi=-40.76, omega=-178.25, phi=-65.07,
           o_c_n_angle=None, c_n_ca_angle=None, c_n_length=None,
           relabel=True):
    """Joins other to self at the C-terminus via a peptide bond.

    Notes
    -----
    This function directly modifies self. It does not return a new object.

    Parameters
    ----------
    other: Residue or Polypeptide
    psi: float, optional
        Psi torsion angle (degrees) between final `Residue` of self
        and first `Residue` of other.
    omega: float, optional
        Omega torsion angle (degrees) between final `Residue` of self
        and first `Residue` of other.
    phi: float, optional
        Phi torsion angle (degrees) between final `Residue` of self
        and first `Residue` of other.
    o_c_n_angle: float or None, optional
        Desired angle between O, C (final `Residue` of self) and N
        (first `Residue` of other) atoms. If `None`, default value is
        taken from `ideal_backbone_bond_angles`.
    c_n_ca_angle: float or None, optional
        Desired angle between C (final `Residue` of self) and N, CA
        (first `Residue` of other) atoms. If `None`, default value is
        taken from `ideal_backbone_bond_angles`.
    c_n_length: float or None, optional
        Desired peptide bond length between final `Residue` of self
        and first `Residue` of other. If `None`, default value is taken
        from `ideal_backbone_bond_lengths`.
    relabel: bool, optional
        If `True`, `relabel_all` is run on self before returning.

    Raises
    ------
    TypeError:
        If other is not a `Residue` or a Polypeptide.
    """
    if isinstance(other, Residue):
        other = Polypeptide([other])
    if not isinstance(other, Polypeptide):
        raise TypeError(
            'Only Polypeptide or Residue objects can be joined to a Polypeptide')
    if abs(omega) >= 90:
        peptide_conformation = 'trans'
    else:
        peptide_conformation = 'cis'
    if o_c_n_angle is None:
        o_c_n_angle = ideal_backbone_bond_angles[peptide_conformation]['o_c_n']
    if c_n_ca_angle is None:
        c_n_ca_angle = ideal_backbone_bond_angles[peptide_conformation]['c_n_ca']
    if c_n_length is None:
        c_n_length = ideal_backbone_bond_lengths['c_n']
    r1 = self[-1]
    r1_ca = r1['CA']._vector
    r1_c = r1['C']._vector
    r1_o = r1['O']._vector
    # p1 is point that will be used to position the N atom of r2.
    p1 = r1_o[:]
    # rotate p1 by o_c_n_angle, about axis perpendicular to the
    # r1_ca, r1_c, r1_o plane, passing through r1_c.
    axis = numpy.cross((r1_ca - r1_c), (r1_o - r1_c))
    q = Quaternion.angle_and_axis(angle=o_c_n_angle, axis=axis)
    p1 = q.rotate_vector(v=p1, point=r1_c)
    # Ensure p1 is separated from r1_c by the correct distance.
    p1 = r1_c + (c_n_length * unit_vector(p1 - r1_c))
    # rotate p1 and r1['O'] to obtain desired psi value at the join.
    measured_psi = dihedral(r1['N'], r1['CA'], r1['C'], p1)
    q = Quaternion.angle_and_axis(
        angle=(psi - measured_psi), axis=(r1_c - r1_ca))
    p1 = q.rotate_vector(v=p1, point=r1_c)
    r1['O']._vector = q.rotate_vector(v=r1_o, point=r1_c)
    # translate other so that its first N atom is at p1.
    other.translate(vector=(p1 - other[0]['N']._vector))
    # rotate other so that the c_n_ca angle is correct.
    v1 = r1_c - other[0]['N']._vector
    v2 = other[0]['CA']._vector - other[0]['N']._vector
    measured_c_n_ca = angle_between_vectors(v1, v2)
    axis = numpy.cross(v1, v2)
    other.rotate(angle=(c_n_ca_angle - measured_c_n_ca),
                 axis=axis, point=other[0]['N']._vector)
    # rotate other to obtain desired omega and phi values at the join.
    measured_omega = dihedral(
        r1['CA'], r1['C'], other[0]['N'], other[0]['CA'])
    other.rotate(angle=(omega - measured_omega),
                 axis=(other[0]['N'] - r1['C']),
                 point=other[0]['N']._vector)
    measured_phi = dihedral(
        r1['C'], other[0]['N'], other[0]['CA'], other[0]['C'])
    other.rotate(angle=(phi - measured_phi),
                 axis=(other[0]['CA'] - other[0]['N']),
                 point=other[0]['CA']._vector)
    self.extend(other)
    if relabel:
        self.relabel_all()
    self.tags['assigned_ff'] = False
    return
def initializeSessionAsBob(sessionState, sessionVersion, parameters):
    """
    :type sessionState: SessionState
    :type sessionVersion: int
    :type parameters: BobAxolotlParameters
    """
    sessionState.setSessionVersion(sessionVersion)
    sessionState.setRemoteIdentityKey(parameters.getTheirIdentityKey())
    sessionState.setLocalIdentityKey(parameters.getOurIdentityKey().getPublicKey())

    secrets = bytearray()
    if sessionVersion >= 3:
        secrets.extend(RatchetingSession.getDiscontinuityBytes())

    secrets.extend(Curve.calculateAgreement(
        parameters.getTheirIdentityKey().getPublicKey(),
        parameters.getOurSignedPreKey().getPrivateKey()))
    secrets.extend(Curve.calculateAgreement(
        parameters.getTheirBaseKey(),
        parameters.getOurIdentityKey().getPrivateKey()))
    secrets.extend(Curve.calculateAgreement(
        parameters.getTheirBaseKey(),
        parameters.getOurSignedPreKey().getPrivateKey()))

    if sessionVersion >= 3 and parameters.getOurOneTimePreKey() is not None:
        secrets.extend(Curve.calculateAgreement(
            parameters.getTheirBaseKey(),
            parameters.getOurOneTimePreKey().getPrivateKey()))

    derivedKeys = RatchetingSession.calculateDerivedKeys(sessionVersion, secrets)

    sessionState.setSenderChain(parameters.getOurRatchetKey(), derivedKeys.getChainKey())
    sessionState.setRootKey(derivedKeys.getRootKey())
def a_alpha_and_derivatives(self, T, full=True, quick=True):
    r'''Method to calculate `a_alpha` and its first and second
    derivatives for this EOS. Returns `a_alpha`, `da_alpha_dT`, and
    `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more
    documentation. Uses the set values of `a`.

    .. math::
        a\alpha = a

        \frac{d a\alpha}{dT} = 0

        \frac{d^2 a\alpha}{dT^2} = 0
    '''
    if not full:
        return self.a
    else:
        a_alpha = self.a
        da_alpha_dT = 0.0
        d2a_alpha_dT2 = 0.0
        return a_alpha, da_alpha_dT, d2a_alpha_dT2
def team(self):
    """Team to which the scope is assigned."""
    team_dict = self._json_data.get('team')
    if team_dict and team_dict.get('id'):
        return self._client.team(id=team_dict.get('id'))
    else:
        return None
def doc2md(docstr, title, min_level=1, more_info=False, toc=True, maxdepth=0):
    """
    Convert a docstring to a markdown text.
    """
    text = doctrim(docstr)
    lines = text.split('\n')

    sections = find_sections(lines)
    if sections:
        level = min(n for n, t in sections) - 1
    else:
        level = 1

    shiftlevel = 0
    if level < min_level:
        shiftlevel = min_level - level
        level = min_level
        sections = [(lev + shiftlevel, tit) for lev, tit in sections]

    head = next((i for i, l in enumerate(lines) if is_heading(l)), 0)
    md = [
        make_heading(level, title),
        "",
    ] + lines[:head]
    if toc:
        md += make_toc(sections, maxdepth)
        md += ['']
    md += _doc2md(lines[head:], shiftlevel)
    if more_info:
        return (md, sections)
    else:
        return "\n".join(md)
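A minimal usage sketch, feeding the function its own docstring (the exact output shape depends on the module's heading helpers such as is_heading and make_heading):

md = doc2md(doc2md.__doc__, 'doc2md', min_level=2, toc=False)
print(md)  # '## doc2md' heading followed by the docstring body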
def getTableMisnestedNodePosition(self):
    """Get the foster parent element, and sibling to insert before
    (or None) when inserting a misnested table node"""
    # The foster parent element is the one which comes before the most
    # recently opened table element
    # XXX - this is really inelegant
    lastTable = None
    fosterParent = None
    insertBefore = None
    for elm in self.openElements[::-1]:
        if elm.name == "table":
            lastTable = elm
            break
    if lastTable:
        # XXX - we should really check that this parent is actually a
        # node here
        if lastTable.parent:
            fosterParent = lastTable.parent
            insertBefore = lastTable
        else:
            fosterParent = self.openElements[
                self.openElements.index(lastTable) - 1]
    else:
        fosterParent = self.openElements[0]
    return fosterParent, insertBefore
def _ParseValueData(self, knowledge_base, value_data):
    """Parses Windows Registry value data for a preprocessing attribute.

    Args:
      knowledge_base (KnowledgeBase): to fill with preprocessing information.
      value_data (object): Windows Registry value data.

    Raises:
      errors.PreProcessFail: if the preprocessing fails.
    """
    if not isinstance(value_data, py2to3.UNICODE_TYPE):
        raise errors.PreProcessFail(
            'Unsupported Windows Registry value type: {0:s} for '
            'artifact: {1:s}.'.format(
                type(value_data), self.ARTIFACT_DEFINITION_NAME))

    if not knowledge_base.GetHostname():
        hostname_artifact = artifacts.HostnameArtifact(name=value_data)
        knowledge_base.SetHostname(hostname_artifact)
def remove_request(self, uuid):
    """Remove any RPC request(s) using this uuid.

    :param str uuid: Rpc Identifier.
    :return:
    """
    for key in list(self._request):
        if self._request[key] == uuid:
            del self._request[key]
def fix(self, *args, **kwargs):
    """
    Turns parameters to constants. As arguments, parameters must be strings.
    As keyword arguments, they can be set at the same time.

    Note this will NOT work when specifying a non-string fit function,
    because there is no flexibility in the number of arguments. To get
    around this, suppose you've defined a function stuff(x,a,b). Instead
    of sending the stuff object to self.set_functions() directly, make it
    a string function, e.g.:

    self.set_functions('stuff(x,a,b)', 'a,b', stuff=stuff)
    """
    # first set all the keyword argument values
    self.set(**kwargs)

    # get everything into one big list
    pnames = list(args) + list(kwargs.keys())

    # move each pname to the constants
    for pname in pnames:
        if pname not in self._pnames:
            self._error("Naughty. '" + pname + "' is not a valid fit parameter name.")
        else:
            n = self._pnames.index(pname)

            # use the fit result if it exists
            if self.results:
                value = self.results[0][n]
            # otherwise use the guess value
            else:
                value = self._pguess[n]

            # make the switcheroo
            if type(self._pnames) is not list:
                self._pnames = list(self._pnames)
            if type(self._pguess) is not list:
                self._pguess = list(self._pguess)
            if type(self._cnames) is not list:
                self._cnames = list(self._cnames)
            if type(self._constants) is not list:
                self._constants = list(self._constants)

            self._pnames.pop(n)
            self._pguess.pop(n)
            self._cnames.append(pname)
            self._constants.append(value)

    # update
    self._update_functions()
    return self
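Following the docstring's own stuff(x,a,b) scenario, a hypothetical session might look like this (the fitter constructor name is a placeholder):

def stuff(x, a, b):
    return a * x + b

f = fitter()  # hypothetical fitter instance
f.set_functions('stuff(x,a,b)', 'a,b', stuff=stuff)
f.fix('a')      # freeze 'a' at its guess (or fitted) value
f.fix(b=2.5)    # set 'b' to 2.5 and freeze it, in one call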
def parent_link_record_exists(self):
    # type: () -> bool
    '''
    Determine whether this Rock Ridge entry has a parent link record
    (used for relocating deep directory records).

    Parameters:
     None.
    Returns:
     True if this Rock Ridge entry has a parent link record, False otherwise.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('Rock Ridge extension not yet initialized')

    return self.dr_entries.pl_record is not None or self.ce_entries.pl_record is not None
def monitor_experiment(args):
    '''monitor the experiment'''
    if args.time <= 0:
        print_error('please input a positive integer as time interval, the unit is second.')
        exit(1)
    while True:
        try:
            os.system('clear')
            update_experiment()
            show_experiment_info()
            time.sleep(args.time)
        except KeyboardInterrupt:
            exit(0)
        except Exception as exception:
            print_error(exception)
            exit(1)
def to_cartesian(r, theta, theta_units="radians"):
    """
    Converts polar r, theta to cartesian x, y.
    """
    assert theta_units in ['radians', 'degrees'], \
        "kwarg theta_units must be specified in radians or degrees"

    # Convert to radians
    if theta_units == "degrees":
        theta = to_radians(theta)

    theta = to_proper_radians(theta)
    x = r * cos(theta)
    y = r * sin(theta)
    return x, y
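Two worked points (cos/sin as in the math module, with the module's own to_radians/to_proper_radians helpers assumed available):

from math import pi

to_cartesian(1.0, 90.0, theta_units="degrees")  # -> (~0.0, 1.0)
to_cartesian(2.0, pi)                           # -> (~-2.0, ~0.0), radians by default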
def cpp_app_builder(build_context, target):
    """Pack a C++ binary as a Docker image with its runtime dependencies.

    TODO(itamar): Dynamically analyze the binary and copy shared objects
    from its buildenv image to the runtime image, unless they're installed.
    """
    yprint(build_context.conf, 'Build CppApp', target)
    if target.props.executable and target.props.main:
        raise KeyError(
            '`main` and `executable` arguments are mutually exclusive')
    if target.props.executable:
        if target.props.executable not in target.artifacts.get(AT.app):
            target.artifacts.add(AT.app, target.props.executable)
        entrypoint = [target.props.executable]
    elif target.props.main:
        prog = build_context.targets[target.props.main]
        binary = list(prog.artifacts.get(AT.binary).keys())[0]
        entrypoint = ['/usr/src/bin/' + binary]
    else:
        raise KeyError('Must specify either `main` or `executable` argument')
    build_app_docker_and_bin(
        build_context, target, entrypoint=entrypoint)
def _carregar(self):
    """Loads (or reloads) the SAT library. If the calling convention has
    not been set yet, it will be determined from the extension of the
    library file.

    :raises ValueError: If the calling convention cannot be determined
        or is not a valid value.
    """
    if self._convencao is None:
        if self._caminho.endswith(('.DLL', '.dll')):
            self._convencao = constantes.WINDOWS_STDCALL
        else:
            self._convencao = constantes.STANDARD_C

    if self._convencao == constantes.STANDARD_C:
        loader = ctypes.CDLL
    elif self._convencao == constantes.WINDOWS_STDCALL:
        loader = ctypes.WinDLL
    else:
        raise ValueError('Convencao de chamada desconhecida: {!r}'.format(
                self._convencao))

    self._libsat = loader(self._caminho)
def fa(a, b, alpha=2):
    """Returns the factor of 'alpha' (2 or 5 normally)"""
    return np.sum((a > b / alpha) & (a < b * alpha), dtype=float) / len(a) * 100
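A worked example: with alpha=2, element i counts when b[i]/2 < a[i] < 2*b[i], and the result is the percentage of such elements:

import numpy as np

a = np.array([1.0, 2.0, 10.0, 0.4])
b = np.array([1.5, 2.0, 1.0, 1.0])
fa(a, b)           # 50.0  (2 of 4 values within a factor of 2)
fa(a, b, alpha=5)  # 75.0  (3 of 4 within a factor of 5)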
def map(self, f_list: List[Callable[[np.ndarray], int]], *, axis: int = 0,
        chunksize: int = 1000, selection: np.ndarray = None) -> List[np.ndarray]:
    """
    Apply a function along an axis without loading the entire dataset in
    memory.

    Args:
        f_list: Function(s) that take a numpy ndarray as argument
        axis: Axis along which to apply the function (0 = rows, 1 = columns)
        chunksize: Number of rows (columns) to load per chunk
        selection: Columns (rows) to include

    Returns:
        List of numpy.ndarray, one per supplied function in f_list.
        This is more efficient than repeatedly calling map() one
        function at a time.
    """
    return self.layers[""].map(f_list, axis, chunksize, selection)
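A usage sketch with `ds` as a hypothetical connection object exposing the method above; two per-row statistics are computed in a single chunked pass:

import numpy as np

row_sums, row_maxes = ds.map([np.sum, np.max], axis=0, chunksize=512)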
async def close(self):
    """Stop serving the :attr:`.Server.sockets` and close all
    concurrent connections.
    """
    if self._server:
        self._server.close()
        self._server = None
        self.event('stop').fire()
def unzip(self, directory):
    """ Write contents of zipfile to directory """
    # shutil.copytree requires that the destination does not yet exist
    # and creates it (including parents) itself, so the target must not
    # be pre-created here.
    shutil.copytree(self.src_dir, directory)
def get_all_item_data(items, conn, graph=None, output='json', **kwargs):
    """ queries a triplestore with the provided template or uses a generic
    template that returns triples 3 edges out in either direction from
    the provided item_uri

    args:
        items: the starting uri or list of uris to the query
        conn: the rdfframework triplestore connection to query against
        output: 'json' or 'rdf'

    kwargs:
        template: template to use in place of the generic template
        rdfclass: rdfclass the items are based on.
        filters: list of filters to apply
    """
    # set the jinja2 template to use
    if kwargs.get('template'):
        template = kwargs.pop('template')
    else:
        template = "sparqlAllItemDataTemplate.rq"
    # build the keyword arguments for the template
    template_kwargs = {"prefix": NSM.prefix(), "output": output}
    if isinstance(items, list):
        template_kwargs['uri_list'] = items
    else:
        template_kwargs['item_uri'] = Uri(items).sparql
    if kwargs.get("special_union"):
        template_kwargs['special_union'] = kwargs.get("special_union")
    if kwargs.get('rdfclass'):
        # pdb.set_trace()
        template_kwargs.update(kwargs['rdfclass'].query_kwargs)
    if kwargs.get("filters"):
        template_kwargs['filters'] = make_sparql_filter(kwargs.get('filters'))
    sparql = render_without_request(template, **template_kwargs)
    return conn.query(sparql, **kwargs)
def _worker_fn(samples, batchify_fn, dataset=None):
    """Function for processing data in worker process."""
    # pylint: disable=unused-argument
    # it is required that each worker process has to fork a new
    # MXIndexedRecordIO handle; preserving dataset as global variable can
    # save tons of overhead and is safe in the new process
    global _worker_dataset
    batch = batchify_fn([_worker_dataset[i] for i in samples])
    buf = io.BytesIO()
    ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(batch)
    return buf.getvalue()
Function for processing data in worker process.
def is_up(coordinate, current_time): """ Given the position and time determine if the given target is up. @param coordinate: the J2000 location of the source @param current_time: The time of the observations @return: True/False """ cfht.date = current_time.iso.replace('-', '/') cfht.horizon = math.radians(-7) sun.compute(cfht) sun_rise = Time(str(sun.rise_time).replace('/', '-')) sun_set = Time(str(sun.set_time).replace('/', '-')) if current_time < sun_set or current_time > sun_rise: return False fb._ra = coordinate.ra.radian fb._dec = coordinate.dec.radian cfht.horizon = math.radians(40) fb.compute(cfht) fb_rise_time = Time(str(fb.rise_time).replace('/', '-')) fb_set_time = Time(str(fb.set_time).replace('/', '-')) if (current_time > fb_set_time > fb_rise_time or fb_rise_time > current_time > fb_set_time): return False return True
Given the position and time determine if the given target is up. @param coordinate: the J2000 location of the source @param current_time: The time of the observations @return: True/False
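A hedged usage sketch; it assumes the module-level ephem observers (cfht, sun, fb) are already configured, and uses astropy for the inputs. Coordinates and time are illustrative:

from astropy.time import Time
from astropy.coordinates import SkyCoord

target = SkyCoord(ra=210.8, dec=54.3, unit='deg')  # J2000 position
if is_up(target, Time('2023-03-01T09:00:00')):
    print('Target is observable from CFHT')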
def create_comment_commit(self, body, commit_id, path, position, pr_id): """ Posts a comment to a given commit at a certain pull request. Check https://developer.github.com/v3/pulls/comments/#create-a-comment param body: str -> Comment text param commit_id: str -> SHA of the commit param path: str -> Relative path of the file to be commented param position: int -> The position in the diff to add a review comment param pr_id: int -> Github pull request id """ comments_url = f"{self.GITHUB_API_URL}/repos/{self.user}/{self.repo}/pulls/{pr_id}/comments" data = {'body': body, 'commit_id': commit_id, 'path': path, 'position': position} return requests.post(comments_url, json=data, headers=self.auth_header)
Posts a comment to a given commit at a certain pull request. Check https://developer.github.com/v3/pulls/comments/#create-a-comment param body: str -> Comment text param commit_id: str -> SHA of the commit param path: str -> Relative path of the file to be commented param position: int -> The position in the diff to add a review comment param pr_id: int -> Github pull request id
def library(self): """ Library to browse or search your media. """ if not self._library: try: data = self.query(Library.key) self._library = Library(self, data) except BadRequest: data = self.query('/library/sections/') # Only the owner has access to /library # so just return the library without the data. return Library(self, data) return self._library
Library to browse or search your media.
def is_consonant(note1, note2, include_fourths=True): """Return True if the interval is consonant. A consonance is a harmony, chord, or interval considered stable, as opposed to a dissonance. This function tests whether the given interval is consonant. This basically means that it checks whether the interval is (or sounds like) a unison, third, sixth, perfect fourth or perfect fifth. In classical music the fourth is considered dissonant when used contrapuntally, which is why you can choose to exclude it. """ return (is_perfect_consonant(note1, note2, include_fourths) or is_imperfect_consonant(note1, note2))
Return True if the interval is consonant. A consonance is a harmony, chord, or interval considered stable, as opposed to a dissonance. This function tests whether the given interval is consonant. This basically means that it checks whether the interval is (or sounds like) a unison, third, sixth, perfect fourth or perfect fifth. In classical music the fourth is considered dissonant when used contrapuntally, which is why you can choose to exclude it.
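Usage, assuming mingus-style note names as in mingus.core.intervals:

is_consonant('C', 'E')                         # major third -> True
is_consonant('C', 'F')                         # perfect fourth -> True
is_consonant('C', 'F', include_fourths=False)  # fourth excluded -> False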
def unique_items(seq): """Return the unique items from iterable *seq* (in order).""" seen = set() return [x for x in seq if not (x in seen or seen.add(x))]
Return the unique items from iterable *seq* (in order).
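The trick is that set.add() returns None (falsy), so the membership test and the insertion happen in a single expression while input order is preserved:

assert unique_items([3, 1, 3, 2, 1]) == [3, 1, 2]
assert unique_items('abracadabra') == ['a', 'b', 'r', 'c', 'd']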
def remote_image_request(self, image_url, params=None): """ Send an image for classification. The image will be retrieved from the URL specified. The params parameter is optional. On success this method will immediately return job information. Its status will initially be :py:data:`cloudsight.STATUS_NOT_COMPLETED` as it usually takes 6-12 seconds for the server to process an image. In order to retrieve the annotation data, you need to keep updating the job status using the :py:meth:`cloudsight.API.image_response` method until the status changes. You may also use the :py:meth:`cloudsight.API.wait` method which does this automatically. :param image_url: Image URL. :param params: Additional parameters for CloudSight API. """ data = self._init_data(params) data['image_request[remote_image_url]'] = image_url response = requests.post(REQUESTS_URL, headers={ 'Authorization': self.auth.authorize('POST', REQUESTS_URL, data), 'User-Agent': USER_AGENT, }, data=data) return self._unwrap_error(response)
Send an image for classification. The image will be retrieved from the URL specified. The params parameter is optional. On success this method will immediately return job information. Its status will initially be :py:data:`cloudsight.STATUS_NOT_COMPLETED` as it usually takes 6-12 seconds for the server to process an image. In order to retrieve the annotation data, you need to keep updating the job status using the :py:meth:`cloudsight.API.image_response` method until the status changes. You may also use the :py:meth:`cloudsight.API.wait` method which does this automatically. :param image_url: Image URL. :param params: Additional parameters for CloudSight API.
def doi_input(doi_string, download=True): """ This method accepts a DOI string and attempts to download the appropriate xml file. If successful, it returns a path to that file. As with all URL input types, the success of this method depends on supporting per-publisher conventions and will fail on unsupported publishers """ log.debug('DOI Input - {0}'.format(doi_string)) doi_string = doi_string[4:]  # strip the leading 'doi:' prefix if '10.1371' in doi_string:  # Corresponds to PLoS log.debug('DOI string shows PLoS') xml_url = plos_doi_to_xmlurl(doi_string) else: log.critical('DOI input for this publisher is not supported') sys.exit('This publisher is not yet supported by OpenAccess_EPUB') return url_input(xml_url, download)
This method accepts a DOI string and attempts to download the appropriate xml file. If successful, it returns a path to that file. As with all URL input types, the success of this method depends on supporting per-publisher conventions and will fail on unsupported publishers
def check_folder_exists(project, path, folder_name): ''' :param project: project id :type project: string :param path: path to where we should look for the folder in question :type path: string :param folder_name: name of the folder in question :type folder_name: string :returns: A boolean True or False whether the folder exists at the specified path :type: boolean :raises: :exc:`ResolutionError` if dxpy.api.container_list_folder raises an exception This function returns a boolean value that indicates whether a folder of the specified name exists at the specified path Note: this function will NOT work on the root folder case, i.e. '/' ''' if folder_name is None or path is None: return False try: folder_list = dxpy.api.container_list_folder(project, {"folder": path, "only": "folders"}) except dxpy.exceptions.DXAPIError as e: if e.name == 'ResourceNotFound': raise ResolutionError(str(e.msg)) else: raise target_folder = path + '/' + folder_name # sanitize input if necessary target_folder, _skip = clean_folder_path(target_folder, 'folder') # Check that folder name exists in return from list folder API call return target_folder in folder_list['folders']
:param project: project id :type project: string :param path: path to where we should look for the folder in question :type path: string :param folder_name: name of the folder in question :type folder_name: string :returns: A boolean True or False whether the folder exists at the specified path :type: boolean :raises: :exc:`ResolutionError` if dxpy.api.container_list_folder raises an exception This function returns a boolean value that indicates whether a folder of the specified name exists at the specified path Note: this function will NOT work on the root folder case, i.e. '/'
def apply(self, builder): """Apply the Slide Configuration to a Builder.""" if 'theme' in self.attributes: builder.apply_theme( self.attributes['theme'], builder.theme_options, )
Apply the Slide Configuration to a Builder.
def write(self, buf, url): """Write buffer to storage at a given url""" (store_name, path) = self._split_url(url) adapter = self._create_adapter(store_name) with adapter.open(path, 'wb') as f: f.write(buf.encode())
Write buffer to storage at a given url
def __extract_tags(self): """ Extract tags from the DocBlock. """ tags = list() current = None for line in self._comment: parts = re.match(r'^@(\w+)', line) if parts: current = (parts.group(1), list()) tags.append(current) if current: if line == '': current = None else: current[1].append(line) for tag in tags: self._tags.append((tag[0], os.linesep.join(tag[1])))
Extract tags from the DocBlock.
def subroutine(*effects): """ Returns an effect performing a list of effects. The value passed to each effect is a result of the previous effect. """ def subroutine(value, context, *args, **kwargs): d = defer.succeed(value) for effect in effects: d.addCallback(effect, context, *args, **kwargs) return d return subroutine
Returns an effect performing a list of effects. The value passed to each effect is a result of the previous effect.
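A minimal sketch of composing effects; the effect functions here are hypothetical, and each one receives the previous result plus the shared context:

from twisted.internet import defer

def double(value, context):
    return value * 2

def label(value, context):
    return '%s: %s' % (context, value)

pipeline = subroutine(double, double, label)
d = pipeline(3, 'result')  # Deferred that fires with 'result: 12'
d.addCallback(print)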
def scatter_plot(data, index_x, index_y, percent=100.0, seed=1, size=50, title=None, outfile=None, wait=True): """ Plots two attributes against each other. TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html :param data: the dataset :type data: Instances :param index_x: the 0-based index of the attribute on the x axis :type index_x: int :param index_y: the 0-based index of the attribute on the y axis :type index_y: int :param percent: the percentage of the dataset to use for plotting :type percent: float :param seed: the seed value to use for subsampling :type seed: int :param size: the size of the circles in point :type size: int :param title: an optional title :type title: str :param outfile: the (optional) file to save the generated plot to. The extension determines the file format. :type outfile: str :param wait: whether to wait for the user to close the plot :type wait: bool """ if not plot.matplotlib_available: logger.error("Matplotlib is not installed, plotting unavailable!") return # create subsample data = plot.create_subsample(data, percent=percent, seed=seed) # collect data x = [] y = [] if data.class_index == -1: c = None else: c = [] for i in range(data.num_instances): inst = data.get_instance(i) x.append(inst.get_value(index_x)) y.append(inst.get_value(index_y)) if c is not None: c.append(inst.get_value(inst.class_index)) # plot data fig, ax = plt.subplots() if c is None: ax.scatter(x, y, s=size, alpha=0.5) else: ax.scatter(x, y, c=c, s=size, alpha=0.5) ax.set_xlabel(data.attribute(index_x).name) ax.set_ylabel(data.attribute(index_y).name) if title is None: title = "Attribute scatter plot" if percent != 100: title += " (%0.1f%%)" % percent ax.set_title(title) ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c="0.3") ax.grid(True) fig.canvas.set_window_title(data.relationname) plt.draw() if outfile is not None: plt.savefig(outfile) if wait: plt.show()
Plots two attributes against each other. TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html :param data: the dataset :type data: Instances :param index_x: the 0-based index of the attribute on the x axis :type index_x: int :param index_y: the 0-based index of the attribute on the y axis :type index_y: int :param percent: the percentage of the dataset to use for plotting :type percent: float :param seed: the seed value to use for subsampling :type seed: int :param size: the size of the circles in point :type size: int :param title: an optional title :type title: str :param outfile: the (optional) file to save the generated plot to. The extension determines the file format. :type outfile: str :param wait: whether to wait for the user to close the plot :type wait: bool
def in_template_path(fn): """ Return the absolute path of `fn` inside the template directory, so you don't have to construct the path manually. Args: fn (str): Name of the file in template dir. Return: str: Absolute path to the file. """ return os.path.join( os.path.abspath(os.path.dirname(__file__)), "../templates", fn, )
Return the absolute path of `fn` inside the template directory, so you don't have to construct the path manually. Args: fn (str): Name of the file in template dir. Return: str: Absolute path to the file.
def validatefeatures(self, features): """Returns features in validated form, or raises an Exception. Mostly for internal use""" validatedfeatures = [] for feature in features: if isinstance(feature, int) or isinstance(feature, float): validatedfeatures.append(str(feature)) elif self.delimiter in feature and not self.sklearn: raise ValueError("Feature contains delimiter: " + feature) elif self.sklearn and isinstance(feature, str): # sklearn features arrive as pre-joined sparse strings validatedfeatures.append(feature) else: validatedfeatures.append(feature) return validatedfeatures
Returns features in validated form, or raises an Exception. Mostly for internal use
def _CompileTemplate( template_str, builder, meta='{}', format_char='|', default_formatter='str', whitespace='smart'): """Compile the template string, calling methods on the 'program builder'. Args: template_str: The template string. It should not have any compilation options in the header -- those are parsed by FromString/FromFile builder: The interface of _ProgramBuilder isn't fixed. Use at your own risk. meta: The metacharacters to use, e.g. '{}', '[]'. format_char: The character separating a name from its formatters, either ':' or '|'. default_formatter: The formatter to use for substitutions that are missing a formatter. The 'str' formatter is the "default default" -- it just tries to convert the context value to a string in some unspecified manner. whitespace: 'smart' or 'strip-line'. In smart mode, if a directive is alone on a line, with only whitespace on either side, then the whitespace is removed. In 'strip-line' mode, every line is stripped of its leading and trailing whitespace. Returns: The compiled program (obtained from the builder) Raises: The various subclasses of CompilationError. For example, if default_formatter=None, and a variable is missing a formatter, then MissingFormatter is raised. This function is public so it can be used by other tools, e.g. a syntax checking tool run before submitting a template to source control. """ meta_left, meta_right = SplitMeta(meta) # : is meant to look like Python 3000 formatting {foo:.3f}. According to # PEP 3101, that's also what .NET uses. # | is more readable, but, more importantly, reminiscent of pipes, which is # useful for multiple formatters, e.g. {name|js-string|html} if format_char not in (':', '|'): raise ConfigurationError( 'Only format characters : and | are accepted (got %r)' % format_char) if whitespace not in ('smart', 'strip-line'): raise ConfigurationError('Invalid whitespace mode %r' % whitespace) # If we end at -1, we got too many {end}; if we end at 1, we're missing # an {end}. balance_counter = 0 comment_counter = 0 # ditto for ##BEGIN/##END has_defines = False for token_type, token in _Tokenize(template_str, meta_left, meta_right, whitespace): if token_type == COMMENT_BEGIN_TOKEN: comment_counter += 1 continue if token_type == COMMENT_END_TOKEN: comment_counter -= 1 if comment_counter < 0: raise CompilationError('Got too many ##END markers') continue # Don't process any tokens if comment_counter > 0: continue if token_type in (LITERAL_TOKEN, META_LITERAL_TOKEN): if token: builder.Append(token) continue if token_type in (SECTION_TOKEN, REPEATED_SECTION_TOKEN, DEF_TOKEN): parts = [p.strip() for p in token.split(format_char)] if len(parts) == 1: name = parts[0] formatters = [] else: name = parts[0] formatters = parts[1:] builder.NewSection(token_type, name, formatters) balance_counter += 1 if token_type == DEF_TOKEN: has_defines = True continue if token_type == PREDICATE_TOKEN: # {.attr?} lookups builder.NewPredicateSection(token, test_attr=True) balance_counter += 1 continue if token_type == IF_TOKEN: builder.NewPredicateSection(token, test_attr=False) balance_counter += 1 continue if token_type == OR_TOKEN: builder.NewOrClause(token) continue if token_type == ALTERNATES_TOKEN: builder.AlternatesWith() continue if token_type == END_TOKEN: balance_counter -= 1 if balance_counter < 0: # TODO: Show some context for errors raise TemplateSyntaxError( 'Got too many %send%s statements. You may have mistyped an ' "earlier 'section' or 'repeated section' directive."
% (meta_left, meta_right)) builder.EndSection() continue if token_type == SUBST_TOKEN: parts = [p.strip() for p in token.split(format_char)] if len(parts) == 1: if default_formatter is None: raise MissingFormatter('This template requires explicit formatters.') # If no formatter is specified, the default is the 'str' formatter, # which the user can define however they desire. name = token formatters = [default_formatter] else: name = parts[0] formatters = parts[1:] builder.AppendSubstitution(name, formatters) continue if token_type == SUBST_TEMPLATE_TOKEN: # no formatters builder.AppendTemplateSubstitution(token) continue if balance_counter != 0: raise TemplateSyntaxError('Got too few %send%s statements' % (meta_left, meta_right)) if comment_counter != 0: raise CompilationError('Got %d more {##BEGIN}s than {##END}s' % comment_counter) return builder.Root(), has_defines
Compile the template string, calling methods on the 'program builder'. Args: template_str: The template string. It should not have any compilation options in the header -- those are parsed by FromString/FromFile builder: The interface of _ProgramBuilder isn't fixed. Use at your own risk. meta: The metacharacters to use, e.g. '{}', '[]'. format_char: The character separating a name from its formatters, either ':' or '|'. default_formatter: The formatter to use for substitutions that are missing a formatter. The 'str' formatter is the "default default" -- it just tries to convert the context value to a string in some unspecified manner. whitespace: 'smart' or 'strip-line'. In smart mode, if a directive is alone on a line, with only whitespace on either side, then the whitespace is removed. In 'strip-line' mode, every line is stripped of its leading and trailing whitespace. Returns: The compiled program (obtained from the builder) Raises: The various subclasses of CompilationError. For example, if default_formatter=None, and a variable is missing a formatter, then MissingFormatter is raised. This function is public so it can be used by other tools, e.g. a syntax checking tool run before submitting a template to source control.
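A sketch of the json-template-style syntax this compiler accepts, with the default '{}' meta and '|' format character; the names and formatters are illustrative:

template_str = (
    '{.section user}'
    'Hello {name|html}!'
    '{.end}'
)
# _CompileTemplate(template_str, builder) walks these tokens and emits
# NewSection / AppendSubstitution / EndSection calls on the builder.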
def color(self): """Get light color.""" return (self.get_value(CONST.STATUSES_KEY).get('hue'), self.get_value(CONST.STATUSES_KEY).get('saturation'))
Get light color.
def resolve_metric_as_tuple(metric): """ Resolve metric key to a given target. :param metric: the metric name. :type metric: ``str`` :rtype: :class:`Metric` """ if "." in metric: _, metric = metric.split(".") r = [ (operator, match) for operator, match in ALL_METRICS if match[0] == metric ] if not r: raise ValueError(f"Metric {metric} not recognised.") return r[0]
Resolve metric key to a given target. :param metric: the metric name. :type metric: ``str`` :rtype: :class:`Metric`
def fingerprint(self, word, n_bits=16, most_common=MOST_COMMON_LETTERS_CG): """Return the occurrence halved fingerprint. Based on the occurrence halved fingerprint from :cite:`Cislak:2017`. Parameters ---------- word : str The word to fingerprint n_bits : int Number of bits in the fingerprint returned most_common : list The most common tokens in the target language, ordered by frequency Returns ------- int The occurrence halved fingerprint Examples -------- >>> ohf = OccurrenceHalved() >>> bin(ohf.fingerprint('hat')) '0b1010000000010' >>> bin(ohf.fingerprint('niall')) '0b10010100000' >>> bin(ohf.fingerprint('colin')) '0b1001010000' >>> bin(ohf.fingerprint('atcg')) '0b10100000000000' >>> bin(ohf.fingerprint('entreatment')) '0b1111010000110000' """ if n_bits % 2: n_bits += 1 w_len = len(word) // 2 w_1 = set(word[:w_len]) w_2 = set(word[w_len:]) fingerprint = 0 for letter in most_common: if n_bits: fingerprint <<= 1 if letter in w_1: fingerprint += 1 fingerprint <<= 1 if letter in w_2: fingerprint += 1 n_bits -= 2 else: break if n_bits > 0: fingerprint <<= n_bits return fingerprint
Return the occurrence halved fingerprint. Based on the occurrence halved fingerprint from :cite:`Cislak:2017`. Parameters ---------- word : str The word to fingerprint n_bits : int Number of bits in the fingerprint returned most_common : list The most common tokens in the target language, ordered by frequency Returns ------- int The occurrence halved fingerprint Examples -------- >>> ohf = OccurrenceHalved() >>> bin(ohf.fingerprint('hat')) '0b1010000000010' >>> bin(ohf.fingerprint('niall')) '0b10010100000' >>> bin(ohf.fingerprint('colin')) '0b1001010000' >>> bin(ohf.fingerprint('atcg')) '0b10100000000000' >>> bin(ohf.fingerprint('entreatment')) '0b1111010000110000'
def coerce(self, value): """Coerce a cleaned value.""" if self._coerce is not None: value = self._coerce(value) return value
Coerce a cleaned value.
def _install_one( repo_url, branch, destination, commit='', patches=None, exclude_modules=None, include_modules=None, base=False, work_directory='' ): """ Install a third party odoo add-on :param string repo_url: url of the repo that contains the patch. :param string branch: name of the branch to checkout. :param string destination: the folder where the add-on should end up at. :param string commit: Optional commit rev to checkout to. If mentioned, it takes precedence over the branch :param string work_directory: the path to the directory of the yaml file. :param list patches: Optional list of patches to apply. """ patches = patches or [] patches = [ core.FilePatch(file=patch['file'], work_directory=work_directory) if 'file' in patch else core.Patch(**patch) for patch in patches ] addon_cls = core.Base if base else core.Addon addon = addon_cls( repo_url, branch, commit=commit, patches=patches, exclude_modules=exclude_modules, include_modules=include_modules) addon.install(destination)
Install a third party odoo add-on :param string repo_url: url of the repo that contains the patch. :param string branch: name of the branch to checkout. :param string destination: the folder where the add-on should end up at. :param string commit: Optional commit rev to checkout to. If mentioned, it takes precedence over the branch :param string work_directory: the path to the directory of the yaml file. :param list patches: Optional list of patches to apply.
def identical_signature_wrapper(original_function, wrapped_function): ''' Return a function with identical signature as ``original_function``'s which will call the ``wrapped_function``. ''' context = {'__wrapped__': wrapped_function} function_def = compile( 'def {0}({1}):\n' ' return __wrapped__({2})'.format( # Keep the original function name original_function.__name__, # The function signature including defaults, i.e., 'timeout=1' inspect.formatargspec( *salt.utils.args.get_function_argspec(original_function) )[1:-1], # The function signature without the defaults inspect.formatargspec( formatvalue=lambda val: '', *salt.utils.args.get_function_argspec(original_function) )[1:-1] ), '<string>', 'exec' ) six.exec_(function_def, context) return wraps(original_function)(context[original_function.__name__])
Return a function with identical signature as ``original_function``'s which will call the ``wrapped_function``.
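A usage sketch; it assumes the salt utilities imported by the surrounding module are available, and the wrapped callable is hypothetical:

def ping(host, timeout=1):
    '''Original function whose signature should be preserved.'''

def traced_ping(host, timeout=1):
    print('pinging %s (timeout=%s)' % (host, timeout))
    return True

wrapped = identical_signature_wrapper(ping, traced_ping)
wrapped('example.com')  # calls traced_ping; signature matches ping's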
def mount(directory, lower_dir, upper_dir, mount_table=None): """Creates a mount""" return OverlayFS.mount(directory, lower_dir, upper_dir, mount_table=mount_table)
Creates a mount
def aget(dct, key): r"""Allow to get values deep in a dict with iterable keys Accessing leaf values is quite straightforward: >>> dct = {'a': {'x': 1, 'b': {'c': 2}}} >>> aget(dct, ('a', 'x')) 1 >>> aget(dct, ('a', 'b', 'c')) 2 If key is empty, it returns unchanged the ``dct`` value. >>> aget({'x': 1}, ()) {'x': 1} """ key = iter(key) try: head = next(key) except StopIteration: return dct if isinstance(dct, list): try: idx = int(head) except ValueError: raise IndexNotIntegerError( "non-integer index %r provided on a list." % head) try: value = dct[idx] except IndexError: raise IndexOutOfRange( "index %d is out of range (%d elements in list)." % (idx, len(dct))) else: try: value = dct[head] except KeyError: ## Replace with a more informative KeyError raise MissingKeyError( "missing key %r in dict." % (head, )) except Exception: raise NonDictLikeTypeError( "can't query subvalue %r of a leaf%s." % (head, (" (leaf value is %r)" % dct) if len(repr(dct)) < 15 else "")) return aget(value, key)
r"""Allow to get values deep in a dict with iterable keys Accessing leaf values is quite straightforward: >>> dct = {'a': {'x': 1, 'b': {'c': 2}}} >>> aget(dct, ('a', 'x')) 1 >>> aget(dct, ('a', 'b', 'c')) 2 If key is empty, it returns unchanged the ``dct`` value. >>> aget({'x': 1}, ()) {'x': 1}
def env(var_name, default=False): """ Get the environment variable. If not found use a default or False, but print to stderr a warning about the missing env variable.""" try: value = os.environ[var_name] if str(value).strip().lower() in ['false', 'f', 'no', 'off', '0', 'none', 'null', '']: return None return value except KeyError: from traceback import format_exc msg = "Unable to find the %s environment variable.\nUsing the value %s (the default) instead.\n" % (var_name, default) sys.stderr.write(format_exc()) sys.stderr.write(msg) return default
Get the environment variable. If not found use a default or False, but print to stderr a warning about the missing env variable.
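Typical usage: fall back to a development default while the warning goes to stderr. The variable names and values here are illustrative:

DATABASE_URL = env('DATABASE_URL', default='sqlite:///dev.db')
VERBOSE = env('APP_VERBOSE', default=None)  # 'false'/'0'/'no' come back as None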
def parse_template(self, tmp, reset=False, only_body=False): """parses a template or user edited string to fill this envelope. :param tmp: the string to parse. :type tmp: str :param reset: remove previous envelope content :type reset: bool :param only_body: if set, interpret the whole string as the body and leave the headers untouched :type only_body: bool """ logging.debug('GoT: """\n%s\n"""', tmp) if self.sent_time: self.modified_since_sent = True if only_body: self.body = tmp else: m = re.match(r'(?P<h>([a-zA-Z0-9_-]+:.+\n)*)\n?(?P<b>(\s*.*)*)', tmp) assert m d = m.groupdict() headertext = d['h'] self.body = d['b'] # remove existing content if reset: self.headers = {} # go through multiline, utf-8 encoded headers # we decode the edited text ourselves here as # email.message_from_file can't deal with raw utf8 header values key = value = None for line in headertext.splitlines(): if re.match('[a-zA-Z0-9_-]+:', line): # new k/v pair if key and value: # save old one from stack self.add(key, value) # save key, value = line.strip().split(':', 1) # parse new pair # strip spaces, otherwise we end up having " foo" as value # of "Subject: foo" value = value.strip() elif key and value: # append new line without key prefix value += line if key and value: # save last one if present self.add(key, value) # interpret 'Attach' pseudo header if 'Attach' in self: to_attach = [] for line in self.get_all('Attach'): gpath = os.path.expanduser(line.strip()) to_attach += [g for g in glob.glob(gpath) if os.path.isfile(g)] logging.debug('Attaching: %s', to_attach) for path in to_attach: self.attach(path) del self['Attach']
parses a template or user edited string to fill this envelope. :param tmp: the string to parse. :type tmp: str :param reset: remove previous envelope content :type reset: bool :param only_body: if set, interpret the whole string as the body and leave the headers untouched :type only_body: bool
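A sketch of the expected template format: RFC-2822-style headers, a blank line, then the body. The envelope instance and file path are assumed:

template = ('Subject: hello\n'
            'To: alice@example.com\n'
            'Attach: ~/report.pdf\n'
            '\n'
            'Hi Alice,\nreport attached.\n')
envelope.parse_template(template, reset=True)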
def _dcm_array_to_matrix3(self, dcm): """ Converts dcm array into Matrix3 :param dcm: 3x3 dcm array :returns: Matrix3 """ assert(dcm.shape == (3, 3)) a = Vector3(dcm[0][0], dcm[0][1], dcm[0][2]) b = Vector3(dcm[1][0], dcm[1][1], dcm[1][2]) c = Vector3(dcm[2][0], dcm[2][1], dcm[2][2]) return Matrix3(a, b, c)
Converts dcm array into Matrix3 :param dcm: 3x3 dcm array :returns: Matrix3
def update_by_example(cls, collection, example_data, new_value, keep_null=False, wait_for_sync=None, limit=None): """ This will find all documents in the collection that match the specified example object, and partially update the document body with the new value specified. Note that document meta-attributes such as _id, _key, _from, _to etc. cannot be replaced. Note: the limit attribute is not supported on sharded collections. Using it will result in an error. Returns result dict of the request. :param collection Collection instance :param example_data An example document that all collection documents are compared against. :param new_value A document containing all the attributes to update in the found documents. :param keep_null This parameter can be used to modify the behavior when handling null values. Normally, null values are stored in the database. By setting the keepNull parameter to false, this behavior can be changed so that all attributes in data with null values will be removed from the updated document. :param wait_for_sync if set to true, then all removal operations will instantly be synchronised to disk. If this is not specified, then the collection's default sync behavior will be applied. :param limit an optional value that determines how many documents to update at most. If limit is specified but is less than the number of documents in the collection, it is undefined which of the documents will be updated. :returns dict """ kwargs = { 'newValue': new_value, 'options': { 'keepNull': keep_null, 'waitForSync': wait_for_sync, 'limit': limit, } } return cls._construct_query(name='update-by-example', collection=collection, example=example_data, result=False, **kwargs)
This will find all documents in the collection that match the specified example object, and partially update the document body with the new value specified. Note that document meta-attributes such as _id, _key, _from, _to etc. cannot be replaced. Note: the limit attribute is not supported on sharded collections. Using it will result in an error. Returns result dict of the request. :param collection Collection instance :param example_data An example document that all collection documents are compared against. :param new_value A document containing all the attributes to update in the found documents. :param keep_null This parameter can be used to modify the behavior when handling null values. Normally, null values are stored in the database. By setting the keepNull parameter to false, this behavior can be changed so that all attributes in data with null values will be removed from the updated document. :param wait_for_sync if set to true, then all removal operations will instantly be synchronised to disk. If this is not specified, then the collection's default sync behavior will be applied. :param limit an optional value that determines how many documents to update at most. If limit is specified but is less than the number of documents in the collection, it is undefined which of the documents will be updated. :returns dict
async def websocket_disconnect(self, message): """ Handle the disconnect message. This is propagated to all upstream applications. """ # set this flag so as to ensure we don't send a downstream `websocket.close` message due to all # child applications closing. self.closing = True # inform all children await self.send_upstream(message) await super().websocket_disconnect(message)
Handle the disconnect message. This is propagated to all upstream applications.
def commit(func): '''Used as a decorator for automatically making session commits''' def wrap(**kwarg): with session_withcommit() as session: a = func(**kwarg) session.add(a) return session.query(songs).order_by( songs.song_id.desc()).first().song_id return wrap
Used as a decorator for automatically making session commits
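A usage sketch; the songs model and session_withcommit come from the surrounding module. Note the decorated function must be called with keyword arguments and return an unsaved songs row:

@commit
def add_song(*, title, artist):
    return songs(title=title, artist=artist)

new_id = add_song(title='Blue', artist='Joni Mitchell')  # latest song_id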
def utterances_from_tier(eafob: Eaf, tier_name: str) -> List[Utterance]: """ Returns utterances found in the given Eaf object in the given tier.""" try: speaker = eafob.tiers[tier_name][2]["PARTICIPANT"] except KeyError: speaker = None # We don't know the name of the speaker. tier_utterances = [] annotations = sort_annotations( list(eafob.get_annotation_data_for_tier(tier_name))) for i, annotation in enumerate(annotations): eaf_stem = eafob.eaf_path.stem utter_id = "{}.{}.{}".format(eaf_stem, tier_name, i) start_time = eafob.time_origin + annotation[0] end_time = eafob.time_origin + annotation[1] text = annotation[2] utterance = Utterance(eafob.media_path, eafob.eaf_path, utter_id, start_time, end_time, text, speaker) tier_utterances.append(utterance) return tier_utterances
Returns utterances found in the given Eaf object in the given tier.
def Dirname(self): """Get a new copied object with only the directory path.""" result = self.Copy() while 1: last_directory = posixpath.dirname(result.last.path) if last_directory != "/" or len(result) <= 1: result.last.path = last_directory # Make sure to clear the inode information. result.last.inode = None break result.Pop(-1) return result
Get a new copied object with only the directory path.
def fetchone(self, query, *args): """ Returns the first result of the given query. :param query: The query to be executed as a `str`. :param args: Parameters that will be substituted for placeholders in the query. :return: The retrieved row with each field being one element in a `tuple`. """ cursor = self.connection.cursor() try: cursor.execute(query, args) return cursor.fetchone() finally: cursor.close()
Returns the first result of the given query. :param query: The query to be executed as a `str`. :param args: Parameters that will be substituted for placeholders in the query. :return: The retrieved row with each field being one element in a `tuple`.
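Parameterized usage; the table and the '%s' placeholder style are illustrative and depend on the DB-API driver behind self.connection:

row = db.fetchone('SELECT id, name FROM users WHERE email = %s',
                  'alice@example.com')
if row is not None:
    user_id, name = row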
def writelines_nl(fileobj: TextIO, lines: Iterable[str]) -> None: """ Writes lines, plus terminating newline characters, to the file. (Since :func:`fileobj.writelines` doesn't add newlines... http://stackoverflow.com/questions/13730107/writelines-writes-lines-without-newline-just-fills-the-file) """ # noqa fileobj.write('\n'.join(lines) + '\n')
Writes lines, plus terminating newline characters, to the file. (Since :func:`fileobj.writelines` doesn't add newlines... http://stackoverflow.com/questions/13730107/writelines-writes-lines-without-newline-just-fills-the-file)
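Usage: every line, including the last, ends with exactly one newline:

import io

buf = io.StringIO()
writelines_nl(buf, ['first', 'second'])
assert buf.getvalue() == 'first\nsecond\n'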
def load_file(self, app, pathname, relpath, pypath): """Loads a file and creates a View from it. Files are split between a YAML front-matter and the content (unless it is a .yml file). Returns None if the file does not yield a declarative view. """ try: view_class = self.get_file_view_cls(relpath) return create_view_from_file(pathname, source_template=relpath, view_class=view_class) except DeclarativeViewError: pass
Loads a file and creates a View from it. Files are split between a YAML front-matter and the content (unless it is a .yml file). Returns None if the file does not yield a declarative view.
def _ensure_frames(cls, documents): """ Ensure all items in a list are frames by converting those that aren't. """ frames = [] for document in documents: if not isinstance(document, Frame): frames.append(cls(document)) else: frames.append(document) return frames
Ensure all items in a list are frames by converting those that aren't.
def ParseMessage(descriptor, byte_str): """Generate a new Message instance from this Descriptor and a byte string. Args: descriptor: Protobuf Descriptor object byte_str: Serialized protocol buffer byte string Returns: Newly created protobuf Message object. """ result_class = MakeClass(descriptor) new_msg = result_class() new_msg.ParseFromString(byte_str) return new_msg
Generate a new Message instance from this Descriptor and a byte string. Args: descriptor: Protobuf Descriptor object byte_str: Serialized protocol buffer byte string Returns: Newly created protobuf Message object.
def infer_datetime_units(dates): """Given an array of datetimes, returns a CF compatible time-unit string of the form "{time_unit} since {date[0]}", where `time_unit` is 'days', 'hours', 'minutes' or 'seconds' (the first one that can evenly divide all unique time deltas in `dates`) """ dates = np.asarray(dates).ravel() if np.asarray(dates).dtype == 'datetime64[ns]': dates = pd.to_datetime(dates, box=False) dates = dates[pd.notnull(dates)] reference_date = dates[0] if len(dates) > 0 else '1970-01-01' reference_date = pd.Timestamp(reference_date) else: reference_date = dates[0] if len(dates) > 0 else '1970-01-01' reference_date = format_cftime_datetime(reference_date) unique_timedeltas = np.unique(np.diff(dates)) if unique_timedeltas.dtype == np.dtype('O'): # Convert to np.timedelta64 objects using pandas to work around a # NumPy casting bug: https://github.com/numpy/numpy/issues/11096 unique_timedeltas = pd.to_timedelta(unique_timedeltas, box=False) units = _infer_time_units_from_diff(unique_timedeltas) return '%s since %s' % (units, reference_date)
Given an array of datetimes, returns a CF compatible time-unit string of the form "{time_unit} since {date[0]}", where `time_unit` is 'days', 'hours', 'minutes' or 'seconds' (the first one that can evenly divide all unique time deltas in `dates`)
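Example: hourly timestamps yield an hour-based CF unit string:

import numpy as np

times = np.array(['2000-01-01T00:00', '2000-01-01T01:00',
                  '2000-01-01T02:00'], dtype='datetime64[ns]')
infer_datetime_units(times)  # 'hours since 2000-01-01 00:00:00'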
def flush_stream_threads(process, out_formatter=None, err_formatter=terminal.fg.red, size=1): """ Context manager that creates 2 threads, one for each standard stream (stdout/stderr), updating the piped data in real time. The formatters are callables that receive and manipulate the data, e.g. coloring it before writing to a ``sys`` stream. See ``FlushStreamThread`` for more information. """ out = FlushStreamThread(process=process, stream_name="stdout", formatter=out_formatter, size=size) err = FlushStreamThread(process=process, stream_name="stderr", formatter=err_formatter, size=size) out.start() err.start() yield out, err out.join() err.join()
Context manager that creates 2 threads, one for each standard stream (stdout/stderr), updating the piped data in real time. The formatters are callables that receive and manipulate the data, e.g. coloring it before writing to a ``sys`` stream. See ``FlushStreamThread`` for more information.
def get_item_query_session(self): """Gets the ``OsidSession`` associated with the item query service. return: (osid.assessment.ItemQuerySession) - an ``ItemQuerySession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_item_query()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_item_query()`` is ``true``.* """ if not self.supports_item_query(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.ItemQuerySession(runtime=self._runtime)
Gets the ``OsidSession`` associated with the item query service. return: (osid.assessment.ItemQuerySession) - an ``ItemQuerySession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_item_query()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_item_query()`` is ``true``.*
def prepare_inputs(self, times=None, weather=None): """ Prepare the solar position, irradiance, and weather inputs to the model. Parameters ---------- times : None or DatetimeIndex, default None Times at which to evaluate the model. Can be None if attribute `times` is already set. weather : None or DataFrame, default None If ``None``, the weather attribute is used. If the weather attribute is also ``None`` assumes air temperature is 20 C, wind speed is 0 m/s and irradiation calculated from clear sky data. Column names must be ``'wind_speed'``, ``'temp_air'``, ``'dni'``, ``'ghi'``, ``'dhi'``. Do not pass incomplete irradiation data. Use method :py:meth:`~pvlib.modelchain.ModelChain.complete_irradiance` instead. Notes ----- Assigns attributes: ``times``, ``solar_position``, ``airmass``, ``total_irrad``, `aoi` """ if weather is not None: self.weather = weather if self.weather is None: self.weather = pd.DataFrame(index=times) if times is not None: self.times = times self.solar_position = self.location.get_solarposition( self.times, method=self.solar_position_method) self.airmass = self.location.get_airmass( solar_position=self.solar_position, model=self.airmass_model) if not any([x in ['ghi', 'dni', 'dhi'] for x in self.weather.columns]): self.weather[['ghi', 'dni', 'dhi']] = self.location.get_clearsky( self.solar_position.index, self.clearsky_model, solar_position=self.solar_position, airmass_absolute=self.airmass['airmass_absolute']) if not {'ghi', 'dni', 'dhi'} <= set(self.weather.columns): raise ValueError( "Incomplete irradiance data set. Please check your input " + "data.\nData set needs to have 'dni', 'dhi' and 'ghi'.\n" + "Detected data: {0}".format(list(self.weather.columns))) # PVSystem.get_irradiance and SingleAxisTracker.get_irradiance # and PVSystem.get_aoi and SingleAxisTracker.get_aoi # have different method signatures. Use partial to handle # the differences. if isinstance(self.system, SingleAxisTracker): self.tracking = self.system.singleaxis( self.solar_position['apparent_zenith'], self.solar_position['azimuth']) self.tracking['surface_tilt'] = ( self.tracking['surface_tilt'] .fillna(self.system.axis_tilt)) self.tracking['surface_azimuth'] = ( self.tracking['surface_azimuth'] .fillna(self.system.axis_azimuth)) self.aoi = self.tracking['aoi'] get_irradiance = partial( self.system.get_irradiance, self.tracking['surface_tilt'], self.tracking['surface_azimuth'], self.solar_position['apparent_zenith'], self.solar_position['azimuth']) else: self.aoi = self.system.get_aoi( self.solar_position['apparent_zenith'], self.solar_position['azimuth']) get_irradiance = partial( self.system.get_irradiance, self.solar_position['apparent_zenith'], self.solar_position['azimuth']) self.total_irrad = get_irradiance( self.weather['dni'], self.weather['ghi'], self.weather['dhi'], airmass=self.airmass['airmass_relative'], model=self.transposition_model) if self.weather.get('wind_speed') is None: self.weather['wind_speed'] = 0 if self.weather.get('temp_air') is None: self.weather['temp_air'] = 20 return self
Prepare the solar position, irradiance, and weather inputs to the model. Parameters ---------- times : None or DatetimeIndex, default None Times at which to evaluate the model. Can be None if attribute `times` is already set. weather : None or DataFrame, default None If ``None``, the weather attribute is used. If the weather attribute is also ``None`` assumes air temperature is 20 C, wind speed is 0 m/s and irradiation calculated from clear sky data. Column names must be ``'wind_speed'``, ``'temp_air'``, ``'dni'``, ``'ghi'``, ``'dhi'``. Do not pass incomplete irradiation data. Use method :py:meth:`~pvlib.modelchain.ModelChain.complete_irradiance` instead. Notes ----- Assigns attributes: ``times``, ``solar_position``, ``airmass``, ``total_irrad``, `aoi`